From eb06464d41da2535ff60a977c93b7092bf8476a0 Mon Sep 17 00:00:00 2001 From: spaceoddity91719 Date: Wed, 16 Oct 2024 11:32:53 +0800 Subject: [PATCH 1/4] update(mogdb):50x eng --- .../1-1-x-tuner-overview.md | 10 - .../1-2-preparations.md | 227 -- .../1-3-examples.md | 158 - .../1-4-obtaining-help-information.md | 51 - .../1-5-command-reference.md | 50 - .../1-6-Troubleshooting.md | 14 - .../2-1-single-query-index-recommendation.md | 59 - .../2-2-virtual-index.md | 123 - ...2-3-workload-level-index-recommendation.md | 105 - .../3-1-overview.md | 10 - .../3-2-environment-deployment.md | 11 - .../3-3-usage-guide.md | 34 - .../3-4-obtaining-help-information.md | 41 - .../3-5-command-reference.md | 20 - .../3-6-troubleshooting.md | 15 - .../4-1-overview.md | 10 - .../4-2-environment-deployment.md | 10 - .../4-3-usage-guide.md | 40 - .../4-4-obtaining-help-information.md | 38 - .../4-5-command-reference.md | 21 - .../4-6-troubleshooting.md | 11 - .../5-1-overview.md | 16 - .../5-2-usage-guide.md | 96 - .../5-3-obtaining-help-information.md | 49 - .../5-4-command-reference.md | 17 - .../5-5-troubleshooting.md | 11 - .../6-1-overview.md | 20 - .../6-2-usage-guide.md | 42 - .../6-3-obtaining-help-information.md | 36 - .../6-4-command-reference.md | 19 - .../6-5-troubleshooting.md | 10 - .../7-anomaly-detection/7-1-overview.md | 10 - .../7-anomaly-detection/7-2-usage-guide.md | 54 - .../7-3-obtaining-help-information.md | 41 - .../7-4-command-reference.md | 21 - .../7-5-troubleshooting.md | 11 - .../ai4db/dbmind-mode/1-service.md | 207 -- .../ai4db/dbmind-mode/2-component.md | 37 - .../AI-features/ai4db/dbmind-mode/3-set.md | 62 - .../about-mogdb/mogdb-new-feature/5.0.6.md | 175 ++ .../about-mogdb/mogdb-new-feature/5.0.7.md | 32 + .../about-mogdb/mogdb-new-feature/5.0.8.md | 122 + .../mogdb-new-feature/release-note.md | 15 +- ...mon-primary-backup-deployment-scenarios.md | 170 +- .../resource-pooling-architecture.md | 38 +- .../exporting-data/exporting-data.md | 24 +- .../importing-and-exporting-data.md | 20 +- .../importing-data/importing-data.md | 36 +- .../localization/localization.md | 22 +- .../routine-maintenance.md | 46 +- .../abo-optimizer/adaptive-plan-selection.md | 82 +- ...haracteristic-description-abo-optimizer.md | 20 +- .../intelligent-cardinality-estimation.md | 90 +- .../ai-capabilities/ai-capabilities.md | 22 +- ...ection-forecast-and-exception-detection.md | 88 +- ...-cause-analysis-for-slow-sql-statements.md | 82 +- .../3-index-recommendation.md | 86 +- .../4-parameter-tuning-and-diagnosis.md | 98 +- .../5-slow-sql-statement-discovery.md | 84 +- .../characteristic-description-ai4db.md | 26 +- .../db4ai-database-driven-ai.md | 84 +- .../1-standard-sql.md | 82 +- .../2-standard-development-interfaces.md | 78 +- .../3-postgresql-api-compatibility.md | 78 +- .../ECPG.md | 535 +++- .../MogDB-MySQL-compatibility.md | 58 +- .../MogDB-Oracle-compatibility.md | 58 +- .../application-development-interfaces.md | 28 +- .../characteristic-description-overview.md | 25 + .../characteristic-description.md | 38 +- .../compatibility/authid-current-user.md | 98 + .../compatibility/compatibility.md | 72 +- .../compatibility/insert-on-conflict.md | 115 + .../mod-function-float-to-int.md | 44 + .../modify-table-log-property.md | 105 + .../nesting-of-aggregate-functions.md | 112 + .../order-by-group-by-scenario-expansion.md | 94 + .../compatibility/pivot-and-unpivot.md | 669 +++++ ...ed-procedure-out-parameters-in-pbe-mode.md | 179 ++ .../1-access-control-model.md | 90 +- 
.../10-row-level-access-control.md | 92 +- .../11-password-strength-verification.md | 150 +- ...ity-query-in-a-fully-encrypted-database.md | 188 +- .../13-ledger-database-mechanism.md | 94 +- .../14-transparent-data-encryption.md | 112 +- ...ation-of-control-and-access-permissions.md | 98 +- .../3-database-encryption-authentication.md | 78 +- .../4-data-encryption-and-storage.md | 102 +- .../database-security/5-database-audit.md | 78 +- .../6-network-communication-security.md | 94 +- .../database-security/7-resource-label.md | 104 +- .../database-security/8-unified-audit.md | 154 +- .../9-dynamic-data-anonymization.md | 222 +- .../database-security/database-security.md | 44 +- ...ort-for-functions-and-stored-procedures.md | 82 +- .../10-autonomous-transaction.md | 94 +- .../11-global-temporary-table.md | 96 +- .../12-pseudocolumn-rownum.md | 92 +- .../13-stored-procedure-debugging.md | 82 +- ...-load-balancing-and-readwrite-isolation.md | 78 +- .../15-in-place-update-storage-engine.md | 78 +- .../16-publication-subscription.md | 110 +- .../18-data-compression-in-oltp-scenarios.md | 80 +- .../enterprise-level-features/2-sql-hints.md | 86 +- .../3-full-text-indexing.md | 112 +- .../4-copy-interface-for-error-tolerance.md | 78 +- ...support-for-advanced-analysis-functions.md | 120 +- .../7-materialized-view.md | 78 +- .../9-creating-an-index-online.md | 88 +- .../enterprise-level-features.md | 75 +- .../import-export-specific-objects.md | 167 ++ .../index-support-fuzzy-matching.md | 51 + .../pruning-order-by-in-subqueries.md | 211 ++ .../scroll-cursor.md | 49 + ...for-pruning-subquery-projection-columns.md | 88 + .../high-availability/1-primary-standby.md | 90 +- .../10-adding-or-deleting-a-standby-server.md | 122 +- ...-entering-the-maximum-availability-mode.md | 90 +- ...-a-standby-node-to-build-a-standby-node.md | 78 +- .../17-two-city-three-dc-dr.md | 92 +- .../2-logical-replication.md | 80 +- .../high-availability/4-logical-backup.md | 86 +- .../high-availability/5-physical-backup.md | 110 +- .../6-automatic-job-retry-upon-failure.md | 180 +- .../high-availability/7-ultimate-rto.md | 84 +- .../8-cascaded-standby-server.md | 96 +- .../high-availability/9-delayed-replay.md | 98 +- .../cm-dual-network-segment-deployment.md | 175 ++ ...fficiency-of-logical-backup-and-restore.md | 175 ++ .../high-availability/high-availability.md | 4 +- .../high-performance/1-cbo-optimizer.md | 76 +- ...store-execution-to-vectorized-execution.md | 218 +- .../high-performance/2-llvm.md | 78 +- .../high-performance/3-vectorized-engine.md | 92 +- ...ent-of-dirty-pages-flushing-performance.md | 102 + .../high-performance/high-performance.md | 5 +- .../high-performance/seqscan-prefetch.md | 158 + .../high-performance/ustore-smp.md | 96 + .../2-workload-diagnosis-report.md | 150 +- .../5-system-kpi-aided-diagnosis.md | 138 +- .../autonomous-transaction-management.md | 72 + .../maintainability/corrupt-files-handling.md | 64 + .../maintainability/fault-diagnosis.md | 68 +- .../maintainability/maintainability.md | 4 +- .../maintainability/sql-patch.md | 266 +- ...a-distributed-database-using-kubernetes.md | 70 +- .../distributed-database-capability.md | 70 +- .../middleware/middleware.md | 22 +- .../workload-management.md | 18 +- .../jdbc-release-notes.md | 23 - .../1-development-based-on-odbc.md | 1 + ...nfiguring-a-data-source-in-the-linux-os.md | 2 +- .../odbc-release-notes.md | 14 + .../development-based-on-libpq.md | 3 +- .../libpq-release-notes.md | 14 + .../logical-decoding-support-for-DDL.md | 186 ++ 
.../logical-decoding/logical-decoding.md | 1 + .../sql-syntax/dolphin-alter-table.md | 2 +- .../sql-syntax/dolphin-sql-syntax.md | 2 +- ...-use-db_name.md => dolphin-use-db-name.md} | 10 +- .../plpgsql/1-4-arrays-and-records.md | 44 +- .../cm-cm_server.md | 2 +- .../installation-guide/manual-installation.md | 2 +- .../v5.0/mogeaver/mogeaver-release-notes.md | 26 - .../performance-tuning/performance-tuning.md | 24 +- .../experience-in-rewriting-sql-statements.md | 126 +- .../introduction-to-the-sql-execution-plan.md | 270 +- .../sql-tuning/query-execution-process.md | 114 +- ...etting-key-parameters-during-sql-tuning.md | 54 +- ...iewing-and-modifying-a-table-definition.md | 128 +- .../sql-tuning/sql-tuning-optimizer.md | 30 - .../sql-tuning/sql-tuning.md | 35 +- .../sql-tuning/tuning-process.md | 66 +- .../sql-tuning/updating-statistics.md | 108 +- .../system-tuning/configuring-llvm.md | 150 +- .../system-tuning/configuring-smp.md | 222 +- .../configuring-vector-engine.md | 116 +- .../system-tuning/optimizing-os-parameters.md | 234 +- .../resource-load-management-overview.md | 50 +- .../resource-load-management.md | 20 +- .../enabling-resource-load-management.md | 108 +- .../resource-management-preparations.md | 24 +- .../resource-planning.md | 66 +- .../setting-control-group.md | 416 +-- .../v5.0/performance-tuning/wdr/wdr-report.md | 770 ++--- .../wdr/wdr-snapshot-schema.md | 544 ++-- .../v5.0/performance-tuning/wdr/wdr.md | 20 +- .../ai-feature-functions.md | 400 +-- .../array-functions-and-operators.md | 1258 ++++---- .../binary-string-functions-and-operators.md | 448 +-- .../bit-string-functions-and-operators.md | 306 +- .../comparison-operators.md | 54 +- ...a-damage-detection-and-repair-functions.md | 412 +-- ...time-processing-functions-and-operators.md | 2640 ++++++++--------- .../dynamic-data-masking-functions.md | 130 +- .../encrypted-equality-functions.md | 376 +-- .../event-trigger-functions.md | 230 +- .../fault-injection-system-function.md | 40 +- .../functions-and-operators.md | 98 +- .../geometric-functions-and-operators.md | 1896 ++++++------ .../global-syscache-feature-functions.md | 198 +- .../global-temporary-table-functions.md | 264 +- .../functions-and-operators/hash-function.md | 1186 ++++---- .../hll-functions-and-operators.md | 1752 +++++------ .../internal-functions/internal-functions.md | 22 +- .../ledger-database-functions.md | 192 +- .../logical-operators.md | 52 +- ...network-address-functions-and-operators.md | 912 +++--- .../obsolete-functions.md | 36 +- .../other-system-functions.md | 22 +- .../prompt-message-function.md | 42 +- .../range-functions-and-operators.md | 870 +++--- .../set-returning-functions.md | 262 +- .../statistics-information-functions-1.md | 1318 ++++---- .../statistics-information-functions-2.md | 1322 ++++----- .../statistics-information-functions.md | 26 +- .../guc-value-inquiry-functions.md | 49 - .../system-information-functions.md | 31 +- .../advisory-lock-functions.md | 486 +-- .../configuration-settings-functions.md | 132 +- .../database-object-functions.md | 876 +++--- .../logical-replication-functions.md | 1186 ++++---- .../other-functions.md | 1320 ++++----- .../row-store-compression-system-functions.md | 190 +- .../segment-page-storage-functions.md | 226 +- .../server-signal-functions.md | 132 +- .../snapshot-synchronization-functions.md | 48 +- .../system-management-functions.md | 40 +- .../undo-system-functions.md | 176 +- .../universal-file-access-functions.md | 296 +- .../text-search-functions-and-operators.md | 1070 
+++---- .../trigger-functions.md | 110 +- .../type-conversion-functions.md | 22 +- .../window-functions.md | 1252 ++++---- .../functions-and-operators/xml-functions.md | 758 ++--- .../guc-parameters/AI-features.md | 164 +- .../guc-parameters/DCF-parameters-settings.md | 15 + .../guc-parameters/HyperLogLog.md | 202 +- .../guc-parameters/alarm-detection.md | 146 +- .../guc-parameters/auditing/audit-switch.md | 2 + .../guc-parameters/auditing/auditing.md | 22 +- .../auditing/operation-audit.md | 399 +-- .../auditing/user-and-permission-audit.md | 191 +- .../guc-parameters/automatic-vacuuming.md | 13 + .../guc-parameters/backend-compression.md | 300 +- .../communication-library-parameters.md | 211 +- .../connection-and-authentication.md | 22 +- .../connection-settings.md | 65 + .../security-and-authentication.md | 34 +- .../connection-pool-parameters.md | 70 +- .../guc-parameters/data-import-export.md | 89 + .../default-settings-of-client-connection.md | 22 +- .../other-default-parameters.md | 108 +- .../guc-parameters/delimiter.md | 18 + .../guc-parameters/developer-options.md | 162 +- .../error-reporting-and-logging.md | 24 +- .../logging-content.md | 23 + .../using-csv-log-output.md | 176 +- .../guc-parameters/fault-tolerance.md | 36 +- .../guc-parameters/flashback.md | 145 +- .../guc-parameters/global-temporary-table.md | 75 +- .../guc-parameters/guc-parameter-list.md | 1448 +++++---- .../guc-parameters/guc-parameter-usage.md | 36 +- .../ha-replication/primary-server.md | 45 +- .../ha-replication/sending-server.md | 100 + .../ha-replication/standby-server.md | 21 +- .../guc-parameters/load-management.md | 20 + .../guc-parameters/lock-management.md | 304 +- .../miscellaneous-parameters.md | 114 +- ...multi-level-cache-management-parameters.md | 69 + .../query-planning/genetic-query-optimizer.md | 212 +- .../optimizer-cost-constants.md | 60 + .../optimizer-method-configuration.md | 748 ++--- .../query-planning/other-optimizer-options.md | 127 +- .../query-planning/query-planning.md | 40 +- .../reference-guide/guc-parameters/query.md | 105 +- .../reference-guide-guc-parameters.md | 8 +- ...on-parameters-of-two-database-instances.md | 34 +- .../guc-parameters/reserved-parameters.md | 58 +- .../asynchronous-io-operations.md | 30 + .../resource-consumption/background-writer.md | 51 +- .../cost-based-vacuum-delay.md | 152 +- .../kernel-resource-usage.md | 82 +- .../resource-consumption/memory.md | 53 +- .../resource-consumption.md | 28 +- .../resource-pooling-parameters.md | 284 ++ .../guc-parameters/scheduled-task.md | 84 +- .../guc-parameters/security-configuration.md | 172 +- .../performance-statistics.md | 70 +- .../query-and-index-statistics-collector.md | 296 +- .../statistics-during-the-database-running.md | 20 +- .../guc-parameters/thread-pool.md | 278 +- .../guc-parameters/upgrade-parameters.md | 69 +- .../compatibility-with-earlier-versions.md | 294 +- .../platform-and-client-compatibility.md | 486 ++- .../version-and-platform-compatibility.md | 20 +- .../guc-parameters/wait-events.md | 46 +- .../write-ahead-log/archiving.md | 39 +- .../write-ahead-log/log-replay.md | 27 +- .../write-ahead-log/settings.md | 45 +- .../write-ahead-log/write-ahead-log.md | 24 +- ...-parameters-supported-by-standby-server.md | 52 +- .../memory/GS_SHARED_MEMORY_DETAIL.md | 21 - .../schema/DBE_PERF/memory/memory-schema.md | 1 - .../reference-guide/sql-reference/alias.md | 106 +- .../appendix/extended-functions.md | 72 +- .../sql-reference/appendix/extended-syntax.md | 104 +- 
.../gin-indexes/gin-indexes-introduction.md | 32 +- .../appendix/gin-indexes/gin-indexes.md | 24 +- .../gin-indexes/gin-tips-and-tricks.md | 52 +- .../appendix/gin-indexes/implementation.md | 48 +- ...{appendix.md => sql-reference-appendix.md} | 22 +- .../sql-reference/constant-and-macro.md | 48 +- .../sql-reference/controlling-transactions.md | 29 - .../sql-reference/dcl-syntax-overview.md | 100 +- .../sql-reference/ddl-syntax-overview.md | 450 +-- .../sql-reference/dml-syntax-overview.md | 102 +- .../expressions/array-expressions.md | 188 +- .../expressions/row-expressions.md | 56 +- .../expressions/subquery-expressions.md | 292 +- .../additional-features.md | 24 +- .../gathering-document-statistics.md | 100 +- .../manipulating-queries.md | 104 +- .../manipulating-tsvector.md | 62 +- .../additional-features/rewriting-queries.md | 136 +- .../controlling-text-search.md | 24 +- .../highlighting-results.md | 132 +- .../parsing-queries.md | 148 +- .../ranking-search-results.md | 216 +- .../dictionaries/dictionaries-overview.md | 72 +- .../dictionaries/dictionaries.md | 30 +- .../dictionaries/ispell-dictionary.md | 98 +- .../dictionaries/simple-dictionary.md | 128 +- .../dictionaries/snowball-dictionary.md | 28 +- .../dictionaries/stop-words.md | 66 +- .../dictionaries/thesaurus-dictionary.md | 184 +- .../full-text-search/full-text-search.md | 34 +- .../introduction/basic-text-matching.md | 112 +- .../introduction/configurations.md | 42 +- .../full-text-search-introduction.md | 24 +- .../introduction/what-is-a-document.md | 66 +- .../full-text-search/limitations.md | 32 +- .../sql-reference/full-text-search/parser.md | 216 +- .../constraints-on-index-use.md | 88 +- .../tables-and-indexes/creating-an-index.md | 130 +- .../tables-and-indexes/searching-a-table.md | 178 +- .../tables-and-indexes/tables-and-indexes.md | 22 +- .../testing-a-configuration.md | 130 +- .../testing-a-dictionary.md | 58 +- .../testing-a-parser.md | 138 +- .../testing-and-debugging-text-search.md | 22 +- .../sql-reference/mogdb-sql.md | 74 +- .../sql-reference/ordinary-table.md | 102 +- .../sql-reference-anonymous-block.md | 150 +- .../sql-reference/sql-reference-contraints.md | 302 +- .../sql-reference/sql-reference-index.md | 316 +- .../sql-reference/sql-reference-llvm.md | 178 +- .../sql-reference/sql-reference-lock.md | 150 +- .../sql-reference/sql-reference-trigger.md | 314 +- .../sql-reference/sql-reference.md | 3 +- .../sql-reference/system-operation.md | 76 +- .../transaction/sql-reference-transaction.md | 23 +- .../transaction/transaction-auto-commit.md | 146 + .../transaction/transaction-control.md | 54 +- .../sql-reference/type-base-value.md | 164 +- .../type-conversion/functions.md | 198 +- .../type-conversion-overview.md | 104 +- .../type-conversion/type-conversion.md | 26 +- .../union-case-and-related-constructs.md | 408 +-- .../type-conversion/value-storage.md | 72 +- .../sql-syntax/ALTER-EVENT-TRIGGER.md | 110 +- .../sql-syntax/ALTER-FOREIGN-DATA-WRAPPER.md | 130 +- .../sql-syntax/ALTER-OPERATOR.md | 116 +- .../sql-syntax/CREATE-EVENT.md | 202 +- .../sql-syntax/CREATE-FOREIGN-DATA-WRAPPER.md | 102 +- .../sql-syntax/CREATE-GROUP.md | 2 +- .../sql-syntax/CREATE-RESOURCE-POOL.md | 304 +- .../reference-guide/sql-syntax/CREATE-ROLE.md | 2 +- .../sql-syntax/CREATE-SERVER.md | 256 +- .../reference-guide/sql-syntax/CREATE-USER.md | 254 +- .../reference-guide/sql-syntax/DELIMITER.md | 114 +- .../sql-syntax/DROP-EVENT-TRIGGER.md | 92 +- .../reference-guide/sql-syntax/DROP-EVENT.md | 74 +- 
.../sql-syntax/DROP-FOREIGN-DATA-WRAPPER.md | 80 +- .../sql-syntax/DROP-RESOURCE-POOL.md | 86 +- .../reference-guide/sql-syntax/SHOW-EVENTS.md | 84 +- .../supported-data-types/HLL.md | 414 +-- .../supported-data-types/bit-string-types.md | 100 +- .../data-type-used-by-the-ledger-database.md | 62 +- ...-types-supported-by-column-store-tables.md | 464 +-- .../supported-data-types/date-time-types.md | 646 ++-- .../supported-data-types/geometric.md | 236 +- .../supported-data-types/monetary.md | 68 +- .../supported-data-types/network-address.md | 134 +- .../object-identifier-types.md | 180 +- .../supported-data-types/pseudo-types.md | 130 +- .../supported-data-types.md | 60 +- .../supported-data-types/text-search-types.md | 336 +-- .../supported-data-types/uuid-type.md | 54 +- .../supported-data-types/xml-type.md | 136 +- ...iew-of-system-catalogs-and-system-views.md | 34 +- .../system-catalogs-and-system-views.md | 28 +- .../system-catalogs/GS_TXN_SNAPSHOT.md | 36 +- .../system-catalogs/PG_EVENT_TRIGGER.md | 40 +- .../system-catalogs/PG_SUBSCRIPTION_REL.md | 42 +- .../system-catalogs/PG_TYPE.md | 92 +- .../system-catalogs/STATEMENT_HISTORY.md | 4 - .../system-catalogs/system-catalogs.md | 238 +- .../system-views/GS_SHARED_MEMORY_DETAIL.md | 21 + .../system-views/IOS_STATUS.md | 27 + .../system-views/PATCH_INFORMATION_TABLE.md | 20 + .../system-views/system-views.md | 257 +- .../viewing-system-catalogs.md | 262 +- .../client-tool/gsql/client-tool-gsql.md | 3 +- .../client-tool/gsql/gsql-release-notes.md | 14 + .../tool-reference/server-tools/gs_check.md | 10 +- .../2-managing-users-and-their-permissions.md | 2064 ++++++------- .../security/3-configuring-database-audit.md | 744 ++--- .../security/database-security-management.md | 28 +- product/en/docs-mogdb/v5.0/toc.md | 40 +- product/en/docs-mogdb/v5.0/toc_about.md | 5 +- .../v5.0/toc_characteristic_description.md | 20 + .../docs-mogdb/v5.0/toc_datatypes-and-sql.md | 4 +- product/en/docs-mogdb/v5.0/toc_dev.md | 4 + .../v5.0/toc_parameters-and-tools.md | 6 +- product/en/docs-mogdb/v5.0/toc_performance.md | 1 - .../v5.0/toc_system-catalogs-and-functions.md | 3 + .../upgrade-guide/2-read-before-upgrade.md | 6 +- 428 files changed, 36001 insertions(+), 31476 deletions(-) delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-1-x-tuner-overview.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-2-preparations.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-3-examples.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-4-obtaining-help-information.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-5-command-reference.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-6-Troubleshooting.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-1-single-query-index-recommendation.md delete mode 100644 
product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-2-virtual-index.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-3-workload-level-index-recommendation.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-1-overview.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-2-environment-deployment.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-3-usage-guide.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-4-obtaining-help-information.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-5-command-reference.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-6-troubleshooting.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-1-overview.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-2-environment-deployment.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-3-usage-guide.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-4-obtaining-help-information.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-5-command-reference.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-6-troubleshooting.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-1-overview.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-2-usage-guide.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-3-obtaining-help-information.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-4-command-reference.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-5-troubleshooting.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-1-overview.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-2-usage-guide.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-3-obtaining-help-information.md delete mode 100644 
product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-4-command-reference.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-5-troubleshooting.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-1-overview.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-2-usage-guide.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-3-obtaining-help-information.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-4-command-reference.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-5-troubleshooting.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/1-service.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/2-component.md delete mode 100644 product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/3-set.md create mode 100644 product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.6.md create mode 100644 product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.7.md create mode 100644 product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.8.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/compatibility/authid-current-user.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/compatibility/insert-on-conflict.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/compatibility/mod-function-float-to-int.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/compatibility/modify-table-log-property.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/compatibility/nesting-of-aggregate-functions.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/compatibility/order-by-group-by-scenario-expansion.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/compatibility/pivot-and-unpivot.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/compatibility/stored-procedure-out-parameters-in-pbe-mode.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/import-export-specific-objects.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/index-support-fuzzy-matching.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/pruning-order-by-in-subqueries.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/scroll-cursor.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/support-for-pruning-subquery-projection-columns.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/high-availability/cm-dual-network-segment-deployment.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/high-availability/enhanced-efficiency-of-logical-backup-and-restore.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/high-performance/enhancement-of-dirty-pages-flushing-performance.md create mode 100644 
product/en/docs-mogdb/v5.0/characteristic-description/high-performance/seqscan-prefetch.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/high-performance/ustore-smp.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/maintainability/autonomous-transaction-management.md create mode 100644 product/en/docs-mogdb/v5.0/characteristic-description/maintainability/corrupt-files-handling.md create mode 100644 product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/odbc-release-notes.md create mode 100644 product/en/docs-mogdb/v5.0/developer-guide/dev/4-development-based-on-libpq/libpq-release-notes.md create mode 100644 product/en/docs-mogdb/v5.0/developer-guide/logical-replication/logical-decoding/logical-decoding-support-for-DDL.md rename product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/{dolphin-use-db_name.md => dolphin-use-db-name.md} (68%) delete mode 100644 product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/sql-tuning-optimizer.md delete mode 100644 product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-information-functions/guc-value-inquiry-functions.md create mode 100644 product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/data-import-export.md create mode 100644 product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/delimiter.md create mode 100644 product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/multi-level-cache-management-parameters.md create mode 100644 product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-pooling-parameters.md delete mode 100644 product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/memory/GS_SHARED_MEMORY_DETAIL.md rename product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/{appendix.md => sql-reference-appendix.md} (95%) delete mode 100644 product/en/docs-mogdb/v5.0/reference-guide/sql-reference/controlling-transactions.md create mode 100644 product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/transaction-auto-commit.md create mode 100644 product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/GS_SHARED_MEMORY_DETAIL.md create mode 100644 product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/IOS_STATUS.md create mode 100644 product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/PATCH_INFORMATION_TABLE.md create mode 100644 product/en/docs-mogdb/v5.0/reference-guide/tool-reference/client-tool/gsql/gsql-release-notes.md diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-1-x-tuner-overview.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-1-x-tuner-overview.md deleted file mode 100644 index 17d57b3f..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-1-x-tuner-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2021-05-19 ---- - -# Overview - -X-Tuner is a parameter tuning tool integrated into databases. It uses AI technologies such as deep reinforcement learning and global search algorithm to obtain the optimal database parameter settings without manual intervention. 
This tool does not have to be deployed together with the database; it can be deployed and run independently, without requiring a database installation environment.
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-2-preparations.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-2-preparations.md
deleted file mode 100644
index dfd50971..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-2-preparations.md
+++ /dev/null
@@ -1,227 +0,0 @@
---
title: Preparations
summary: Preparations
author: Guo Huan
date: 2021-10-21
---

# Preparations
## Prerequisites and Precautions

- The database is running normally, clients can connect to it properly, and data can be imported to the database, so that the tuning program can run the benchmark to measure the tuning effect.
- To use this tool, you need to specify a user who logs in to the database. That user must have sufficient permissions to obtain the required database status information.
- If you log in to the database host as a Linux user, add **$GAUSSHOME/bin** to the **PATH** environment variable so that you can directly run database O&M tools such as gsql, gs_guc, and gs_ctl (see the environment-check sketch after this list).
- Python 3.6 or later is recommended. The required dependencies must be installed in the runtime environment so that the tuning program can start properly. You can install a separate Python 3.6+ environment without setting it as a global environment variable. You are advised not to install the tool as the root user; if you install it as root but run it as another user, ensure that you have read permission on the configuration file.
- This tool can run in three modes. In **tune** and **train** modes, you need to configure the benchmark running environment and import data, because the tool iteratively runs the benchmark to check whether performance improves after parameters are modified.
- In **recommend** mode, you are advised to run the command while the database is executing the workload, so that more accurate real-time workload information is obtained.
- By default, this tool provides benchmark driver script samples for TPC-C, TPC-H, TPC-DS, and sysbench. If you use these benchmarks to run stress tests on the database system, you can modify or configure the corresponding configuration files. To adapt the tool to your own service scenarios, compile a driver script for your customized benchmark based on the **template.py** file in the **benchmark** directory.
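A quick way to verify these prerequisites is sketched below. The port, database name, and paths are placeholders that depend on your installation; adjust them accordingly.

```bash
# Hypothetical pre-flight check before running X-Tuner; values are illustrative.
export PATH="$GAUSSHOME/bin:$PATH"          # make gsql, gs_guc, and gs_ctl directly callable
python3 --version                           # expect Python 3.6 or later
gsql -d postgres -p 5432 -c 'SELECT 1;'     # confirm that the database accepts connections
```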
## Principles

The tuning program is a tool independent of the database kernel. It requires the database username and password, as well as the host login credentials, so that it can control the database and drive the benchmark performance test. Before starting the tuning program, ensure that interaction with the test environment is normal, that the benchmark test script runs properly, and that the database can be connected.

> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If the parameters to be tuned include parameters that take effect only after a restart, the database will be restarted multiple times during tuning. Exercise caution when using **train** and **tune** modes on a database that is running jobs.

X-Tuner can run in any of the following modes:

- **recommend**: Logs in to the database as the specified user, collects feature information about the running workload, and generates a parameter recommendation report based on that information. The report flags improper parameter settings and potential risks in the current database, describes the behavior and characteristics of the running workload, and lists the recommended parameter settings. This mode does not restart the database; the other modes may restart it repeatedly.
- **train**: Modifies parameters and executes the benchmark based on the benchmark information provided by the user. The reinforcement learning model is trained through repeated iterations so that it can later be loaded in **tune** mode for optimization.
- **tune**: Uses an optimization algorithm to tune database parameters. Two types of algorithms are currently supported: deep reinforcement learning and global search (that is, global optimization) algorithms. Deep reinforcement learning requires a model produced in **train** mode, whereas global search algorithms need no prior training and can be used for search and optimization directly.

> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** If the deep reinforcement learning algorithm is used in **tune** mode, a trained model must be available, and the parameters used to train the model must be the same as those in the parameter list (including max and min) for tuning.

**Figure 1** X-Tuner structure

![x-tuner-structure](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/preparations-1.png)

Figure 1 shows the overall architecture of X-Tuner. The system can be divided into the following parts:

- DB: The DB_Agent module abstracts database instances. It obtains internal database status information and current database parameters, and sets database parameters. The SSH connection used for logging in to the database environment also belongs to the database side.
- Algorithm: The algorithm package used for optimization, including global search algorithms (such as Bayesian optimization and particle swarm optimization) and deep reinforcement learning (such as DDPG).
- X-Tuner main logic module: Encapsulated by the environment module. Each step is one optimization iteration, and the whole optimization process is iterated over multiple steps.
- benchmark: A user-specified benchmark performance test script used to run benchmark jobs. The benchmark result reflects the performance of the database system.
> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Define the benchmark score so that a larger value indicates better performance. For example, for a benchmark that measures the overall execution duration of SQL statements, such as TPC-H, the negative of the overall execution duration can be used as the score.
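The step-based optimization process described above can be summarized by the following minimal sketch. All names here (`db_agent`, `benchmark`, `algorithm`, `propose`, `update`) are illustrative stand-ins, not the actual X-Tuner API:

```python
# Illustrative sketch of the iterative tuning loop; not the real X-Tuner code.
def tune(db_agent, benchmark, algorithm, max_steps):
    best_score, best_params = float('-inf'), None
    for _ in range(max_steps):
        params = algorithm.propose()     # the optimizer suggests a candidate parameter set
        db_agent.set_params(params)      # apply parameters (may restart the database)
        score = benchmark.run()          # a larger score means better performance
        algorithm.update(params, score)  # feed the result back to the optimizer
        if score > best_score:
            best_score, best_params = score, params
    return best_params, best_score
```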
## Installing and Running X-Tuner

You can run X-Tuner in two ways: run it directly from the source code, or install it on the system using Python setuptools and then invoke it with the **gs_xtuner** command. Both methods are described below.

Method 1: Run the source code directly.

1. Switch to the **xtuner** source code directory. For the openGauss community code, the path is **openGauss-server/src/gausskernel/dbmind/tools/xtuner**. For an installed database system, the source code path is **$GAUSSHOME/bin/dbmind/xtuner**.

2. You can view the **requirements.txt** file in the current directory. Use the pip package management tool to install the dependencies listed in it:

   ```bash
   pip install -r requirements.txt
   ```

3. After the installation succeeds, set the PYTHONPATH environment variable and run **main.py**. For example, to obtain the help information, run the following commands:

   ```bash
   cd tuner # Switch to the directory where the main.py entry file is located.
   export PYTHONPATH='..' # Add the upper-level directory to the package search path.
   python main.py --help # Obtain help information. Other functions are used in a similar way.
   ```

Method 2: Install X-Tuner in the system.

1. You can use the **setup.py** file to install X-Tuner in the system and then run the **gs_xtuner** command. Switch to the root directory of **xtuner**; for the directory location, see the preceding description.

2. Run the following command to install the tool into the Python environment using Python setuptools:

   ```bash
   python setup.py install
   ```

   If the **bin** directory of Python is in the *PATH* environment variable, the **gs_xtuner** command can be called directly from anywhere.

3. For example, to obtain the help information, run the following command:

   ```bash
   gs_xtuner --help
   ```
## Description of the X-Tuner Configuration File

Before running X-Tuner, you need to load the configuration file. The default path of the configuration file is tuner/xtuner.conf. You can run the **gs_xtuner --help** command to view the absolute path of the configuration file that is loaded by default.

```
...
-x TUNER_CONFIG_FILE, --tuner-config-file TUNER_CONFIG_FILE
                      This is the path of the core configuration file of the
                      X-Tuner. You can specify the path of the new
                      configuration file. The default path is /path/to/xtuner/xtuner.conf.
                      You can modify the configuration file to control the
                      tuning process.
...
```

You can modify the configuration items in the configuration file as required to instruct X-Tuner to perform different actions. For details about the configuration items, see Table 2 in [Command Reference](1-5-command-reference.md). If you need to change the path from which the configuration file is loaded, specify it through the **-x** command-line option.
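For orientation only, a fragment of such a configuration file might look like the following. The option names are configuration items discussed in this document, but the values and layout are assumptions; treat the shipped xtuner.conf as the authoritative template.

```
# Hypothetical xtuner.conf fragment -- values are illustrative, not defaults.
scenario = auto              # workload type: ap, tp, htap, or auto
tune_strategy = auto         # tune mode algorithm: rl, gop, or auto
tuning_list =                # optional path to a JSON tuning-list file
benchmark_script = tpcc      # driver script name in the benchmark directory
benchmark_path = /path/to/benchmarksql/run
benchmark_cmd =              # leave empty to use the cmd defined in the driver script
rl_algorithm = ddpg          # reinforcement learning algorithm for train/tune
rl_model_path = /path/to/saved_model
rl_steps = 100
max_episode_steps = 10
test_episode = 1             # maximum episodes when tune_strategy = rl
gop_algorithm = bayes        # bayes or pso
max_iterations = 100
particle_nums = 3            # takes effect only for pso
```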
## Benchmark Selection and Configuration

The benchmark driver scripts are stored in the benchmark subdirectory of X-Tuner. X-Tuner provides driver scripts for common benchmarks, such as TPC-C and TPC-H. X-Tuner calls the **get_benchmark_instance()** function in the benchmark/__init__.py file to load a driver script and obtain a benchmark driver instance. The format of a benchmark driver script is as follows:

- Name of the driver script: the name of the benchmark, which uniquely identifies the driver script. You can specify which driver script to load by setting the **benchmark_script** configuration item in the X-Tuner configuration file.
- The driver script contains the *path* variable, the *cmd* variable, and the **run** function.

The three elements of the driver script are described below:

1. *path*: the path where the benchmark script is stored. You can modify it in the driver script or specify it with the **benchmark_path** configuration item in the configuration file.

2. *cmd*: the command for executing the benchmark script. You can modify it in the driver script or specify it with the **benchmark_cmd** configuration item in the configuration file. Placeholders can be used in the cmd string to fill in the information needed to run the command; for details, see the TPC-H driver script example. The placeholders include:

   - {host}: IP address of the database host
   - {port}: listening port number of the database instance
   - {user}: username for logging in to the database
   - {password}: password of the user who logs in to the database
   - {db}: name of the database being tuned

3. **run** function: The signature of this function is as follows:

   ```
   def run(remote_server, local_host) -> float:
   ```

   The return type is float, representing the evaluation score after the benchmark is executed; a larger value indicates better performance. For example, the TPC-C result tpmC can be used as the return value, and the negative of the total execution time of all SQL statements in TPC-H can also be used as the return value.

   The *remote_server* variable is the shell command interface for the remote host (the database host) that X-Tuner passes to the script. The *local_host* variable is the shell command interface for the local host (the host where the X-Tuner script runs), also passed in by X-Tuner. The methods provided by this shell command interface include:

   ```
   exec_command_sync(command, timeout)
   Function: Runs a shell command on the host.
   Parameter list:
   command: a str, or a list or tuple of str elements.
   timeout: the timeout interval for command execution, in seconds. This parameter is optional.
   Return value:
   Returns a 2-tuple (stdout, stderr), where stdout is the standard output stream result and stderr is the standard error stream result. Both are of the str type.
   ```

   ```
   exit_status
   Function: This attribute is the exit status code of the most recently executed shell command.
   Note: Generally, an exit status code of 0 indicates normal execution; a non-zero code indicates that an error occurred.
   ```

Benchmark driver script examples:
1. TPC-C driver script

   ```python
   from tuner.exceptions import ExecutionError

   # WARN: You need to download the benchmark-sql test tool to the system,
   # replace the PostgreSQL JDBC driver with the openGauss driver,
   # and configure the benchmark-sql configuration file.
   # The program starts the test by running the following command:
   path = '/path/to/benchmarksql/run'  # Path for storing the TPC-C test script benchmark-sql
   cmd = "./runBenchmark.sh props.gs"  # Customize a benchmark-sql test configuration file named props.gs.

   def run(remote_server, local_host):
       # Switch to the TPC-C script directory, clear historical error logs, and run the test command.
       # Waiting a few seconds is advised because the benchmark-sql test script generates the final
       # test report through a shell script, so the whole process may be slightly delayed.
       # To ensure that the final tpmC report can be obtained, wait for 3 seconds.
       stdout, stderr = remote_server.exec_command_sync(['cd %s' % path, 'rm -rf benchmarksql-error.log', cmd, 'sleep 3'])
       # If there is data in the standard error stream, raise an exception and exit abnormally.
       if len(stderr) > 0:
           raise ExecutionError(stderr)

       # Find the final tpmC result.
       tpmC = None
       split_string = stdout.split()  # Split the standard output stream result.
       for i, st in enumerate(split_string):
           # In benchmark-sql 5.0, the tpmC value appears two tokens after the keyword (NewOrders).
           # In normal cases, the tpmC value is extracted once the keyword is found.
           if "(NewOrders)" in st:
               tpmC = split_string[i + 2]
               break
       stdout, stderr = remote_server.exec_command_sync(
           "cat %s/benchmarksql-error.log" % path)
       nb_err = stdout.count("ERROR:")  # Count the errors that occurred during the benchmark run.
       return float(tpmC) - 10 * nb_err  # The error count is a penalty term; a larger coefficient (here 10) penalizes errors more heavily.
   ```

2. TPC-H driver script

   ```python
   import time

   from tuner.exceptions import ExecutionError

   # WARN: You need to import data into the database; the SQL statements in the following path will be executed.
   # The program automatically collects the total execution duration of these SQL statements.
   path = '/path/to/tpch/queries'  # Directory for storing SQL scripts used for the TPC-H test
   cmd = "gsql -U {user} -W {password} -d {db} -p {port} -f {file}"  # Command for running the TPC-H test script. Generally, gsql -f with a script file is used.

   def run(remote_server, local_host):
       # Traverse all test case file names in the current directory.
       find_file_cmd = "find . -type f -name '*.sql'"
       stdout, stderr = remote_server.exec_command_sync(['cd %s' % path, find_file_cmd])
       if len(stderr) > 0:
           raise ExecutionError(stderr)
       files = stdout.strip().split('\n')
       time_start = time.time()
       for file in files:
           # Fill the {file} placeholder with the file variable and run the command.
           perform_cmd = cmd.format(file=file)
           stdout, stderr = remote_server.exec_command_sync(['cd %s' % path, perform_cmd])
           if len(stderr) > 0:
               print(stderr)
       # The cost is the total execution duration of all test cases.
       cost = time.time() - time_start
       # Return the negative duration to match the definition of the run function: a larger return value indicates better performance.
       return -cost
   ```

diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-3-examples.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-3-examples.md
deleted file mode 100644
index eb207db5..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-3-examples.md
+++ /dev/null
@@ -1,158 +0,0 @@
---
title: Examples
summary: Examples
author: Guo Huan
date: 2021-05-19
---

# Examples

X-Tuner supports three modes: recommend mode for obtaining parameter diagnosis reports, train mode for training reinforcement learning models, and tune mode for tuning with optimization algorithms. The three modes are selected through command-line parameters, while the details are specified in the configuration file.

## Configuring the Database Connection Information

The configuration items for connecting to the database are the same in all three modes. You can provide the connection information on the command line or in a JSON configuration file. Both methods are described below:

1. Entering the connection information on the command line

   Pass the following options: **--db-name --db-user --port --host --host-user**; **--host-ssh-port** is optional. The following is an example:

   ```
   gs_xtuner recommend --db-name postgres --db-user omm --port 5678 --host 192.168.1.100 --host-user omm
   ```

2. Entering the connection information in a JSON configuration file

   Assume that the file name is **connection.json**. The following is an example of the JSON configuration file:

   ```
   {
       "db_name": "postgres",  # Database name
       "db_user": "dba",       # Username for logging in to the database
       "host": "127.0.0.1",    # IP address of the database host
       "host_user": "dba",     # Username for logging in to the database host
       "port": 5432,           # Listening port number of the database
       "ssh_port": 22          # SSH listening port number of the database host
   }
   ```

   Then pass **-f connection.json**.

> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** To prevent password leakage, the configuration file and command-line parameters do not carry password information by default. After you enter the preceding connection information, the program prompts you for the database password and the OS login password interactively.

## Example of Using recommend Mode

The **scenario** configuration item takes effect in recommend mode. If its value is **auto**, the workload type is detected automatically.

Run the following command to obtain the diagnosis result:

```
gs_xtuner recommend -f connection.json
```

The diagnosis report is generated as follows:

**Figure 1** Report generated in recommend mode

![report-generated-in-recommend-mode](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/examples-1.png)

The report recommends database parameter settings for the environment and provides risk warnings. It also summarizes the features of the current workload. The following features are provided for reference:

- **temp_file_size**: number of temporary files generated. If the value is greater than 0, the system uses temporary files; using too many temporary files degrades performance. If possible, increase the value of **work_mem**.
- **cache_hit_rate**: cache hit ratio of **shared_buffers**, indicating the cache efficiency of the current workload.
- **read_write_ratio**: read/write ratio of database jobs.
- **search_modify_ratio**: ratio of data queries to data modifications in a database job.
- **ap_index**: AP index of the current workload, ranging from 0 to 10. A larger value indicates a stronger preference for data analysis and retrieval.
- **workload_type**: workload type, which can be AP, TP, or HTAP based on database statistics.
- **checkpoint_avg_sync_time**: average time taken to sync data to disk at each database checkpoint, in milliseconds.
- **load_average**: average load of each CPU core over 1 minute, 5 minutes, and 15 minutes. Generally, a value around 1 means the hardware matches the workload; around 3 means the workload is heavy; above 5 means the workload is too heavy, in which case you are advised to reduce the load or upgrade the hardware.

> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Some system catalogs keep accumulating statistics, which may affect workload feature identification. You are therefore advised to clear the statistics, run the workload for a period of time, and then perform a recommend-mode diagnosis to obtain more accurate results. To clear the statistics, run the following commands:
>
> ```sql
> select pg_stat_reset_shared('bgwriter');
> select pg_stat_reset();
> ```
>
> - In recommend mode, information is read from the **pg\_stat\_database** and **pg\_stat\_bgwriter** system catalogs, so the database login user must have sufficient permissions. (The administrator permission is recommended; it can be granted to *username* by running **alter user username sysadmin**.)

## Example of Using train Mode

This mode trains the deep reinforcement learning model. The configuration items related to this mode are as follows:

- **rl_algorithm**: algorithm used to train the reinforcement learning model. Currently, this parameter can be set to **ddpg**.

- **rl_model_path**: path for storing the reinforcement learning model generated by training.

- **rl_steps**: maximum number of training steps in the training process.

- **max_episode_steps**: maximum number of steps in each episode.

- **scenario**: specifies the workload type. If the value is **auto**, the system determines the workload type automatically. The recommended parameter tuning list varies according to the workload type.

- **tuning_list**: specifies the parameters to be tuned. If this parameter is not specified, the list of parameters to be tuned is recommended automatically based on the workload type. If it is specified, **tuning_list** indicates the path of the tuning-list file. The following is an example of the content of a tuning-list configuration file.
  ```
  {
      "work_mem": {
          "default": 65536,
          "min": 65536,
          "max": 655360,
          "type": "int",
          "restart": false
      },
      "shared_buffers": {
          "default": 32000,
          "min": 16000,
          "max": 64000,
          "type": "int",
          "restart": true
      },
      "random_page_cost": {
          "default": 4.0,
          "min": 1.0,
          "max": 4.0,
          "type": "float",
          "restart": false
      },
      "enable_nestloop": {
          "default": true,
          "type": "bool",
          "restart": false
      }
  }
  ```

After the preceding configuration items are set, run the following command to start the training:

```
gs_xtuner train -f connection.json
```

After the training is complete, a model file is generated in the directory specified by the **rl_model_path** configuration item.

## Example of Using tune Mode

The tune mode supports multiple algorithms: the DDPG algorithm based on reinforcement learning (RL), and the Bayesian optimization and particle swarm optimization (PSO) algorithms, both of which are global optimization (GOP) algorithms.

The configuration items related to tune mode are as follows:

- **tune_strategy**: specifies the algorithm used for optimization. The value can be **rl** (use the reinforcement learning model), **gop** (use a global optimization algorithm), or **auto** (select automatically). If this parameter is set to **rl**, the RL-related configuration items take effect. In addition to the configuration items that take effect in train mode, **test_episode** also takes effect; it specifies the maximum number of episodes in the tuning process and directly affects the tuning execution time. Generally, a larger value means longer time consumption.
- **gop_algorithm**: specifies the global optimization algorithm. The value can be **bayes** or **pso**.
- **max_iterations**: specifies the maximum number of iterations. A larger value means a longer search time and usually a better search result.
- **particle_nums**: specifies the number of particles. This parameter takes effect only for the PSO algorithm.
- For details about **scenario** and **tuning_list**, see the description of train mode.

After the preceding items are configured, run the following command to start tuning:

```
gs_xtuner tune -f connection.json
```

> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** Before using tune and train modes, import the data required by the benchmark, check that the benchmark can run properly, and back up the current database parameters.
To query the current database parameters, run the following command: `select name, setting from pg_settings;`
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-4-obtaining-help-information.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-4-obtaining-help-information.md
deleted file mode 100644
index 12f32ef2..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-4-obtaining-help-information.md
+++ /dev/null
@@ -1,51 +0,0 @@
----
-title: Obtaining Help Information
-summary: Obtaining Help Information
-author: Guo Huan
-date: 2021-05-19
----
-
-# Obtaining Help Information
-
-Before starting the tuning program, run the following command to obtain help information:
-
-```bash
-python main.py --help
-```
-
-The command output is as follows:
-
-```bash
-usage: main.py [-h] [-m {train,tune}] [-f CONFIG_FILE] [--db-name DB_NAME]
-               [--db-user DB_USER] [--port PORT] [--host HOST]
-               [--host-user HOST_USER] [--host-ssh-port HOST_SSH_PORT]
-               [--scenario {ap,tp,htap}] [--benchmark BENCHMARK]
-               [--model-path MODEL_PATH] [-v]
-
-X-Tuner: a self-tuning toolkit for MogDB.
-
-optional arguments:
-  -h, --help            show this help message and exit
-  -m {train,tune}, --mode {train,tune}
-                        train a reinforcement learning model or tune by your
-                        trained model.
-  -f CONFIG_FILE, --config-file CONFIG_FILE
-                        you can pass a config file path or you should manually
-                        set database information.
-  --db-name DB_NAME     database name.
-  --db-user DB_USER     database user name.
-  --port PORT           database connection port.
-  --host HOST           where did your database install on?
-  --host-user HOST_USER
-                        user name of the host where your database installed
-                        on.
-  --host-ssh-port HOST_SSH_PORT
-                        host ssh port.
-  --scenario {ap,tp,htap}
-  --benchmark BENCHMARK
-  --model-path MODEL_PATH
-                        the place where you want to save model weights to or
-                        load model weights from.
-  -v, --version         show version.
-```
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-5-command-reference.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-5-command-reference.md
deleted file mode 100644
index b91dcea4..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-5-command-reference.md
+++ /dev/null
@@ -1,50 +0,0 @@
----
-title: Command Reference
-summary: Command Reference
-author: Guo Huan
-date: 2021-05-19
----
-
-# Command Reference
-
-**Table 1** Command-line parameters
-
-| Parameter | Description | Value Range |
-| :--------------------- | :----------------------------------------------------------- | :--------------------- |
-| mode | Specifies the running mode of the tuning program. | train, tune, recommend |
-| -tuner-config-file, -x | Path of the core parameter configuration file of X-Tuner. The default path is **xtuner.conf** under the installation directory. | - |
-| -db-config-file, -f | Path of the connection information configuration file used by the optimization program to log in to the database host. If the database connection information is configured in this file, the following database connection information can be omitted. | - |
-| -db-name | Specifies the name of the database to be tuned. | - |
-| -db-user | Specifies the user account used to log in to the tuned database. | - |
-| -port | Specifies the database listening port. | - |
-| -host | Specifies the host IP address of the database instance. | - |
-| -host-user | Specifies the username for logging in to the host where the database instance is located. The database O&M tools, such as **gsql** and **gs_ctl**, can be found in the environment variables of this username. | - |
-| -host-ssh-port | Specifies the SSH port number of the host where the database instance is located. This parameter is optional. The default value is **22**. | - |
-| -help, -h | Returns the help information. | - |
-| -version, -v | Returns the current tool version. | - |
-
-**Table 2** Parameters in the configuration file
-
-| Parameter | Description | Value Range |
-| :-------------------- | :----------------- | :------------------- |
-| logfile | Path for storing generated logs. | - |
-| output_tuning_result | (Optional) Specifies the path for saving the tuning result. | - |
-| verbose | Whether to print details. | on, off |
-| recorder_file | Path for storing logs that record intermediate tuning information. | - |
-| tune_strategy | Specifies the strategy used in tune mode. | rl, gop, auto |
-| drop_cache | Whether to drop the cache in each iteration. Dropping the cache can make the benchmark score more stable. If this parameter is enabled, add the login system user to the **/etc/sudoers** list and grant the NOPASSWD permission to the user. (You are advised to enable the NOPASSWD permission only temporarily and disable it after the tuning is complete.) | on, off |
-| used_mem_penalty_term | Penalty coefficient of the total memory used by the database. This parameter is used to prevent performance deterioration caused by unlimited memory usage. The greater the value is, the greater the penalty is. | Recommended value: 0 to 1 |
-| rl_algorithm | Specifies the RL algorithm. | ddpg |
-| rl_model_path | Path for saving or reading the RL model, including the save directory name and file name prefix. In train mode, this path is used to save the model. In tune mode, this path is used to read the model file. | - |
-| rl_steps | Number of training steps of the deep reinforcement learning algorithm. | - |
-| max_episode_steps | Maximum number of training steps in each episode. | - |
-| test_episode | Number of episodes when the RL algorithm is used for optimization. | - |
-| gop_algorithm | Specifies a global optimization algorithm. | bayes, pso, auto |
-| max_iterations | Maximum number of iterations of the global search algorithm. (The value is not fixed. Multiple iterations may be performed based on the actual requirements.) | - |
-| particle_nums | Number of particles when the PSO algorithm is used. | - |
-| benchmark_script | Benchmark driver script. This parameter specifies the file with the same name in the benchmark path to be loaded. Typical benchmarks, such as TPC-C and TPC-H, are supported by default. | tpcc, tpch, tpcds, sysbench … |
-| benchmark_path | Path for saving the benchmark script. If this parameter is not configured, the configuration in the benchmark driver script is used. | - |
-| benchmark_cmd | Command for starting the benchmark script. If this parameter is not configured, the configuration in the benchmark driver script is used. | - |
-| benchmark_period | This parameter is valid only for **period benchmark**. It indicates the test period of the entire benchmark, in seconds. | - |
-| scenario | Type of the workload specified by the user. | tp, ap, htap |
-| tuning_list | List of parameters to be tuned. For details, see the **share/knobs.json.template** file. | - |
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-6-Troubleshooting.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-6-Troubleshooting.md
deleted file mode 100644
index df96e0c7..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-6-Troubleshooting.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Troubleshooting
-summary: Troubleshooting
-author: Guo Huan
-date: 2021-05-19
----
-
-# Troubleshooting
-
-- Failure to connect to the database instance: Check whether the database instance is faulty or whether the security permissions in the **pg_hba.conf** file are incorrectly configured.
-- Restart failure: Check the health status of the database instance and ensure that the database instance is running properly.
-- Dependency installation failure: Upgrade the pip package management tool by running the **python -m pip install --upgrade pip** command.
-- Poor performance of TPC-C jobs: In high-concurrency scenarios such as TPC-C, a large amount of data is modified during pressure tests, and each test is not idempotent: for example, the data volume in the TPC-C database increases, invalid tuples are not cleared using VACUUM FULL, checkpoints are not triggered in the database, and the cache is not dropped. Therefore, it is recommended that benchmark data involving heavy writes, such as TPC-C, be imported again at intervals (depending on the number of concurrent tasks and the execution duration). A simple method is to back up the $PGDATA directory.
-- When the TPC-C job is running, the TPC-C driver script reports the error "TypeError: float() argument must be a string or a number, not 'NoneType'" (**none** cannot be converted to the float type). This is because the TPC-C pressure test result is not obtained. There are many causes for this problem; manually check whether TPC-C can be executed successfully and whether the returned result can be obtained. If neither problem occurs, you are advised to set the delay time of the **sleep** command in the command list of the TPC-C driver script to a larger value.
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-1-single-query-index-recommendation.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-1-single-query-index-recommendation.md
deleted file mode 100644
index ea47de36..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-1-single-query-index-recommendation.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-title: Single-query Index Recommendation
-summary: Single-query Index Recommendation
-author: Guo Huan
-date: 2021-05-19
----
-
-# Single-query Index Recommendation
-
-The single-query index recommendation function allows users to perform operations directly in the database. It generates recommended indexes for a single query statement entered by the user, based on the semantic information of the statement and the statistics of the database.
This function involves the following interfaces: - -**Table 1** Single-query index recommendation interfaces - -| Function Name | Parameter | Description | -| :-------------- | :------------------- | :----------------------------------------------------------- | -| gs_index_advise | SQL statement string | Generates a recommendation index for a single query statement. | - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - This function supports only a single SELECT statement and does not support other types of SQL statements. -> - Partitioned tables, column-store tables, segment-paged tables, common views, materialized views, global temporary tables, and encrypted databases are not supported. - -## Application Scenarios - -Use the preceding function to obtain the recommendation index generated for the query. The recommendation result consists of the table name and column name of the index. - -For example: - -```sql -mogdb=> select "table", "column" from gs_index_advise('SELECT c_discount from bmsql_customer where c_w_id = 10'); - table | column -----------------+---------- - bmsql_customer | (c_w_id) -(1 row) -``` - -The preceding information indicates that an index should be created on the **c_w_id** column of the **bmsql_customer** table. You can run the following SQL statement to create an index: - -```sql -CREATE INDEX idx on bmsql_customer(c_w_id); -``` - -Some SQL statements may also be recommended to create a join index, for example: - -```sql -mogdb=# select "table", "column" from gs_index_advise('select name, age, sex from t1 where age >= 18 and age < 35 and sex = ''f'';'); - table | column --------+------------ - t1 | (age, sex) -(1 row) -``` - -The preceding statement indicates that a join index **(age, sex)** needs to be created in the **t1** table. You can run the following command to create a join index: - -```sql -CREATE INDEX idx1 on t1(age, sex); -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Parameters of the system function **gs_index_advise()** are of the text type. If the parameters contain special characters such as single quotation marks ('), you can use single quotation marks (') to escape the special characters. For details, see the preceding example. diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-2-virtual-index.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-2-virtual-index.md deleted file mode 100644 index 4d6cd416..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-2-virtual-index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Virtual Index -summary: Virtual Index -author: Guo Huan -date: 2021-05-19 ---- - -# Virtual Index - -The virtual index function allows users to directly perform operations in the database. This function simulates the creation of a real index to avoid the time and space overhead required for creating a real index. Based on the virtual index, users can evaluate the impact of the index on the specified query statement by using the optimizer. 
- -This function involves the following interfaces: - -**Table 1** Virtual index function interfaces - -| Function Name | Parameter | Description | -| :------------------- | :------------------------------------------------------ | :----------------------------------------------------------- | -| hypopg_create_index | Character string of the statement for creating an index | Creates a virtual index. | -| hypopg_display_index | None | Displays information about all created virtual indexes. | -| hypopg_drop_index | OID of the index | Deletes a specified virtual index. | -| hypopg_reset_index | None | Clears all virtual indexes. | -| hypopg_estimate_size | OID of the index | Estimates the space required for creating a specified index. | - -This function involves the following GUC parameters: - -**Table 2** GUC parameters of the virtual index function - -| Parameter | Description | Default Value | -| :---------------- | :-------------------------------------------- | :------------ | -| enable_hypo_index | Whether to enable the virtual index function. | off | - -## Procedure - -1. Use the **hypopg_create_index** function to create a virtual index. For example: - - ```sql - mogdb=> select * from hypopg_create_index('create index on bmsql_customer(c_w_id)'); - indexrelid | indexname - ------------+------------------------------------- - 329726 | <329726>btree_bmsql_customer_c_w_id - (1 row) - ``` - -2. Enable the GUC parameter **enable_hypo_index**. This parameter controls whether the database optimizer considers the created virtual index when executing the EXPLAIN statement. By executing EXPLAIN on a specific query statement, you can evaluate whether the index can improve the execution efficiency of the query statement based on the execution plan provided by the optimizer. For example: - - ```sql - mogdb=> set enable_hypo_index = on; - SET - ``` - - Before enabling the GUC parameter, run **EXPLAIN** and the query statement. - - ```sql - mogdb=> explain SELECT c_discount from bmsql_customer where c_w_id = 10; - QUERY PLAN - ---------------------------------------------------------------------- - Seq Scan on bmsql_customer (cost=0.00..52963.06 rows=31224 width=4) - Filter: (c_w_id = 10) - (2 rows) - ``` - - After enabling the GUC parameter, run **EXPLAIN** and the query statement. - - ```sql - mogdb=> explain SELECT c_discount from bmsql_customer where c_w_id = 10; - QUERY PLAN - ------------------------------------------------------------------------------------------------------------------ - [Bypass] - Index Scan using <329726>btree_bmsql_customer_c_w_id on bmsql_customer (cost=0.00..39678.69 rows=31224 width=4) - Index Cond: (c_w_id = 10) - (3 rows) - ``` - - By comparing the two execution plans, you can find that the index may reduce the execution cost of the specified query statement. Then, you can consider creating a real index. - -3. (Optional) Use the **hypopg_display_index** function to display all created virtual indexes. For example: - - ```sql - mogdb=> select * from hypopg_display_index(); - indexname | indexrelid | table | column - --------------------------------------------+------------+----------------+------------------ - <329726>btree_bmsql_customer_c_w_id | 329726 | bmsql_customer | (c_w_id) - <329729>btree_bmsql_customer_c_d_id_c_w_id | 329729 | bmsql_customer | (c_d_id, c_w_id) - (2 rows) - ``` - -4. (Optional) Use the **hypopg_estimate_size** function to estimate the space (in bytes) required for creating a virtual index. 
For example:
-
-   ```sql
-   mogdb=> select * from hypopg_estimate_size(329730);
-    hypopg_estimate_size
-   ----------------------
-               15687680
-   (1 row)
-   ```
-
-5. Delete the virtual index.
-
-   Use the **hypopg_drop_index** function to delete the virtual index with a specified OID. For example:
-
-   ```sql
-   mogdb=> select * from hypopg_drop_index(329726);
-    hypopg_drop_index
-   -------------------
-    t
-   (1 row)
-   ```
-
-   Use the **hypopg_reset_index** function to clear all created virtual indexes at a time. For example:
-
-   ```sql
-   mogdb=> select * from hypopg_reset_index();
-    hypopg_reset_index
-   --------------------
-
-   (1 row)
-   ```
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - Running **EXPLAIN ANALYZE** does not involve the virtual index function.
-> - The created virtual index is at the database instance level and can be shared by sessions. After a session is closed, the virtual index still exists. However, the virtual index will be cleared after the database is restarted.
-> - This function does not support common views, materialized views, and column-store tables.
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-3-workload-level-index-recommendation.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-3-workload-level-index-recommendation.md
deleted file mode 100644
index 409d1c16..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-3-workload-level-index-recommendation.md
+++ /dev/null
@@ -1,105 +0,0 @@
----
-title: Workload-level Index Recommendation
-summary: Workload-level Index Recommendation
-author: Guo Huan
-date: 2021-05-19
----
-
-# Workload-level Index Recommendation
-
-For workload-level indexes, you can run scripts outside the database to use this function. This function uses a workload consisting of multiple DML statements as the input and generates a batch of indexes that can optimize the overall workload execution performance. In addition, it provides the function of extracting service data SQL statements from logs.
-
-## Prerequisites
-
-- The database is normal, and the client can be connected properly.
-
-- The **gsql** tool has been installed by the current user, and the tool path has been added to the **PATH** environment variable.
-
-- The Python 3.6+ environment is available.
-
-- To use the service data extraction function, you need to set the GUC parameters of the node whose data is to be collected as follows:
-
-  - log_min_duration_statement = 0
-
-  - log_statement = 'all'
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** After service data extraction is complete, you are advised to restore the preceding GUC parameters. Otherwise, log files may grow excessively.
-
-## Procedure for Using the Service Data Extraction Script
-
-1. Set the GUC parameters according to the instructions in the prerequisites.
-
-2. Run the Python script **extract_log.py**:
-
-   ```
-   python extract_log.py [l LOG_DIRECTORY] [f OUTPUT_FILE] [-d DATABASE] [-U USERNAME] [--start_time] [--sql_amount] [--statement] [--json]
-   ```
-
-   The input parameters are as follows:
-
-   - **LOG_DIRECTORY**: directory for storing **pg_log**.
-   - **OUTPUT_FILE**: path for storing the output SQL statements, that is, the path for storing the extracted service data.
-   - **DATABASE** (optional): database name.
If this parameter is not specified, all databases are selected by default. - - **USERNAME** (optional): username. If this parameter is not specified, all users are selected by default. - - **start_time** (optional): start time for log collection. If this parameter is not specified, all files are collected by default. - - **sql_amount** (optional): maximum number of SQL statements to be collected. If this parameter is not specified, all SQL statements are collected by default. - - **statement** (optional): Collects the SQL statements starting with **statement** in **pg_log log**. If this parameter is not specified, the SQL statements are not collected by default. - - **json**: Specifies that the collected log files are stored in JSON format after SQL normalization. If the default format is not specified, each SQL statement occupies a line. - - An example is provided as follows. - - ``` - python extract_log.py $GAUSSLOG/pg_log/dn_6001 sql_log.txt -d postgres -U omm --start_time '2021-07-06 00:00:00' --statement - ``` - -3. Change the GUC parameter values set in step 1 to the values before the setting. - -## Procedure for Using the Index Recommendation Script - -1. Prepare a file that contains multiple DML statements as the input workload. Each statement in the file occupies a line. You can obtain historical service statements from the offline logs of the database. - -2. Run the Python script **index_advisor_workload.py**: - - ``` - python index_advisor_workload.py [p PORT] [d DATABASE] [f FILE] [--h HOST] [-U USERNAME] [-W PASSWORD][--schema SCHEMA][--max_index_num MAX_INDEX_NUM][--max_index_storage MAX_INDEX_STORAGE] [--multi_iter_mode] [--multi_node] [--json] [--driver] [--show_detail] - ``` - - The input parameters are as follows: - - - **PORT**: port number of the connected database. - - **DATABASE**: name of the connected database. - - **FILE**: file path that contains the workload statement. - - **HOST** (optional): ID of the host that connects to the database. - - **USERNAME** (optional): username for connecting to the database. - - **PASSWORD** (optional): password for connecting to the database. - - **SCHEMA**: schema name. - - **MAX_INDEX_NUM** (optional): maximum number of recommended indexes. - - **MAX_INDEX_STORAGE** (optional): maximum size of the index set space. - - **multi_node** (optional): specifies whether the current instance is a distributed database instance. - - **multi_iter_mode** (optional): algorithm mode. You can switch the algorithm mode by setting this parameter. - - **json** (optional): Specifies the file path format of the workload statement as JSON after SQL normalization. By default, each SQL statement occupies one line. - - **driver** (optional): Specifies whether to use the Python driver to connect to the database. By default, **gsql** is used for the connection. - - **show_detail** (optional): Specifies whether to display the detailed optimization information about the current recommended index set. - - Example: - - ``` - python index_advisor_workload.py 6001 postgres tpcc_log.txt --schema public --max_index_num 10 --multi_iter_mode - ``` - - The recommendation result is a batch of indexes, which are displayed on the screen in the format of multiple create index statements. The following is an example of the result. 
- - ```sql - create index ind0 on public.bmsql_stock(s_i_id,s_w_id); - create index ind1 on public.bmsql_customer(c_w_id,c_id,c_d_id); - create index ind2 on public.bmsql_order_line(ol_w_id,ol_o_id,ol_d_id); - create index ind3 on public.bmsql_item(i_id); - create index ind4 on public.bmsql_oorder(o_w_id,o_id,o_d_id); - create index ind5 on public.bmsql_new_order(no_w_id,no_d_id,no_o_id); - create index ind6 on public.bmsql_customer(c_w_id,c_d_id,c_last,c_first); - create index ind7 on public.bmsql_new_order(no_w_id); - create index ind8 on public.bmsql_oorder(o_w_id,o_c_id,o_d_id); - create index ind9 on public.bmsql_district(d_w_id); - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The value of the **multi_node** parameter must be specified based on the current database architecture. Otherwise, the recommendation result is incomplete, or even no recommendation result is generated. diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-1-overview.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-1-overview.md deleted file mode 100644 index 0a7c9571..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-1-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2022-05-06 ---- - -# Overview - -Slow SQL statements have always been a pain point in data O&M. How to effectively diagnose the root causes of slow SQL statements is a big challenge. Based on the characteristics of MogDB and the slow SQL diagnosis experience of DBAs on the live network, this tool supports more than 25 root causes of slow SQL statements, outputs multiple root causes based on the possibility, and provides specific solutions. diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-2-environment-deployment.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-2-environment-deployment.md deleted file mode 100644 index c9c04efc..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-2-environment-deployment.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Environment Deployment -summary: Environment Deployment -author: Guo Huan -date: 2022-05-06 ---- - -# Environment Deployment - -- The database is working properly. -- The metric collection system is running properly. 
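-
-A quick way to verify the second item is to probe the collection service directly. The sketch below is illustrative only: it assumes a Prometheus-based metric collection stack (as in a typical DBMind deployment) and uses a placeholder address, so adjust the URL to your environment.
-
-```python
-# Minimal liveness probe for the metric collection system; assumes a
-# Prometheus-compatible /-/healthy endpoint and a placeholder host/port.
-import urllib.request
-
-def collection_system_healthy(url="http://127.0.0.1:9090/-/healthy", timeout=3):
-    try:
-        with urllib.request.urlopen(url, timeout=timeout) as resp:
-            return resp.status == 200
-    except OSError:
-        return False
-
-print(collection_system_healthy())
-```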
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-3-usage-guide.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-3-usage-guide.md
deleted file mode 100644
index 7eff71a5..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-3-usage-guide.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-title: Usage Guide
-summary: Usage Guide
-author: Guo Huan
-date: 2022-05-06
----
-
-# Usage Guide
-
-Assume that the **confpath** configuration file directory has been initialized.
-
-- Run the following command to start only the slow SQL diagnosis function and output the top 3 root causes (for details, see the description of the **service** subcommand):
-
-  ```
-  gs_dbmind service start -c confpath --only-run slow_query_diagnosis
-  ```
-
-- Run the following command to query the diagnosis results of slow SQL statements:
-
-  ```
-  gs_dbmind component slow_query_diagnosis show -c confpath --query SQL --start-time timestamps0 --end-time timestamps1
-  ```
-
-- Run the following command to manually clear historical diagnosis results:
-
-  ```
-  gs_dbmind component slow_query_diagnosis clean -c confpath --retention-days DAYS
-  ```
-
-- Run the following command to stop the services that have been started:
-
-  ```
-  gs_dbmind service stop -c confpath
-  ```
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-4-obtaining-help-information.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-4-obtaining-help-information.md
deleted file mode 100644
index fb76fac7..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-4-obtaining-help-information.md
+++ /dev/null
@@ -1,41 +0,0 @@
----
-title: Obtaining Help Information
-summary: Obtaining Help Information
-author: Guo Huan
-date: 2022-05-06
----
-
-# Obtaining Help Information
-
-You can run the **--help** command to obtain the help information.
For example: - -``` -gs_dbmind component slow_query_diagnosis --help -``` - -``` -usage: [-h] -c DIRECTORY [--query SLOW_QUERY] - [--start-time TIMESTAMP_IN_MICROSECONDS] - [--end-time TIMESTAMP_IN_MICROSECONDS] [--retention-days DAYS] - {show,clean} - -Slow Query Diagnosis: Analyse the root cause of slow query - -positional arguments: - {show,clean} choose a functionality to perform - -optional arguments: - -h, --help show this help message and exit - -c DIRECTORY, --conf DIRECTORY - set the directory of configuration files - --query SLOW_QUERY set a slow query you want to retrieve - --start-time TIMESTAMP_IN_MICROSECONDS - set the start time of a slow SQL diagnosis result to - be retrieved - --end-time TIMESTAMP_IN_MICROSECONDS - set the end time of a slow SQL diagnosis result to be - retrieved - --retention-days DAYS - clear historical diagnosis results and set the maximum - number of days to retain data -``` diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-5-command-reference.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-5-command-reference.md deleted file mode 100644 index a6b1931c..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-5-command-reference.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2022-05-06 ---- - -# Command Reference - -**Table 1** gs_dbmind component slow_query_diagnosis parameters - -| Parameter | Description | Value Range | -| :--------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| -h, --help | Help information | - | -| action | Action parameter | - **show**: displays results.
- **clean**: clears results.
- **diagnosis**: interactive diagnosis. |
-| -c, --conf | Configuration directory | - |
-| --query | Slow SQL text | * |
-| --start-time | Timestamp of the start time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format |
-| --end-time | Timestamp of the end time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format |
-| --retention-days | Number of days to retain results | Non-negative real number |
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-6-troubleshooting.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-6-troubleshooting.md
deleted file mode 100644
index 8f80a70f..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-6-troubleshooting.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: Troubleshooting
-summary: Troubleshooting
-author: Guo Huan
-date: 2022-05-06
----
-
-# Troubleshooting
-
-- If you run the interactive diagnosis command for a slow SQL statement that has never been executed, no diagnosis result is provided.
-- If the exporter metric collection function is not enabled, the slow SQL diagnosis function is not available.
-- After the parameters in the configuration file are reset, you need to restart the service process for the settings to take effect.
-- When the interactive diagnosis function of slow SQL statements is used, the tool obtains necessary data through the RPC and data collection services. Therefore, if the RPC and data collection services are not started, the diagnosis cannot be performed.
-- When the diagnosis function is used for interactive diagnosis, the tool checks the entered SQL statement and database. If they are invalid, the diagnosis cannot be performed.
-- During slow SQL diagnosis, the SMALL\_SHARED\_BUFFER root cause analysis needs to collect column information of related tables. Therefore, ensure that the opengauss\_exporter connection user has the permission on the schema to which the tables belong.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-1-overview.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-1-overview.md
deleted file mode 100644
index 33df1fa4..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-1-overview.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Overview
-summary: Overview
-author: Guo Huan
-date: 2022-05-06
----
-
-# Overview
-
-The trend prediction module predicts future time series trends based on historical time series data. The framework of this module is decoupled so that prediction algorithms can be replaced flexibly, and the module automatically selects an algorithm according to the features of each time series. The linear regression (LR) algorithm is supported for time series with linear features, and the ARIMA algorithm for time series with non-linear features. At present, this module covers accurate prediction of linear, non-linear, and periodic time series.
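-
-To make the automatic algorithm selection concrete, the sketch below routes a series to a linear or a non-linear forecaster depending on how well a straight line fits its history. It is an illustration under our own assumptions (the function name and the R-squared threshold are ours), not the module's actual selection logic.
-
-```python
-# Illustrative routing: near-linear series go to linear regression,
-# everything else to ARIMA. Not the DBMind internals.
-import numpy as np
-
-def choose_algorithm(series, r2_threshold=0.95):
-    """Return 'linear_regression' for near-linear series, else 'arima'."""
-    series = np.asarray(series, dtype=float)
-    x = np.arange(len(series))
-    slope, intercept = np.polyfit(x, series, deg=1)
-    fitted = slope * x + intercept
-    ss_res = float(np.sum((series - fitted) ** 2))
-    ss_tot = float(np.sum((series - series.mean()) ** 2))
-    r2 = 1.0 - ss_res / ss_tot if ss_tot > 0 else 1.0
-    return "linear_regression" if r2 >= r2_threshold else "arima"
-
-print(choose_algorithm(np.linspace(0.0, 10.0, 240)))          # linear_regression
-print(choose_algorithm(np.sin(np.linspace(0.0, 12.0, 240))))  # arima
-```
-
-A cheap goodness-of-fit screen of this kind matters when many metrics are examined per collection cycle; heavier models can then be reserved for the series that need them.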
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-2-environment-deployment.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-2-environment-deployment.md
deleted file mode 100644
index e370be78..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-2-environment-deployment.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Environment Deployment
-summary: Environment Deployment
-author: Guo Huan
-date: 2022-05-06
----
-
-# Environment Deployment
-
-The metric collection system is running properly.
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-3-usage-guide.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-3-usage-guide.md
deleted file mode 100644
index d5283c1e..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-3-usage-guide.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-title: Usage Guide
-summary: Usage Guide
-author: Guo Huan
-date: 2022-05-06
----
-
-# Usage Guide
-
-Assume that the **confpath** configuration file directory has been initialized.
-
-- Run the following command to start only the trend prediction function (for more usage, see the description of the **service** subcommand):
-
-  ```
-  gs_dbmind service start -c confpath --only-run forecast
-  ```
-
-- Run the following command to view the prediction results of a metric within a specified time range:
-
-  ```
-  gs_dbmind component forecast show -c confpath --metric-name metric_name --start-time timestamps0 --end-time timestamps1
-  ```
-
-- Run the following command to manually clear historical prediction results:
-
-  ```
-  gs_dbmind component forecast clean -c confpath --retention-days DAYS
-  ```
-
-- Run the following command to stop the services that have been started:
-
-  ```
-  gs_dbmind service stop -c confpath
-  ```
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-4-obtaining-help-information.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-4-obtaining-help-information.md
deleted file mode 100644
index cfb64369..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-4-obtaining-help-information.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: Obtaining Help Information
-summary: Obtaining Help Information
-author: Guo Huan
-date: 2022-05-06
----
-
-# Obtaining Help Information
-
-You can run the **--help** command to obtain the help information.
For example:
-
-```
-gs_dbmind component forecast --help
-```
-
-```
-usage: [-h] -c DIRECTORY [--metric-name METRIC_NAME] [--host HOST] [--start-time TIMESTAMP_IN_MICROSECONDS] [--end-time TIMESTAMP_IN_MICROSECONDS] [--retention-days DAYS]
-               {show,clean}
-
-Workload Forecasting: Forecast monitoring metrics
-
-positional arguments:
-  {show,clean}          choose a functionality to perform
-
-optional arguments:
-  -h, --help            show this help message and exit
-  -c DIRECTORY, --conf DIRECTORY
-                        set the directory of configuration files
-  --metric-name METRIC_NAME
-                        set a metric name you want to retrieve
-  --host HOST           set a host you want to retrieve
-  --start-time TIMESTAMP_IN_MICROSECONDS
-                        set a start time of for retrieving
-  --end-time TIMESTAMP_IN_MICROSECONDS
-                        set a end time of for retrieving
-  --retention-days DAYS
-                        clear historical diagnosis results and set the maximum number of days to retain data
-```
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-5-command-reference.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-5-command-reference.md
deleted file mode 100644
index 4d7741bd..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-5-command-reference.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-title: Command Reference
-summary: Command Reference
-author: Guo Huan
-date: 2022-05-06
----
-
-# Command Reference
-
-**Table 1** gs_dbmind component forecast parameters
-
-| Parameter | Description | Value Range |
-| :-------------- | :----------------------------------------------------------- | :----------------------------------------------------------- |
-| -h, --help | Help information | - |
-| action | Action parameter | - **show**: displays results.<br/>- **clean**: clears results. |
-| -c, --conf | Configuration directory | - |
-| --metric-name | Specifies the metric name to be displayed, which is used for filtering. | - |
-| --host | Specifies the service IP address and port number, which is used for filtering. | - |
-| --start-time | Timestamp of the start time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format |
-| --end-time | Timestamp of the end time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format |
-| --retention-days | Number of days to retain results | Non-negative real number |
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-6-troubleshooting.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-6-troubleshooting.md
deleted file mode 100644
index 46cf947b..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-6-troubleshooting.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Troubleshooting
-summary: Troubleshooting
-author: Guo Huan
-date: 2022-05-06
----
-
-# Troubleshooting
-
-- Considering the actual service and model prediction effect, you are advised to set the trend prediction duration to a value greater than 3600 seconds. (If the metric collection period is 15 seconds, the number of data records collected is 240.) Otherwise, the prediction effect will deteriorate, and the service will be abnormal when the data volume is extremely small. The default value is 3600 seconds.
-- After the parameters in the configuration file are reset, you need to restart the service process for the settings to take effect.
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-1-overview.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-1-overview.md
deleted file mode 100644
index 4a79133b..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-1-overview.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: SQLdiag Slow SQL Discovery
-summary: SQLdiag Slow SQL Discovery
-author: Guo Huan
-date: 2021-05-19
----
-
-# SQLdiag Slow SQL Discovery
-
-SQLdiag is a framework for predicting the execution duration of SQL statements in MogDB. Existing prediction technologies are mainly based on model prediction of execution plans. Such prediction solutions are applicable only to OLAP jobs whose execution plans can be obtained, and are not useful for quick queries such as those in OLTP or HTAP. Different from the preceding solutions, SQLdiag focuses on the historical SQL statements of databases. Because the execution duration of database SQL statements does not vary greatly over a short time, SQLdiag can detect statement sets similar to the entered statements from the historical data, and predict the SQL statement execution duration based on the SQL vectorization technology and the time series prediction algorithm. This framework has the following benefits:
-
-1. Execution plans of the SQL statements are not required, so prediction has no impact on database performance.
-2. The framework is widely applicable, unlike many well-targeted algorithms in the industry that may be applicable only to OLTP or OLAP.
-3. The framework is robust and easy to understand. Users can simply modify the framework to design their own prediction models.
-
-SQLdiag is an SQL statement execution time prediction tool. Using either a template method or deep learning, it predicts the execution time of SQL statements based on statement logic similarity and historical execution records, without obtaining SQL statement execution plans. Abnormal SQL statements can also be detected with this tool.
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-2-usage-guide.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-2-usage-guide.md
deleted file mode 100644
index 74336594..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-2-usage-guide.md
+++ /dev/null
@@ -1,96 +0,0 @@
----
-title: Usage Guide
-summary: Usage Guide
-author: Guo Huan
-date: 2021-10-21
----
-
-# Usage Guide
-
-## Prerequisites
-
-- You have obtained training data.
-- If you use the provided tool to collect training data, you need to enable the WDR function. The involved parameters are **track_stmt_stat_level** and **log_min_duration_statement**. For details, see the following sections.
-- To ensure the prediction accuracy, the historical statement logs provided by users should be as comprehensive and representative as possible.
-
-## Collecting SQL Statements
-
-This tool requires users to prepare data in advance. Each sample is separated by a newline character. The training data format is as follows:
-
-```
-SQL,EXECUTION_TIME
-```
-
-The prediction data format is as follows:
-
-```
-SQL
-```
-
-**SQL** indicates the text of an SQL statement, and **EXECUTION_TIME** indicates the execution time of the SQL statement. For details about the sample data, see **train.csv** and **predict.csv** in **sample_data**.
-
-You can collect training data in the required format yourself. The tool also provides the **load_sql_from_wdr** script for automatic collection, which obtains SQL information based on the WDR report. The involved parameters are **log_min_duration_statement** and **track_stmt_stat_level**:
-
-- **log_min_duration_statement** indicates the slow SQL threshold, in milliseconds. If the value is **0**, full collection is performed.
-- **track_stmt_stat_level** indicates the information capture level. You are advised to set it to **'L0,L0'**.
-
-After these parameters are set, a certain amount of system resources may be occupied, but the usage is generally low. In continuous high-concurrency scenarios, this may cause a performance loss of less than 5%. If the database concurrency is low, the performance loss can be ignored. The script is stored in the sqldiag root directory (*$GAUSSHOME***/bin/components/sqldiag**).
-
-```
-Use a script to obtain the training set:
-load_sql_from_wdr.py [-h] --port PORT --start_time START_TIME
-                     --finish_time FINISH_TIME [--save_path SAVE_PATH]
-Example:
-    python load_sql_from_wdr.py --start_time "2021-04-25 00:00:00" --finish_time "2021-04-26 14:00:00" --port 5432 --save_path ./data.csv
-```
-
-## Procedure
-
-1. Provide historical logs for model training.
-
-2. Perform training and prediction.
- - ``` - Template-based training and prediction: - gs_dbmind component sqldiag [train, predict] -f FILE --model template --model-path template_model_path - DNN-based training and prediction: - gs_dbmind component sqldiag [train, predict] -f FILE --model dnn --model-path dnn_model_path - ``` - -## Examples - -Use the provided test data to perform template-based training: - -``` -gs_dbmind component sqldiag train -f ./sample_data/train.csv --model template --model-path ./template -``` - -Use the provided test data for template-based prediction: - -``` -gs_dbmind component sqldiag predict -f ./sample_data/predict.csv --model template --model-path ./template --predicted-file ./result/t_result -``` - -Use the provided test data to update the template-based model: - -``` -gs_dbmind component sqldiag finetune -f ./sample_data/train.csv --model template --model-path ./template -``` - -Use the provided test data to perform DNN-based training: - -``` -gs_dbmind component sqldiag train -f ./sample_data/train.csv --model dnn --model-path ./dnn_model -``` - -Use the provided test data for DNN-based prediction: - -``` -gs_dbmind component sqldiag predict -f ./sample_data/predict.csv --model dnn --model-path ./dnn_model --predicted-file -``` - -Use the provided test data to update the DNN-based model: - -``` -gs_dbmind component sqldiag finetune -f ./sample_data/train.csv --model dnn --model-path ./dnn_model -``` diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-3-obtaining-help-information.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-3-obtaining-help-information.md deleted file mode 100644 index b863b043..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-3-obtaining-help-information.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2021-05-19 ---- - -# Obtaining Help Information - -Before using the SQLdiag tool, run the following command to obtain help information: - -``` -gs_dbmind component sqldiag --help -``` - -The command output is as follows: - -``` -usage: [-h] [-f CSV_FILE] [--predicted-file PREDICTED_FILE] - [--model {template,dnn}] --model-path MODEL_PATH - [--config-file CONFIG_FILE] - {train,predict,finetune} - -SQLdiag integrated by MogDB. - -positional arguments: - {train,predict,finetune} - The training mode is to perform feature extraction and - model training based on historical SQL statements. The - prediction mode is to predict the execution time of a - new SQL statement through the trained model. - -optional arguments: - -h, --help show this help message and exit - -f CSV_FILE, --csv-file CSV_FILE - The data set for training or prediction. The file - format is CSV. If it is two columns, the format is - (SQL statement, duration time). If it is three - columns, the format is (timestamp of SQL statement - execution time, SQL statement, duration time). - --predicted-file PREDICTED_FILE - The file path to save the predicted result. - --model {template,dnn} - Choose the model model to use. - --model-path MODEL_PATH - The storage path of the model file, used to read or - save the model file. 
-  --config-file CONFIG_FILE
-```
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-4-command-reference.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-4-command-reference.md
deleted file mode 100644
index 533a05e9..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-4-command-reference.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: Command Reference
-summary: Command Reference
-author: Guo Huan
-date: 2021-05-19
----
-
-# Command Reference
-
-**Table 1** Command-line options
-
-| Parameter | Description | Value Range |
-| :-------------- | :----------------------------------- | :------------ |
-| -f | Training or prediction file location | N/A |
-| --predicted-file | Prediction result location | N/A |
-| --model | Model selection | template, dnn |
-| --model-path | Location of the training model | N/A |
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-5-troubleshooting.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-5-troubleshooting.md
deleted file mode 100644
index 442f8d1d..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-5-troubleshooting.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Troubleshooting
-summary: Troubleshooting
-author: Guo Huan
-date: 2021-05-19
----
-
-# Troubleshooting
-
-- Failure in the training scenario: Check whether the file path of historical logs is correct and whether the file format meets the requirements.
-- Failure in the prediction scenario: Check whether the model path is correct. Ensure that the format of the file to be predicted is correct.
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-1-overview.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-1-overview.md
deleted file mode 100644
index ff2cdb48..00000000
--- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-1-overview.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Overview
-summary: Overview
-author: Guo Huan
-date: 2022-10-24
----
-
-# Overview
-
-SQL Rewriter is an SQL rewriting tool. It converts query statements into more efficient or standard forms based on preset rules to improve query efficiency.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - This function does not apply to statements that contain subqueries.
-> - This function supports only SELECT statements and DELETE statements that delete an entire table.
-> - This function contains 11 rewriting rules. Statements that do not comply with the rewriting rules are not processed.
-> - This function displays original query statements and rewritten statements on the screen. You are not advised to rewrite SQL statements that contain sensitive information.
-> - The rule for converting UNION to UNION ALL avoids deduplication and improves query performance, but the result may contain duplicate rows.
-> - If a statement contains ORDER BY + specified column name or GROUP BY + specified column name, the SelfJoin rule is not applicable.
-> - The tool does not ensure equivalent conversion of query statements. The purpose is to improve the efficiency of query statements. diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-2-usage-guide.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-2-usage-guide.md deleted file mode 100644 index 3f6fe167..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-2-usage-guide.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2022-10-24 ---- - -# Usage Guide - -## Prerequisites - -The database and connection are normal. - -## Example - -Use the **tpcc** database as an example: - -```shell -gs_dbmind component sql_rewriter 5030 tpcc queries.sql --db-host 127.0.0.1 --db-user myname --schema public -``` - -**queries.sql** is the SQL statement to be modified. The content is as follows: - -```sql -select cfg_name from bmsql_config group by cfg_name having cfg_name='1'; -delete from bmsql_config; -delete from bmsql_config where cfg_name='1'; -``` - -The result is multiple rewritten query statements, which are displayed on the screen (the statements that cannot be rewritten are displayed as null), as shown in the following. - -```shell -+--------------------------------------------------------------------------+------------------------------+ -| Raw SQL | Rewritten SQL | -+--------------------------------------------------------------------------+------------------------------+ -| select cfg_name from bmsql_config group by cfg_name having cfg_name='1'; | SELECT cfg_name | -| | FROM bmsql_config | -| | WHERE cfg_name = '1'; | -| delete from bmsql_config; | TRUNCATE TABLE bmsql_config; | -| delete from bmsql_config where cfg_name='1'; | | -+--------------------------------------------------------------------------+------------------------------+ -``` diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-3-obtaining-help-information.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-3-obtaining-help-information.md deleted file mode 100644 index a5668891..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-3-obtaining-help-information.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2022-10-24 ---- - -# Obtaining Help Information - -Before using the SQL Rewriter tool, run the following command to obtain help information: - -```shell -gs_dbmind component sql_rewriter --help -``` - -The following information is displayed: - -```shell -usage: [-h] [--db-host DB_HOST] [--db-user DB_USER] [--schema SCHEMA] - db_port database file - -SQL Rewriter - -positional arguments: - db_port Port for database - database Name for database - file File containing SQL statements which need to rewrite - -optional arguments: - -h, --help show this help message and exit - --db-host DB_HOST Host for database - --db-user DB_USER Username for database log-in - --schema SCHEMA Schema name for the current business data -``` - -Passwords are entered through pipes or in interactive mode. 
For password-free users, any input can pass the verification. diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-4-command-reference.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-4-command-reference.md deleted file mode 100644 index 899e4191..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-4-command-reference.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2022-10-24 ---- - -# Command Reference - -**Table 1** Command line parameters - -| Parameter | Definition | -| :-------- | :------------------------------------------------------- | -| db_port | Database port number | -| database | Database name | -| file | Path of the file that contains multiple query statements | -| db-host | (Optional) Database host ID | -| db-user | (Optional) Database user name | -| schema | (Optional, public schema) Schema | diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-5-troubleshooting.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-5-troubleshooting.md deleted file mode 100644 index d78caa09..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-5-troubleshooting.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2022-10-24 ---- - -# Troubleshooting - -- If the SQL statement cannot be rewritten, check whether the SQL statement complies with the rewriting rule or whether the SQL syntax is correct. diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-1-overview.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-1-overview.md deleted file mode 100644 index d2fb9557..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-1-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2022-10-24 ---- - -# Overview - -The anomaly detection module implements time series data based on statistics methods to detect possible exceptions in the data. The framework of this module is decoupled to flexibly replace different anomaly detection algorithms. In addition, this module can automatically select algorithms based on different features of time series data. It supports anomaly value detection, threshold detection, box plot detection, gradient detection, growth rate detection, fluctuation rate detection, and status conversion detection. 
diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-2-usage-guide.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-2-usage-guide.md deleted file mode 100644 index dc22e2c8..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-2-usage-guide.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2022-10-24 ---- - -# Usage Guide - -Assume that the metric collection system is running properly and the configuration file directory **confpath** has been initialized. You can run the following command to implement this feature: - -Enable only the anomaly detection function: - -```shell -gs_dbmind service start --conf confpath --only-run anomaly_detection -``` - -View a metric on all nodes from timestamps1 to timestamps2: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 -``` - -View a metric on a specific node from timestamps1 to timestamps2: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 --host ip_address --anomaly anomaly_type -``` - -View a metric on all nodes from timestamps1 to timestamps2 in a specific anomaly detection mode: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 --anomaly anomaly_type -``` - -View a metric on a specific node from timestamps1 to timestamps2 in a specific anomaly detection mode: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 --host ip_address --anomaly anomaly_type -``` - -Visualize a metric on all nodes from timestamps1 to timestamps2 in a specific anomaly detection mode: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action plot --metric metric_name --start-time timestamps1 --end-time timestamps2 --host ip_address --anomaly anomaly_type -``` - -Stop the started service: - -```shell -gs_dbmind service stop --conf confpath -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** When setting anomaly detection parameters, ensure that start-time is at least 30 seconds earlier than end-time. diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-3-obtaining-help-information.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-3-obtaining-help-information.md deleted file mode 100644 index fe993502..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-3-obtaining-help-information.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2022-10-24 ---- - -# Obtaining Help Information - -You can run the **--help** command to obtain the help information. 
For example: - -```shell -gs_dbmind component anomaly_detection --help -``` - -The following information is displayed: - -```shell -usage: anomaly_detection.py [-h] --action {overview,plot} -c CONF -m METRIC -s - START_TIME -e END_TIME [-H HOST] [-a ANOMALY] - -Workload Anomaly detection: Anomaly detection of monitored metric. - -optional arguments: - -h, --help show this help message and exit - --action {overview,plot} - choose a functionality to perform - -c CONF, --conf CONF set the directory of configuration files - -m METRIC, --metric METRIC - set the metric name you want to retrieve - -s START_TIME, --start-time START_TIME - set the start time of for retrieving in ms - -e END_TIME, --end-time END_TIME - set the end time of for retrieving in ms - -H HOST, --host HOST set a host of the metric, ip only or ip and port. - -a ANOMALY, --anomaly ANOMALY - set a anomaly detector of the metric(increase_rate, - level_shift, spike, threshold) - -Process finished with exit code 0 -``` diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-4-command-reference.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-4-command-reference.md deleted file mode 100644 index 5d966981..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-4-command-reference.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2022-10-24 ---- - -# Command Reference - -**Table 1** Command line parameters - -| Parameter | Description | Value Range | -| :---------------- | :----------------------------------------------------------- | :------------------------------------------- | -| -h, --help | Help command | - | -| --action | Action parameter | - **overview**
- **plot**: visualization | -| -c,--conf | Configuration file directory | - | -| -m,--metric-name | Metric name to be displayed | - | -| -H, --host | Data source IP address which is used to filter data | IP address or IP address + port number | -| -a, --anomaly | Anomaly detection mode, which is used for filtering | - | -| -s, --start-time | Timestamp of the start time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | -| -e, --end-time | Timestamp of the start time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-5-troubleshooting.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-5-troubleshooting.md deleted file mode 100644 index 5683b12c..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-5-troubleshooting.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2022-10-24 ---- - -# Troubleshooting - -- Overview scenario failure: Check whether the configuration file path is correct and whether the configuration file information is complete. Check whether the metric name is correct, whether the host IP address is correct, whether the anomaly detection type is correct, and whether the metric data exists in the start time and end time. -- Visualization scenario failure: Check whether the configuration file path is correct and whether the configuration file information is complete. Check whether the metric name is correct, whether the host IP address is correct, whether the anomaly detection type is correct, and whether the metric data exists in the start time and end time. diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/1-service.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/1-service.md deleted file mode 100644 index 2f66db70..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/1-service.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -title: service -summary: service -author: Guo Huan -date: 2022-05-06 ---- - -# service - -This subcommand can be used to initialize the configuration directory and start and stop background tasks. - -## Initializing the Configuration Directory - -You can run the **gs_dbmind service setup** subcommand to initialize the configuration directory. This directory may contain the configuration files and logs of the DBMind. Some files in the directory are described as follows: - -- **dbmind.conf**: DBMind parameter configuration file. You can modify it using the **gs_dbmind set** command or a text editor. -- **dynamic_config.db**: DBMind service metadata stored on the local node, including algorithm hyperparameters and monitoring thresholds. This file contains DBMind service metadata and cannot be configured by users. -- **metric_map.conf**: monitoring metric mapping table, which can be used to adapt to different collection platforms. For example, in DBMind, the monitored system CPU usage is named **os_cpu_usage**, but a user-defined collection tool may name the CPU usage **my_cpu_usage_rate**. In this case, if you want DBMind to represent the CPU usage **metric my_cpu_usage_rate**, you need to modify this configuration item. 
That is, add the **os_cpu_usage = my_cpu_usage_rate** configuration item for mapping. For common users, you are advised to use the collection component and solution of the DBMind. In this case, you do not need to modify the configuration file. -- **logs**: This directory stores logs generated by the DBMind service. - -You can initialize the configuration directory in interactive or non-interactive mode. For example, if the name of the configuration directory to be initialized is **confpath**, perform the following operations: - -**Interactive mode** - -``` -gs_dbmind service setup -c confpath --interactive -``` - -After running the preceding command, you can configure the configuration items in interactive mode through the CLI client. - -**Non-interactive mode** - -In non-interactive mode, the initialization consists of three steps: starting configuration, modifying configuration items, and initializing configuration. In the second step, you need to manually edit the configuration file by using the text editor. The procedure is as follows: - -1. Run the following command to start the configuration: - - ``` - gs_dbmind service setup -c confpath - ``` - -2. After the preceding command is executed, the **dbmind.conf** configuration file is generated in the **confpath** directory. You need to use the text editor to manually modify the file. Related parameters are described as follows: - - ``` - # TSDB is used to specify the metric storage location of the monitored database system. Currently, only Prometheus is supported. - # The mandatory parameters are the IP address and port number of Prometheus. Other parameters (such as username, password, and SSL certificate information) are optional. - [TSDB] - name = prometheus # The type of time-series database. Options: prometheus. - host = # Address of time-series database. - port = # Port to connect to time-series database. - username = (null) # User name to connect to time-series database. - password = (null) # Password to connect to time-series database. - ssl_certfile = (null) # The certificate file for ssl connections. - ssl_keyfile = (null) # Certificate private key file. - ssl_keyfile_password = (null) # Password for ssl keyfile. - ssl_ca_file = (null) # CA certificate to validate requests. - - # METADATABASE is used to specify where the analysis results generated by DBMind are stored. - # Currently, SQLite, openGauss, and PostgreSQL databases are supported. If the openGauss database is used, pay attention to the compatibility of the Python driver psycopg2. You can use the driver provided by openGauss, or compile or modify GUC parameters for adaptation. - # Other information is about the connection to the database. Note that the user must have the permission to create the database. - [METADATABASE] - dbtype = sqlite # Database type. Options: sqlite, opengauss, postgresql. - host = # Address of meta-data database. - port = # Port to connect to meta-data database. - username = # User name to connect to meta-data database. - password = (null) # Password to connect to meta-data database. - database = # Database name to connect to meta-data database. - - # WORKER is used to specify the number of worker subprocesses that can be used by DBMind. If 0 is written, adaptation is performed, that is, CPU resources are used as much as possible. - [WORKER] - process_num = 0 # Number of worker processes on a local node. Less than or equal to zero means adaptive. - - # AGENT is used to specify the information for the DBMind to connect to the openGauss Agent. 
By using this agent, DBMind can obtain the real-time status of the monitored instance, improving the analysis accuracy. In addition, you can deliver some change actions to the DB instance, for example, ending a slow SQL statement (depending on whether the user configured here has sufficient permissions). - # The value of master_url is the IP address of the Agent. Because openGauss-exporter functions as the Agent, the value of master_url is the IP address of openGauss-exporter. - # In addition, openGauss-exporter supports HTTPS. Therefore, you can specify an SSL certificate based on the configuration. - [AGENT] - master_url = # The agent URL of the master node. e.g., https://127.0.0.1:9187. - username = # Username to login the monitoring database. Credential for agent. - password = # Password to login the monitoring database. Credential for agent. - ssl_certfile = (null) # The certificate file for ssl connections. - ssl_keyfile = (null) # Certificate private key file. - ssl_keyfile_password = (null) # Password for ssl keyfile. - ssl_ca_file = (null) # CA certificate to validate requests. - - # SELF-MONITORING is used to configure parameters for monitoring database instances. - # detection_interval indicates the execution frequency of the periodic check task, in seconds. - # last_detection_time indicates the length of the latest data used by each check task. - # forecasting_future_time indicates a length of a future time predicted by the time series forecast feature. - # golden_kpi indicates the monitoring metric that needs to be focused on. - # result_storage_retention indicates the maximum storage duration of diagnosis results. - [SELF-MONITORING] - detection_interval = 600 # Unit is second. The interval for performing health examination on the openGauss through monitoring metrics. - last_detection_time = 600 # Unit is second. The time for last detection. - forecasting_future_time = 3600 # Unit is second. How long the KPI in the future for forecasting. Meanwhile, this is the period for the forecast. - # The following golden_kpi of monitoring system is vital. - golden_kpi = os_cpu_usage, os_mem_usage, os_disk_usage, gaussdb_qps_by_instance # DBMind only measures and detects the golden metrics in the anomaly detection processing. - result_storage_retention = 604800 # Unit is second. How long should the results retain? If retention is more than the threshold, DBMind will delete them. - - # SELF-OPTIMIZATION is used to modify the following parameters to intervene the DBMind optimization result. Generally, the default values are used. - # optimization_interval: interval for executing optimization tasks. - # max_reserved_period: maximum storage duration of optimization results. - # max_index_num: upper limit of the recommended index result. - # max_index_storage: upper limit of the disk space occupied by the recommended index page. - # max_template_num: maximum number of SQL statements recorded in the SQL template recommended for the index. - # kill_slow_query: determines whether to enable automatic scanning and killing of slow SQL statements. If this function is enabled, you can run the set subcommand to set the threshold, for example, 70 seconds. The value must be a positive integer, in seconds. - # gs_dbmind set slow_sql_threshold max_elapsed_time 70 - [SELF-OPTIMIZATION] - optimization_interval = 86400 # Unit is second. The interval for generating report. - max_reserved_period = 100 # Unit is day. Maximum retention time. - max_index_num = 10 # Maximum number of advised indexes. 
- max_index_storage = 100 # Unit is MB. - max_template_num = 5000 # Maximum number of templates. - kill_slow_query = false # Whether to actively check and kill slow query. The default elapsed time of a slow query to be killed is 1 minute. - - # LOG indicates the DBMind log information. - [LOG] - maxbytes = 10485760 # Default is 10Mb. Maximum size of a single log file. If maxbytes is zero, the file grows indefinitely. - backupcount = 1 # Number of backups of log files. - level = INFO # Options: DEBUG, INFO, WARNING, ERROR. - - # The following information is displayed when you perform interactive configuration. You do not need to configure it. - [COMMENT] - worker = The form of executing compute-intensive tasks. Tasks can be executed locally or distributed to multiple nodes for execution. - tsdb = Configure the data source for time series data, which come from monitoring the openGauss instance. - metadatabase = Configure the database to record meta-data, which the database can store meta-data for the forecasting and diagnosis process. The database should be an openGauss instance. - self-monitoring = Set up parameters for monitoring and diagnosing openGauss instance. - self-optimization = Set up parameters for openGauss optimization. - ``` - -3. After manually modifying the preceding parameters, initialize the configuration items. In this phase, DBMind preliminarily checks the correctness of configuration items, initializes the structure and content of the metadata database table for storing result data, and encrypts the plaintext passwords in the configuration items. - - ``` - gs_dbmind service setup --initialize -c confpath - ``` - -4. Start the DBMind background service based on the configuration directory. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> 1. The comments in the configuration file are used to prompt users in interactive mode. Do not manually modify or delete the comments. -> 2. Make sure that the value of the configuration item and the comment are separated by a space. Otherwise, the system regards the comment as the value of the configuration item. -> 3. If special characters in a configuration item need to be escaped, use the percent sign ( %) to escape the special characters. For example, if the password is **password %**, use the percent sign ( %) to escape the special characters, that is, **password %%**. - -## Starting a Service - -After the configuration directory is initialized, you can start the DBMind background service based on the configuration directory. For example, if the configuration directory is **confpath**, run the following command: - -``` -gs_dbmind service start -c confpath -``` - -After the preceding command is executed, the system displays a message indicating that the service has been started. If no additional parameter is specified, this command starts all background tasks by default. If you want to start only one background task, add the **–only-run** option. For example, if you only want to start the slow SQL root cause analysis service, run the following command: - -``` -gs_dbmind service start -c confpath --only-run slow_query_diagnosis -``` - -## Stopping a Service - -Similar to starting a service, stopping a service has a simpler command line structure. You only need to specify the address of the configuration directory. 
For example, if the configuration directory is **confpath**, run the following command: - -``` -gs_dbmind service stop -c confpath -``` - -The DBMind service automatically exits after the running task is complete in the background. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** -> -> - The metabase user in **[METADATABASE]** must have the permission to create tables and insert and update data in the database. Otherwise, an exception will occur during tool execution. -> - Currently, multiple services cannot be started separately using the same configuration file. -> - The tool provides the **requirement.txt** file. You can use this file to install required third-party dependencies. - -## Command Reference - -You can use the **–help** option to obtain the help information about this mode. For example: - -``` -gs_dbmind service --help -``` - -``` -usage: service [-h] -c DIRECTORY [--only-run {slow_query_diagnosis,forecast}] [--interactive | --initialize] {setup,start,stop} - -positional arguments: - {setup,start,stop} - perform an action for service - -optional arguments: - -h, --help show this help message and exit - -c DIRECTORY, --conf DIRECTORY - set the directory of configuration files - --only-run {slow_query_diagnosis,forecast} - explicitly set a certain task running in the backend - --interactive configure and initialize with interactive mode - --initialize initialize and check configurations after configuring. -``` - -**Table 1** Parameters of the gs_dbmind service subcommand - -| Parameter | Description | Value Range | -| :------------ | :----------------------------------------- | :----------------------------------------------------------- | -| action | Action parameter | - setup: initializes configuration directory.
- start: starts a service.
- stop: stops a service. | -| -c,--conf | Configuration file directory | - | -| --initialize | Initializes configuration parameters. | - | -| --interactive | Configures parameters in interactive mode. | - | -| --only-run | Selects the module to be run only. | - forecast: prediction module.
- slow_query_diagnosis: root cause analysis module for slow SQL statements. | -| -h, --help | Help information | - | diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/2-component.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/2-component.md deleted file mode 100644 index ea7edcb2..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/2-component.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: component -summary: component -author: Guo Huan -date: 2022-05-06 ---- - -# component - -This subcommand can be used to start DBMind components, including the exporter for monitoring metrics and other AI functions. It forwards the commands passed by the user through the CLI client to the corresponding components. For details about the commands of different components, see the corresponding sections of the components. - -## Command Reference - -You can use the **–help** option to obtain the help information about this mode. For example: - -``` -gs_dbmind component --help -``` - -``` -usage: component [-h] COMPONENT_NAME ... - -positional arguments: - COMPONENT_NAME choice a component to start. ['extract_log', 'forecast', 'index_advisor', 'opengauss_exporter', 'reprocessing_exporter', 'slow_query_diagnosis', 'sqldiag', 'xtuner'] - ARGS arguments for the component to start - -optional arguments: - -h, --help show this help message and exit -``` - -**Table 1** Parameters of the gs_dbmind component subcommand - -| Parameter | Description | Value Range | -| :------------- | :------------------- | :----------------------------------------------------------- | -| COMPONENT_NAME | Component name | extract_log, forecast, index_advisor, opengauss_exporter, reprocessing_exporter, slow_query_diagnosis, sqldiag, xtuner | -| ARGS | Component parameters | Refer to the command description of the corresponding component. | -| -h, –help | Help information | - | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/3-set.md b/product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/3-set.md deleted file mode 100644 index 7bf0bb61..00000000 --- a/product/en/docs-mogdb/v5.0/AI-features/ai4db/dbmind-mode/3-set.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: set -summary: set -author: Guo Huan -date: 2022-05-06 ---- - -# set - -This command is used to change the parameter values in the **dbmind.conf** configuration file. You can also manually modify the **dbmind.conf** configuration file. The two methods have no difference. For example, to change the value of **host** in the **TSDB** configuration item of the **dbmind.conf** file in the **confpath** directory to **127.0.0.1**, run the following command: - -``` -gs_dbmind set TSDB host 127.0.0.1 -c confpath -``` - -You can choose either of the methods to modify common parameters. The DBMind configuration file does not store plaintext passwords. If a user uses a plaintext password, the DBMind displays a message and exits. Therefore, the user can change the password in either of the following ways: - -1. Modify the **dbmind.conf** file first and run the following command to reinitialize the configuration file: - - ``` - gs_dbmind service setup --initialize -c confpath - ``` - -2. Run the **set** subcommand to set the parameters. For example: - - ``` - gs_dbmind set METADATABASE password xxxxx -c confpath - ``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This command is case sensitive to character strings. 
If you enter an incorrect character string, an error may occur during the command execution. The **set** subcommand involves many types of parameter values. Therefore, only the set values are preliminarily checked. You need to ensure that the entered values are correct. For example, some values must be positive integers instead of negative integers. - -## Command Reference - -You can use the **–help** option to obtain the help information about this mode. For example: - -``` -gs_dbmind set --help -``` - -``` -usage: set [-h] -c DIRECTORY section option target - -positional arguments: - section which section (case sensitive) to set - option which option to set - target the parameter target to set - -optional arguments: - -h, --help show this help message and exit - -c DIRECTORY, --conf DIRECTORY - set the directory of configuration files -``` - -**Table 1** Parameters of the set subcommand: python dbmind/ set xxx - -| Parameter | Description | Value Range | -| :-------- | :---------------------------------------- | :---------- | -| -h, –help | Help information | - | -| -c, –conf | Configuration file directory **confpath** | - | -| section | Setting area | - | -| option | Configuration item | - | -| target | Set value | - | diff --git a/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.6.md b/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.6.md new file mode 100644 index 00000000..9207594d --- /dev/null +++ b/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.6.md @@ -0,0 +1,175 @@ +--- +title: MogDB 5.0.6 +summary: MogDB 5.0.6 +author: Guo Huan 候宗田 齐永江 郑小进 +date: 2024-03-26 +--- + +# MogDB 5.0.6 + +## 1. Version Description + +MogDB 5.0.6 is a patch version of MogDB 5.0.0, released on 2024-03-30. It adds new features and fixes some defects based on MogDB 5.0.5, with the following details: + +
+## 2. New Features
+
+### 2.1 Commercial Use of the Ustore Storage Engine
+
+The Astore storage engine, which uses the Append-Update mode, suffers from inefficient space management in large-scale, high-concurrency update scenarios, leading to an order-of-magnitude deterioration in update latency that significantly constrains key businesses such as finance and security.
+
+The new Ustore storage engine, which supports Inplace-Update, is officially released. It offers excellent performance in space management and hotspot-heavy updates while maintaining read-write performance consistent with Astore, effectively addressing customer pain points.
+
+**Related Page**: [In-place Update Storage Engine Ustore](../../performance-tuning/system-tuning/configuring-ustore.md)
+
+### 2.2 SELECT Auto Commit
+
+MogDB processes all read and write requests under a transaction mechanism. This differs significantly from Oracle's behavior, where reads do not start a transaction and writes implicitly start and explicitly commit one, and is unfriendly to Oracle applications and long read-only connections.
+
+This feature implements the Oracle transaction management mechanism, significantly reducing the complexity of Oracle application migration.
+
+**Related Page**: [SELECT Auto Commit Transaction](../../reference-guide/sql-reference/transaction/transaction-auto-commit.md)
+
+### 2.3 Import and Export Enhancements
+
+Import and export capabilities are crucial for data backup and recovery, data migration, data synchronization, data analysis, and data exchange scenarios. Their performance directly affects the smooth progress of key businesses.
+
+To strengthen tool support for business ecosystems, the existing gs_dump and gs_restore tools now parallelize work both across tables and within a single table. This yields a severalfold performance improvement in common database scenarios, and improvements of dozens of times in scenarios with large and ultra-large tables.
+
+**Related Page**: [Enhanced Logical Backup and Recovery Efficiency](../../characteristic-description/high-availability/enhanced-efficiency-of-logical-backup-and-restore.md)
+
+### 2.4 Compatibility Enhancements
+
+1. Support for string constants after GROUP BY and for nested use of aggregate functions, providing compatibility with Oracle syntax.
+
+   **Related Page**: [ORDER BY/GROUP BY Scenario Compatibility](../../characteristic-description/compatibility/order-by-group-by-scenario-expansion.md), [Support for Nested Aggregate Functions](../../characteristic-description/compatibility/nesting-of-aggregate-functions.md)
+
+2. A new option 'accept_empty_str' has been added to the parameter [behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options), allowing users to decide whether empty strings of any data type are converted to null.
+
+3. New versions of gsql, libpq, and odbc for AIX systems are available, allowing clients and drivers to access the database on AIX.
+
+4. Compatibility with PG's INSERT...ON CONFLICT syntax, changing the INSERT action to UPDATE or DO NOTHING when a unique constraint conflict occurs to avoid errors (see the sketch at the end of this section).
+
+   **Related Page**: [INSERT Supports ON CONFLICT Clause](../../characteristic-description/compatibility/insert-on-conflict.md)
+
+5. After setting the parameter [behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options) to 'convert_string_digit_to_numeric', floating-point strings and integers can be operated on directly.
+
+6. Support for granting stored procedures with the AUTHID CURRENT_USER attribute to other users; the tables referenced in such stored procedures are likewise resolved to the same-named tables owned by the corresponding user.
+
+   **Related Page**: [Support for AUTHID CURRENT_USER](../../characteristic-description/compatibility/authid-current-user.md)
+
+7. When the arguments of the mod(val1, val2) function are (float, int), the implicit conversion is changed from mod(int, int) to mod(numeric, numeric), improving the calculation precision of the mod function.
+
+   **Related Page**: [mod Function Compatibility](../../characteristic-description/compatibility/mod-function-float-to-int.md)
+
+8. Support for [ALTER SEQUENCE](../../reference-guide/sql-syntax/ALTER-SEQUENCE.md) functionality, allowing a sequence to be modified after creation.
+
+9. Support for combining [UPDATE](../../reference-guide/sql-syntax/UPDATE.md)/[DELETE](../../reference-guide/sql-syntax/DELETE.md) statements with the return into clause, enhancing the functionality of UPDATE/DELETE statements.
+
+10. Support for [cursor backward retrieval](../../characteristic-description/enterprise-level-features/scroll-cursor.md), expanding the usage scenarios of cursors.
+
+### 2.5 Performance Improvements
+
+1. During query execution, columns in subqueries that are not used by the outer query are pruned to improve query performance.
+
+   **Related Page**: [Support for Pruning Subquery Projection Columns](../../characteristic-description/enterprise-level-features/support-for-pruning-subquery-projection-columns.md)
+
+2. Support for modifying the logged attribute of a table after table creation, including changing logged tables to unlogged tables, which skip WAL logging and improve insertion efficiency.
+
+   **Related Page**: [Support for Modifying Table Log Attributes After Table Creation](../../characteristic-description/compatibility/modify-table-log-property.md)
+
+3. Support for optimizing statements containing `like 'xxx'` and `like 'xxx%'` predicates through SET [sql_beta_feature](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#sql_beta_feature) = index_like_prefix_opt; to improve execution performance.
+
+4. Support for eliminating useless ORDER BY clauses in subqueries to improve execution performance.
+
+   **Related Page**: [Order By Column Pruning](../../characteristic-description/enterprise-level-features/pruning-order-by-in-subqueries.md)
+
+5. The default behavior of CTEs has been changed so that they can be pulled up into the outer query, producing better query plans and improving execution performance.
+
+6. A new option 'bpchar_coerce_compat' has been added to the parameter [behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options), allowing implicit conversion of text to bpchar when bpchar and text values are operated on together, so that conditions such as bpchar_col = 'xxx'::text can directly use indexes or partition pruning, improving query efficiency.
+
+### 2.6 Usability Improvements
+
+1. Addition of parameters such as work_mem and rewrite_rule to hints, enhancing users' ability to control the execution plan of individual statements through hints.
+
+   **Related Page**: [Optimizer GUC Parameters Hint](../../performance-tuning/sql-tuning/hint-based-tuning.md#优化器guc参数的hint)
+
+2. Support for returning the number of rows and error messages when an insert fails in batch-mode insert, enhancing users' ability to identify errors.
+
+3. Addition of the [gs_xlog_keepers](../../reference-guide/functions-and-operators/system-management-functions/other-functions.md#gs_xlog_keepers) function, which queries the reasons why xlog segments are retained in the database, helping users clean up xlog and reduce disk space usage.
+
+4. Support for fields in the [MERGE INTO](../../reference-guide/sql-syntax/MERGE-INTO.md) statement without table name prefixes, enhancing SQL syntax compatibility and reducing application modifications for users.
+
+5. Support for viewing the number of active autonomous transactions and related details, and for releasing autonomous transactions without stopping the database, strengthening users' control over autonomous transactions.
+
+   **Related Page**: [Autonomous Transaction Management Views and Non-database Stopping Release of Autonomous Transactions](../../characteristic-description/maintainability/autonomous-transaction-management.md)
+
+6. Adaptation of the wal2json WAL parsing plugin, which converts WAL into JSON format, improving the readability of WAL content and helping users understand the log state on standby machines.
+
+   **Related Page**: [wal2json User Guide](../../developer-guide/extension/wal2json-user-guide.md)
+
+7. Support for rolling back a single failed SQL statement. When the [behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options) parameter is set to the 'sql_implicit_savepoint' option, an error in one SQL statement within a transaction does not affect the commit of the others; the commit retains the results of the statements that executed correctly.
+
+### 2.7 Stability Enhancements
+
+1. Addition of the new parameter [protect_standby](../../reference-guide/guc-parameters/ha-replication/standby-server.md#protect_standby), which protects standby machines from being activated as the master by the switchover or failover commands, allowing users to control which standby machines may be promoted.
+
+2. Addition of the new parameter [data_sync_failed_ignore](../../reference-guide/guc-parameters/fault-tolerance.md#data_sync_failed_ignore), which controls whether the database stops or keeps running when synchronizing data pages to disk fails.
+
+   **Related Page**: [Corrupt Files Handling](../../characteristic-description/maintainability/corrupt-files-handling.md)
+
+3. Addition of the new parameter [damage_page_ignore](../../reference-guide/guc-parameters/fault-tolerance.md#damage_page_ignore), which allows the database to start forcibly during recovery when damaged or missing pages are encountered, for data recovery work.
+
+   **Related Page**: [Corrupt Files Handling](../../characteristic-description/maintainability/corrupt-files-handling.md)
+
+4. CM has added support for dual-network-segment deployment of streaming replication, for high-availability disaster recovery at the network card level (PTK 1.4 and higher versions).
+ + **Related Page**: [MogDB/CM/PTK Support for Dual Network Segments](../../characteristic-description/high-availability/cm-dual-network-segment-deployment.md) + +5. CM has added a new parameter [enable_async_standby_promotion](../../high-available-guide/cluster-management/cm-configuration-parameter/cm-cm_server.md#enable_async_standby_promotion) to control whether asynchronous standby machines are allowed to be promoted as the master in two-node deployment modes, enhancing users' control over HA behavior. + +
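+
+As a minimal illustration of the ON CONFLICT support described in section 2.4 above, the following sketch assumes the PostgreSQL syntax, including the EXCLUDED pseudo-relation; the table and column names are hypothetical, not taken from MogDB documentation.
+
+```sql
+-- Hypothetical example of the PG-compatible INSERT ... ON CONFLICT clause.
+CREATE TABLE t_conf (id int PRIMARY KEY, val text);
+INSERT INTO t_conf VALUES (1, 'a');
+
+-- On a primary-key conflict, turn the INSERT into an UPDATE of the existing row.
+INSERT INTO t_conf VALUES (1, 'b')
+ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val;
+
+-- Or skip the conflicting row silently instead of raising an error.
+INSERT INTO t_conf VALUES (1, 'c')
+ON CONFLICT (id) DO NOTHING;
+```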
+## 3. Defect Fixes
+
+1. [3098] Fixed the issue of pg_get_userbyid() having poor performance compared with PG.
+2. [2949] Fixed the error "failed to find plan for subquery xx" when querying system views with explain.
+3. [2952] Fixed the issue where the DDL returned by the pg_get_tabledef function could not be executed normally.
+4. [3917] Fixed the error when MogDB creates temporary tables with IF NOT EXISTS.
+5. [4159] Fixed the issue where the char function failed to convert specific data types in dolphin mode.
+6. [3759] Fixed the issue where the uint8 type and ordinary numbers could not be operated on directly in B mode.
+7. [3726] Fixed the issue where the definition of hash sub-partitioned tables could not be obtained through pg_get_tabledef.
+8. [3643] Fixed the error that occurred when a function or procedure name in a package was the same as a synonym name.
+9. [3567] Fixed the issue where convert_string_digit_to_numeric did not take effect for expressions.
+10. [3566] Fixed the compatibility issue with the all/any (list) syntax in A mode.
+11. [3588] Fixed the "insufficient permissions to access the template1 database" error in the database log.
+12. [3456] Fixed the conflict between custom packages and the parameter proc_outparam_override.
+13. [3401] Fixed the error in calculations involving date and bigint types.
+14. [3328] Fixed the issue where the table creation statement exported by gs_dump differed from the original.
+15. [3360] Fixed the ambiguity in the return value of dbms_utility.format_call_stack.
+16. [3280] Fixed the issue of garbled characters when operating on json in dolphin mode.
+17. [3374] Fixed the issue where date_format did not support time zone time formats.
+18. [3191] Fixed the issue where the calculation priority of dolphin numeric operators was incorrect, leading to unexpected results.
+19. [3576] Fixed the issue where data dictionary queries failed when the parameter dolphin.b_compatibility_mode was set to on.
+20. [3259] Fixed the issue where the sys_guid() function built into the MogDB database generated the same data each time.
+21. [3045] Fixed the issue where the DDL for creating a table in MySQL mode added double quotes to the keyword 'key', but the DDL in the gs_dump-generated SQL file and in 'SHOW CREATE TABLE' lacked the double quotes, causing the exported SQL file to fail on import.
+22. [3180] Fixed the issue where the gs_dump logical backup could not restore the owner and creator of a trigger.
+23. [3307] Fixed the issue where the DETAIL information defined by RAISE EXCEPTION USING DETAIL was not returned for troubleshooting.
+24. [3950] Fixed the issue where, when a type object already exists, the error message indicated a syntax error in the statement instead of reporting that the object already exists.
+25. [3922] Fixed the issue where 'completion' could not be used as a non-reserved keyword.
+26. [3419] Fixed the issue where the backup compression of gs_probackup and brm was ineffective under a custom tablespace.
+27. [3367] Fixed the error that occurred when using a decode function column as a correlation condition in a subquery with group by.
+28. [2952] Fixed the issue where the order of the table creation statement returned by the pg_get_tabledef() function was incorrect.
+29. [3053] Fixed the issue where an error occurred in the return value of a hint after enabling query_dop.
+30. [3337] Fixed the "cached plan must not change result type" error that occurred after modifying plan_cache_type_validation without restarting.
+31. [4006] Fixed the issue where client connections had a low probability of getting stuck after automatic failover of a database cluster configured with a VIP.
+32. [3736] Fixed the issue where a failover within the CM (Cluster Manager) cluster itself might cause the database cluster to fail over automatically in a VIP configuration scenario.
+33. [3283] Fixed the issue where the parameter most_available_sync could not take effect immediately after modification followed by the reload command.
+34. [3167] Fixed the issue where a query had a chance of failing to find the CSN file when using the select ... for share syntax.
+
+<br/>
+## 4. Upgrade Instructions
+
+If you need to upgrade your current lower version of MogDB to version 5.0.6, be sure to read the [pre-upgrade guide](../../upgrade-guide/2-read-before-upgrade.md) first; for a detailed introduction to database upgrades, refer to the [upgrade guide](../../upgrade-guide/upgrade-guide.md) of the latest version.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.7.md b/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.7.md
new file mode 100644
index 00000000..1b4995b4
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.7.md
@@ -0,0 +1,32 @@
+---
+title: MogDB 5.0.7
+summary: MogDB 5.0.7
+author: Guo Huan
+date: 2024-05-29
+---
+
+# MogDB 5.0.7
+
+## 1. Version Description
+
+MogDB 5.0.7 is a patch version of MogDB 5.0.0, released on 2024-05-30. It addresses some defects on top of MogDB 5.0.6, with details as follows:
+
+<br/>
+## 2. Defect Fixes
+
+1. [5004] Fixed an issue where the Cluster Manager (CM) experienced abnormal functionality when adding two VIPs in a single network segment scenario.
+2. [5002] Fixed an issue where changes in the support for type conversion of `select decode` led to errors in certain scenarios.
+3. [4120] Fixed a probabilistic core dump issue that occurred with the `alter table add column after` statement.
+4. [4988] Fixed an abnormal query status issue that persisted when continuously checking the value of the `protect_standby` parameter after setting it to on with the `reload` command.
+5. [5998] Fixed a failure of the CM to automatically mount the VIP in a one-primary, one-standby scenario.
+6. [4999] Fixed an issue where the "tidrangescan" plugin was not automatically created when upgrading from a lower version of MogDB 5.0 to MogDB 5.0.6.
+7. [5006] Fixed an error that occurred when setting user passwords with parallel export enabled in `gs_dump`.
+8. [5007] Fixed a failure in the recovery operation based on incremental backups with `gs_probackup`.
+9. [5076] Fixed an error that occurred when using `gs_dump` to export a B-compatibility database and then importing it with `gs_restore`.
+10. [5125] Fixed a core dump issue that occurred when restoring a full backup with `gs_probackup` and specifying a non-existent ID with the `-i` option.
+11. [5152] Fixed an error that occurred when `rewrite_rule` was set to `column_pruner` and a window function included a `partition` clause.
+12. [4508] Fixed a probabilistic core dump issue caused by concurrent DDL operations on packages.
+13. [5003] Fixed an error reporting "bitmapset has multiple members" when executing SQL statements involving at least 15 table joins with an Append join operator.
+14. [5217] Fixed a probabilistic data outage issue caused by modifying table fields with `ALTER TABLE … FIRST/AFTER`.
+15. [5151] Fixed a memory overflow issue that occurred when there were 128 users (or a multiple of 128) and the 128th user was deleted while the `gs_sql_count` system view was being queried simultaneously.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.8.md b/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.8.md
new file mode 100644
index 00000000..03161cc2
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/5.0.8.md
@@ -0,0 +1,122 @@
+---
+title: MogDB 5.0.8
+summary: MogDB 5.0.8
+author: Guo Huan
+date: 2024-07-03
+---
+
+# MogDB 5.0.8
+
+## 1. Version Description
+
+MogDB 5.0.8 is a patch version of MogDB 5.0.0, released on 2024-07-31. It includes new features and defect fixes on top of MogDB 5.0.7, with details as follows:
+
+<br/>
+## 2. New Features
+
+### 2.1 Sequential Scan Pre-fetching
+
+MogDB has been optimized for sequential scans over large volumes of data, parallelizing CPU computation and I/O during the scan. This enhancement maximizes CPU efficiency and delivers higher throughput. Sequential scan pre-fetching supports both the Astore and Ustore storage engines. With scan pre-fetching enabled, the SeqScan operator gains 20%-80% in performance in TPCH scenarios, with a 10%-30% overall end-to-end performance improvement.
+
+**Related Page**: [Sequential Scan Pre-fetching](../../characteristic-description/high-performance/seqscan-prefetch.md)
+
+### 2.2 UStore Support for SMP Parallel Execution
+
+SMP parallel technology uses the multi-core CPU architecture to achieve multi-threaded parallel computing, fully leveraging CPU resources to improve query performance. This feature adds parallel-execution support for the Ustore storage engine. In full-table sequential scan scenarios, query performance scales nearly linearly with the degree of parallelism, up to the storage bandwidth limit. SMP also brings significant performance improvements to index queries.
+
+**Related Page**: [Ustore SMP Parallel Scan](../../characteristic-description/high-performance/ustore-smp.md), [In-place Update Storage Engine Ustore](../../performance-tuning/system-tuning/configuring-ustore.md)
+
+### 2.3 Enhanced Import and Export Capabilities
+
+1. gs_dump now supports parallel import/export of partitioned tables, with performance scaling nearly linearly with the degree of parallelism, up to the storage bandwidth limit.
+
+   **Related Page**: [Enhanced Logical Backup and Recovery Efficiency](../../characteristic-description/high-availability/enhanced-efficiency-of-logical-backup-and-restore.md)
+
+2. gs_dump supports backup export of data on standby machines.
+
+   **Related Page**: [gs_dump](../../reference-guide/tool-reference/server-tools/gs_dump.md)
+
+3. gs_dump and gs_restore support specifying import/export of five basic object types: functions, triggers, types, packages, and procedures.
+
+   **Related Page**: [Support for Specifying Import/Export of Five Basic Objects](../../characteristic-description/enterprise-level-features/import-export-specific-objects.md)
+
+### 2.4 Compatibility Enhancements
+
+1. In B compatibility mode (MySQL compatibility), support has been added for division operations where the divisor is 0. The "division by zero" message is now reported at the warning level instead of the error level, so SQL execution is not interrupted. In this scenario, the calculation returns NULL, consistent with MySQL behavior (see the sketch at the end of this section).
+
+2. When using connection methods such as JDBC in PBE mode, left values in anonymous blocks are supported, and OUT/INOUT data from stored procedures can be returned to the corresponding driver. Using this feature requires setting the `proc_outparam_override` option in the `behavior_compat_options` parameter.
+
+   **Related Page**: [PBE Mode Support for Stored Procedure Out Parameters](../../characteristic-description/compatibility/stored-procedure-out-parameters-in-pbe-mode.md)
+
+3. Native ECPG (Embedded SQL Preprocessor) has been modified to match the usage and functionality of ORACLE PRO\*C, making it easier for users to smoothly replace PRO\*C with ECPG when implementing business logic.
+   **Related Page**: [Embedded SQL Preprocessor ECPG](../../characteristic-description/application-development-interfaces/ECPG.md)
+
+### 2.5 Logical Replication Support for DDL Operations
+
+Support has been added for generating DDL logs for common table operations and for logical decoding of those DDL logs, making it easier for data migration and synchronization software to capture changes to the MogDB data dictionary and enhancing MogDB's ability to work in parallel with heterogeneous databases.
+
+**Related Page**: [Logical Replication Support for DDL Operations](../../developer-guide/logical-replication/logical-decoding/logical-decoding-support-for-DDL.md)
+
+### 2.6 Enhanced Checkpoint Capabilities
+
+Without significantly impacting database performance, MogDB's dirty-page flushing has been optimized to reduce the number of dirty pages in the system. This greatly reduces the completion time of database switchover operations. In addition, after an unexpected power loss and database restart, it reduces the amount of WAL that must be replayed, shortening database startup time.
+
+**Related Page**: [Enhancement of Dirty Pages Flushing Performance](../../characteristic-description/high-performance/enhancement-of-dirty-pages-flushing-performance.md)
+
+<br/>
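+
+As a minimal illustration of the B-compatibility-mode division-by-zero behavior described in section 2.4, the following sketch assumes a session in a database created with B (MySQL) compatibility; the exact warning text in the comments is illustrative rather than quoted from MogDB output.
+
+```sql
+-- Hypothetical session in a B-compatibility database.
+SELECT 1/0;
+-- WARNING: division by zero
+-- The statement completes and returns NULL instead of aborting,
+-- consistent with MySQL semantics.
+```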
+ +## 3. Defect Fixes + +1. [4061] Fixed a low-probability database crash issue caused by multi-threaded concurrent writes to the wal log. + +2. [5234] Fixed an issue where procedures were missing after exporting with gs_dump and importing with gs_restore. + +3. [5291] Fixed an issue where functions with two parameters were missing after exporting and importing with gs_dump and gs_restore. + +4. [3961] Fixed an inconsistency issue with group by rollup syntax query results compared to Oracle. + +5. [4543] Fixed an issue where the auto_increment value already in use by a lower version could not be reset to 1. + +6. [4550] Fixed an issue with duplicate uuid_short values in B mode. + +7. [4894] Fixed an error reporting issue with "could not find tuple for trigger 437851" when dropping a trigger function. + +8. [4952] Fixed an issue where SQL statements with distinct and incremental sorting selected a poor plan. + +9. [4992] Fixed an issue where the estimated row count after analyzing a table with zero actual rows was significantly off. + +10. [5138] Fixed an unexpected result issue with the whale plugin when executing `SELECT to_timestamp(0) FROM dual;`. + +11. [5143] Fixed an issue where the whale plugin's `dbms_random.normal()` function returned the same value each time. + +12. [5146] Fixed an issue with incorrect tree query filter condition downward propagation. + +13. [5242] Fixed an issue where stored procedure cursor parameters returned null despite actual data being present. + +14. [5244] Fixed an issue where '' returned as 0 after UNION ALL in B mode. + +15. [5293] Fixed an error when executing stored procedures to insert table data. + +16. [5400] Fixed a memory leak issue when updating partitioned tables. + +17. [5642] Fixed an error when using aggregate functions with order by on empty result sets in B mode. + +18. [5680] Fixed a permission denied error for relation (null) when a regular user accessed remote dblink tables. + +19. [5698] Fixed an issue where updating a remote dblink table with an alias was incorrectly identified as a field. + +20. [1073] Added support for accessing and updating external partitioned tables. + +21. [6147] Fixed an error with the fetchsize function when the select - O auto-commit feature was enabled. + +22. [6141] Fixed an unexpected auto-commit issue with select for update when the select - O auto-commit feature was active. + +23. [4470] Fixed an error with "tuple already updated by self" when deleting a table with a foreign key constraint in an Ustore table. + +24. [6305] Fixed a low-probability core issue with gs_dump when parallel exporting partitioned tables. + +25. [5837] Fixed a low-probability data inaccuracy issue with Merge into during concurrent updates. + +26. [6380] Fixed a low-probability database core issue with beatmapheapscan in parallel situations. 
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/release-note.md b/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/release-note.md index f0f2c2a2..88b811ea 100644 --- a/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/release-note.md +++ b/product/en/docs-mogdb/v5.0/about-mogdb/mogdb-new-feature/release-note.md @@ -9,9 +9,12 @@ date: 2022-09-27 | Version | Release Date | Overview | | ------------------- | ------------ | ------------------------------------------------------------ | -| [5.0.5](./5.0.5.md) | 12/30 | MogDB 5.0.5 is a patch release for MogDB 5.0.0, which adds some new features and fixes some defects based on MogDB 5.0.4. | -| [5.0.4](./5.0.4.md) | 11/30 | MogDB 5.0.4, officially released on November 30, 2023, builds on MogDB 5.0.3 by fixing some bugs. | -| [5.0.3](./5.0.3.md) | 10/27 | MogDB 5.0.3, officially released on October 27, 2023, builds on MogDB 5.0.2 by fixing some bugs. | -| [5.0.2](./5.0.2.md) | 09/30 | MogDB 5.0.2, officially released on September 30, 2023, builds on MogDB 5.0.1 by fixing some bugs and adding some new features. | -| [5.0.1](./5.0.1.md) | 08/15 | MogDB 5.0.1, officially released on August 15, 2023, builds on MogDB 5.0.0 by fixing some bugs and adding the feature of decoupling views from their dependencies. | -| [5.0.0](./5.0.0.md) | 07/14 | MogDB 5.0.0 was officially released on 07/14/2023. As an LTS release, it is further enhanced based on version 3.0/3.1 and introduces new features from openGauss 5.0.0. | \ No newline at end of file +| [5.0.8](./5.0.8.md) | 2024/07/31 | MogDB 5.0.8 builds upon the previous version 5.0.7 by addressing key defects and introducing new features such as sequential scan pre-fetching and SMP parallel execution for UStore. Additionally, this release has made significant enhancements in compatibility, performance, and usability. | +| [5.0.7](./5.0.7.md) | 2024/05/30 | MogDB 5.0.7 fixed some defects based on MogDB 5.0.6. | +| [5.0.6](./5.0.6.md) | 2024/03/30 | Building upon the MogDB 5.0.5 version, MogDB 5.0.6 not only rectified a range of defects but also introduced a suite of new features, including the commercial utilization of the Ustore storage engine, automatic commit for SELECT statements, and significant performance enhancements for import and export functions. Moreover, this version has seen comprehensive improvements in compatibility, performance, and ease of use. | +| [5.0.5](./5.0.5.md) | 2023/12/30 | MogDB 5.0.5 is a patch release for MogDB 5.0.0, which adds some new features and fixes some defects based on MogDB 5.0.4. | +| [5.0.4](./5.0.4.md) | 2023/11/30 | MogDB 5.0.4, officially released on November 30, 2023, builds on MogDB 5.0.3 by fixing some bugs. | +| [5.0.3](./5.0.3.md) | 2023/10/27 | MogDB 5.0.3, officially released on October 27, 2023, builds on MogDB 5.0.2 by fixing some bugs. | +| [5.0.2](./5.0.2.md) | 2023/09/30 | MogDB 5.0.2, officially released on September 30, 2023, builds on MogDB 5.0.1 by fixing some bugs and adding some new features. | +| [5.0.1](./5.0.1.md) | 2023/08/15 | MogDB 5.0.1, officially released on August 15, 2023, builds on MogDB 5.0.0 by fixing some bugs and adding the feature of decoupling views from their dependencies. | +| [5.0.0](./5.0.0.md) | 2023/07/14 | MogDB 5.0.0 was officially released on 07/14/2023. As an LTS release, it is further enhanced based on version 3.0/3.1 and introduces new features from openGauss 5.0.0. 
| \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/administrator-guide/database-deployment-scenario/common-primary-backup-deployment-scenarios.md b/product/en/docs-mogdb/v5.0/administrator-guide/database-deployment-scenario/common-primary-backup-deployment-scenarios.md index 2d9ada95..003d3811 100644 --- a/product/en/docs-mogdb/v5.0/administrator-guide/database-deployment-scenario/common-primary-backup-deployment-scenarios.md +++ b/product/en/docs-mogdb/v5.0/administrator-guide/database-deployment-scenario/common-primary-backup-deployment-scenarios.md @@ -1,85 +1,85 @@ ---- -title: Common Primary/Standby Deployment Solutions -summary: Common Primary/Standby Deployment Solutions -author: Guo Huan -date: 2023-04-07 ---- - -# Common Primary/Standby Deployment Solutions - -## Single-Center Deployment - -**Figure 1** Single-center deployment -![单中心部署图](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/single-center-deployment.png) - -Networking features: If a single AZ is deployed, one synchronous standby node and one asynchronous standby node can be configured. - -Advantages: - -1. Three nodes are equivalent. If any node is faulty, the other nodes can still provide services. -2. The cost is low. - -Disadvantages: The high availability (HA) is low. If an AZ-level fault occurs, you can only restore the entire node. - -Applicability: Applicable to service systems that have low requirements on HA. - -## Intra-City Dual-Center Deployment - -**Figure 2** Intra-city dual-center deployment -![同城双中心部署图](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/intra-city-dual-center-deployment.png) - -Networking features: Two intra-city AZs are more reliable than a single AZ. A synchronous standby node can be configured for the primary center and the intra-city center respectively. - -Advantages: - -1. Intra-city synchronous replication. If one data center is faulty, the other data center can still provide services without data loss. RPO = 0. -2. The cost is reasonable. - -Disadvantages: - -1. The intra-city distance should not be too long. It is recommended that the distance be within 70 km. The total latency caused by excessive read/write times should be considered during service design. -2. Remote DR is not supported. - -Applicability: Applicable to common service systems. - -## Two-City Three-DC Deployment - -**Figure 3** Two-city three-dc deployment -![两地三中心部署图](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/two-city-three-dc-deployment.png) - -Networking features: In the two-city three-DC deployment, each AZ must have at least one synchronous standby node. The cluster reliability can reach the highest level when the number of cities and data centers increases. - -Advantages: It supports zero data loss in remote DR, and has the highest reliability. RPO = 0. - -Disadvantages: - -1. If the remote DR distance is long and synchronous standby node is configured in the remote center, the performance may be affected. -2. The cost is relatively high. - -Applicability: Applicable to core and important service systems. - -## Two-City Three-DC Streaming DR Solution - -**Figure 4** Two-city three-DC streaming DR solution -![两地三中心流式容灾方案部署图](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/two-city-three-dc-streaming-dr-solution.png) - -Networking features: Two independent clusters are deployed in the dual-cluster DR solution. The primary and DR cluster networking modes can be selected as required. 
The DR cluster selects the first standby DN to connect to the primary DN of the primary cluster. In the DR cluster, the first standby DN is connected in cascading standby mode. - -Advantages: - -1. The primary cluster has the advantage of single-cluster networking. You need to manually switch to the standby cluster only when the primary cluster is unavailable. -2. There is only one cross-cluster (remote) replication link regardless of whether a DR switchover occurs. Therefore, less network bandwidth is occupied. -3. The networking is more flexible. The primary cluster and DR cluster can use different networking modes. - -Disadvantages: - -1. DR clusters need to be added, increasing costs. -2. Remote DR RPO > 0 - -Applicability: Applicable to core and important service systems. - -For more information, see [Two-City Three-DC DR](./two-city-three-dc-dr.md). - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: -> -> The preceding deployments are typical solutions. You can adjust the deployment solutions based on actual service scenarios, for example, adding or deleting standby nodes, adjusting the number of centers, properly deploying synchronous and asynchronous standby nodes, and properly using cascaded standby nodes. +--- +title: Common Primary/Standby Deployment Solutions +summary: Common Primary/Standby Deployment Solutions +author: Guo Huan +date: 2023-04-07 +--- + +# Common Primary/Standby Deployment Solutions + +## Single-Center Deployment + +**Figure 1** Single-center deployment +![单中心部署图](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/single-center-deployment.png) + +Networking features: If a single AZ is deployed, one synchronous standby node and one asynchronous standby node can be configured. + +Advantages: + +1. Three nodes are equivalent. If any node is faulty, the other nodes can still provide services. +2. The cost is low. + +Disadvantages: The high availability (HA) is low. If an AZ-level fault occurs, you can only restore the entire node. + +Applicability: Applicable to service systems that have low requirements on HA. + +## Intra-City Dual-Center Deployment + +**Figure 2** Intra-city dual-center deployment +![同城双中心部署图](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/intra-city-dual-center-deployment.png) + +Networking features: Two intra-city AZs are more reliable than a single AZ. A synchronous standby node can be configured for the primary center and the intra-city center respectively. + +Advantages: + +1. Intra-city synchronous replication. If one data center is faulty, the other data center can still provide services without data loss. RPO = 0. +2. The cost is reasonable. + +Disadvantages: + +1. The intra-city distance should not be too long. It is recommended that the distance be within 70 km. The total latency caused by excessive read/write times should be considered during service design. +2. Remote DR is not supported. + +Applicability: Applicable to common service systems. + +## Two-City Three-DC Deployment + +**Figure 3** Two-city three-DC deployment +![两地三中心部署图](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/two-city-three-dc-deployment.png) + +Networking features: In the two-city three-DC deployment, each AZ must have at least one synchronous standby node. The cluster reliability can reach the highest level when the number of cities and data centers increases. + +Advantages: It supports zero data loss in remote DR, and has the highest reliability. RPO = 0.
+ +Disadvantages: + +1. If the remote DR distance is long and a synchronous standby node is configured in the remote center, the performance may be affected. +2. The cost is relatively high. + +Applicability: Applicable to core and important service systems. + +## Two-City Three-DC Streaming DR Solution + +**Figure 4** Two-city three-DC streaming DR solution +![两地三中心流式容灾方案部署图](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/two-city-three-dc-streaming-dr-solution.png) + +Networking features: Two independent clusters are deployed in the dual-cluster DR solution. The primary and DR cluster networking modes can be selected as required. The DR cluster selects the first standby DN to connect to the primary DN of the primary cluster. In the DR cluster, the first standby DN is connected in cascading standby mode. + +Advantages: + +1. The primary cluster has the advantage of single-cluster networking. You need to manually switch to the standby cluster only when the primary cluster is unavailable. +2. There is only one cross-cluster (remote) replication link regardless of whether a DR switchover occurs. Therefore, less network bandwidth is occupied. +3. The networking is more flexible. The primary cluster and DR cluster can use different networking modes. + +Disadvantages: + +1. DR clusters need to be added, increasing costs. +2. Remote DR RPO > 0. + +Applicability: Applicable to core and important service systems. + +For more information, see [Two-City Three-DC DR](./two-city-three-dc-dr.md). + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: +> +> The preceding deployments are typical solutions. You can adjust the deployment solutions based on actual service scenarios, for example, adding or deleting standby nodes, adjusting the number of centers, properly deploying synchronous and asynchronous standby nodes, and properly using cascaded standby nodes. diff --git a/product/en/docs-mogdb/v5.0/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-architecture.md b/product/en/docs-mogdb/v5.0/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-architecture.md index 094f0cf3..ebc5cfc4 100644 --- a/product/en/docs-mogdb/v5.0/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-architecture.md +++ b/product/en/docs-mogdb/v5.0/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-architecture.md @@ -1,20 +1,20 @@ ---- -title: Resource Pooling Architecture -summary: Resource Pooling Architecture -author: Guo Huan -date: 2023-04-07 ---- - -# Resource Pooling Architecture - -This document describes some best practices and precautions in the resource pooling architecture. Developers who are interested in related features can quickly deploy, practice, or perform customized development. It is recommended that developers have at least basic knowledge, be proficient in compiling MogDB source code, and master basic storage knowledge and basic Linux commands. - -The following figure shows the resource pooling architecture. - -**Figure 1: MogDB resource pooling architecture** - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/shared_storage.jpg) - -- The read/write node and the read-only node share the same underlying storage.
-- The read/write node and the read-only node use the DMS components to share hot data pages in the shared buffer pool through the TCP or RDMA protocol. +--- +title: Resource Pooling Architecture +summary: Resource Pooling Architecture +author: Guo Huan +date: 2023-04-07 +--- + +# Resource Pooling Architecture + +This document describes best practices and precautions for the resource pooling architecture so that developers who are interested in related features can quickly deploy it, experiment with it, or perform customized development. It is recommended that developers have basic database knowledge, be proficient in compiling MogDB source code, and master basic storage concepts and Linux commands. + +The following figure shows the resource pooling architecture. + +**Figure 1: MogDB resource pooling architecture** + +![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/shared_storage.jpg) + +- The read/write node and the read-only node share the same underlying storage. +- The read/write node and the read-only node use the DMS components to share hot data pages in the shared buffer pool through the TCP or RDMA protocol. - The read/write node and the read-only node access persistent data in the underlying shared storage through DSS APIs and DSS servers. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/exporting-data/exporting-data.md b/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/exporting-data/exporting-data.md index b62ea39e..4e84f494 100644 --- a/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/exporting-data/exporting-data.md +++ b/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/exporting-data/exporting-data.md @@ -1,13 +1,13 @@ ---- -title: Exporting Data -summary: Exporting Data -author: Guo Huan -date: 2023-05-22 ---- - -# Exporting Data - -+ **[Using gs_dump and gs_dumpall to Export Data Overview](1-using-gs_dump-and-gs_dumpall-to-export-data-overview.md)** -+ **[Exporting a Single Database](2-exporting-a-single-database.md)** -+ **[Exporting All Databases](3-exporting-all-databases.md)** +--- +title: Exporting Data +summary: Exporting Data +author: Guo Huan +date: 2023-05-22 +--- + +# Exporting Data + ++ **[Using gs_dump and gs_dumpall to Export Data Overview](1-using-gs_dump-and-gs_dumpall-to-export-data-overview.md)** ++ **[Exporting a Single Database](2-exporting-a-single-database.md)** ++ **[Exporting All Databases](3-exporting-all-databases.md)** + **[Data Export By a User Without Required Permissions](4-data-export-by-a-user-without-required-permissions.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/importing-and-exporting-data.md b/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/importing-and-exporting-data.md index 264f4e7b..f2960ca5 100644 --- a/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/importing-and-exporting-data.md +++ b/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/importing-and-exporting-data.md @@ -1,11 +1,11 @@ ---- -title: Importing And Exporting Data -summary: Importing And Exporting Data -author: Guo Huan -date: 
2023-05-22 +--- + +# Importing And Exporting Data + +- **[Importing Data](importing-data/importing-data.md)** - **[Exporting Data](exporting-data/exporting-data.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/importing-data/importing-data.md b/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/importing-data/importing-data.md index da0c8593..49a11c00 100644 --- a/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/importing-data/importing-data.md +++ b/product/en/docs-mogdb/v5.0/administrator-guide/importing-and-exporting-data/importing-data/importing-data.md @@ -1,19 +1,19 @@ ---- -title: Importing Data -summary: Importing Data -author: Guo Huan -date: 2023-05-22 ---- - -# Importing Data - -+ **[Import Modes](1-import-modes.md)** -+ **[Running the INSERT Statement to Insert Data](2-running-the-INSERT-statement-to-insert-data.md)** -+ **[Running the COPY FROM STDIN Statement to Import Data](3-running-the-COPY-FROM-STDIN-statement-to-import-data.md)** -+ **[Using a gsql Meta-Command to Import Data](4-using-a-gsql-meta-command-to-import-data.md)** -+ **[Using gs_restore to Import Data](5-using-gs_restore-to-import-data.md)** -+ **[Updating Data in a Table](6-updating-data-in-a-table.md)** -+ **[Deep Copy](7-deep-copy.md)** -+ **[ANALYZE Table](8-ANALYZE-table.md)** -+ **[Doing VACUUM to a Table](9-doing-VACUUM-to-a-table.md)** +--- +title: Importing Data +summary: Importing Data +author: Guo Huan +date: 2023-05-22 +--- + +# Importing Data + ++ **[Import Modes](1-import-modes.md)** ++ **[Running the INSERT Statement to Insert Data](2-running-the-INSERT-statement-to-insert-data.md)** ++ **[Running the COPY FROM STDIN Statement to Import Data](3-running-the-COPY-FROM-STDIN-statement-to-import-data.md)** ++ **[Using a gsql Meta-Command to Import Data](4-using-a-gsql-meta-command-to-import-data.md)** ++ **[Using gs_restore to Import Data](5-using-gs_restore-to-import-data.md)** ++ **[Updating Data in a Table](6-updating-data-in-a-table.md)** ++ **[Deep Copy](7-deep-copy.md)** ++ **[ANALYZE Table](8-ANALYZE-table.md)** ++ **[Doing VACUUM to a Table](9-doing-VACUUM-to-a-table.md)** + **[Managing Concurrent Write Operations](10-managing-concurrent-write-operations.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/administrator-guide/localization/localization.md b/product/en/docs-mogdb/v5.0/administrator-guide/localization/localization.md index c73e2fa1..32ea4dd3 100644 --- a/product/en/docs-mogdb/v5.0/administrator-guide/localization/localization.md +++ b/product/en/docs-mogdb/v5.0/administrator-guide/localization/localization.md @@ -1,12 +1,12 @@ ---- -title: Localization -summary: Localization -author: Guo Huan -date: 2023-05-22 ---- - -# Localization - -+ **[Locale Support](locale-support.md)** -+ **[Collation Support](collation-support.md)** +--- +title: Localization +summary: Localization +author: Guo Huan +date: 2023-05-22 +--- + +# Localization + ++ **[Locale Support](locale-support.md)** ++ **[Collation Support](collation-support.md)** + **[Character Set Support](character-set-support.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/administrator-guide/routine-maintenance/routine-maintenance.md b/product/en/docs-mogdb/v5.0/administrator-guide/routine-maintenance/routine-maintenance.md index 57b5d53f..b7236244 100644 --- a/product/en/docs-mogdb/v5.0/administrator-guide/routine-maintenance/routine-maintenance.md +++ 
b/product/en/docs-mogdb/v5.0/administrator-guide/routine-maintenance/routine-maintenance.md @@ -1,24 +1,24 @@ ---- -title: Routine Maintenance -summary: Routine Maintenance -author: Guo Huan -date: 2023-05-22 ---- - -# Routine Maintenance - -+ **[Starting and Stopping MogDB](0-starting-and-stopping-mogdb.md)** -+ **[Using the gsql Client for Connection](using-the-gsql-client-for-connection.md)** -+ **[Routine Maintenance Check Items](1-routine-maintenance-check-items.md)** -+ **[Checking OS Parameters](2-checking-os-parameters.md)** -+ **[Checking MogDB Health Status](3-checking-mogdb-health-status.md)** -+ **[Checking Database Performance](4-checking-database-performance.md)** -+ **[Checking and Deleting Logs](5-checking-and-deleting-logs.md)** -+ **[Checking Time Consistency](6-checking-time-consistency.md)** -+ **[Checking the Number of Application Connections](7-checking-the-number-of-application-connections.md)** -+ **[Routinely Maintaining Tables](8-routinely-maintaining-tables.md)** -+ **[Routinely Recreating an Index](9-routinely-recreating-an-index.md)** -+ **[Exporting and Viewing the WDR](exporting-and-viewing-the-wdr.md)** -+ **[Data Security Maintenance Suggestions](10-data-security-maintenance-suggestions.md)** -+ **[Slow SQL Diagnosis](slow-sql-diagnosis.md)** +--- +title: Routine Maintenance +summary: Routine Maintenance +author: Guo Huan +date: 2023-05-22 +--- + +# Routine Maintenance + ++ **[Starting and Stopping MogDB](0-starting-and-stopping-mogdb.md)** ++ **[Using the gsql Client for Connection](using-the-gsql-client-for-connection.md)** ++ **[Routine Maintenance Check Items](1-routine-maintenance-check-items.md)** ++ **[Checking OS Parameters](2-checking-os-parameters.md)** ++ **[Checking MogDB Health Status](3-checking-mogdb-health-status.md)** ++ **[Checking Database Performance](4-checking-database-performance.md)** ++ **[Checking and Deleting Logs](5-checking-and-deleting-logs.md)** ++ **[Checking Time Consistency](6-checking-time-consistency.md)** ++ **[Checking the Number of Application Connections](7-checking-the-number-of-application-connections.md)** ++ **[Routinely Maintaining Tables](8-routinely-maintaining-tables.md)** ++ **[Routinely Recreating an Index](9-routinely-recreating-an-index.md)** ++ **[Exporting and Viewing the WDR](exporting-and-viewing-the-wdr.md)** ++ **[Data Security Maintenance Suggestions](10-data-security-maintenance-suggestions.md)** ++ **[Slow SQL Diagnosis](slow-sql-diagnosis.md)** + **[Log Reference](11-log-reference.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/adaptive-plan-selection.md b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/adaptive-plan-selection.md index 693c2412..c202950f 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/adaptive-plan-selection.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/adaptive-plan-selection.md @@ -1,42 +1,42 @@ ---- -title: Adaptive Plan Selection -summary: Adaptive Plan Selection -author: zhang cuiping -date: 2022-10-13 ---- - -# Adaptive Plan Selection - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -This feature triggers plan selection based on the base table condition selection rate, and provides cache multi-plan management and adaptive selection for queries that use partial indexes and offsets. 
In typical scenarios, the query throughput can be improved by several times. - -## Benefits - -Users can maintain multiple cache plans to adapt to different query parameters, improving query execution performance. - -## Description - -Adaptive plan selection applies to scenarios where a general cache plan is used for plan execution. Cache plan exploration is performed by using range linear expansion, and plan selection is performed by using range coverage matching. Adaptive plan selection makes up for the performance problem caused by the traditional single cache plan that cannot change according to the query condition parameter, and avoids frequent calling of query optimization. - -## Enhancements - -None - -## Constraints - -- Database services are running properly. -- Users have logged in to the database. -- Users have created a database and data table, and have imported data. - -## Dependencies - -It depends on the plan cache function in the database. - -## Related Pages - +--- +title: Adaptive Plan Selection +summary: Adaptive Plan Selection +author: zhang cuiping +date: 2022-10-13 +--- + +# Adaptive Plan Selection + +## Availability + +This feature is available since MogDB 3.1.0. + +## Introduction + +This feature triggers plan selection based on the selectivity of base table conditions, and provides cached multi-plan management and adaptive selection for queries that use partial indexes and offsets. In typical scenarios, the query throughput can be improved by several times. + +## Benefits + +Users can maintain multiple cached plans to adapt to different query parameters, improving query execution performance. + +## Description + +Adaptive plan selection applies to scenarios where a generic cached plan is used for plan execution. Cached-plan exploration is performed by range linear expansion, and plan selection is performed by range coverage matching. Adaptive plan selection compensates for the performance problems of the traditional single cached plan, which cannot adapt to changing query parameters, and avoids frequently invoking the query optimizer. + +## Enhancements + +None. + +## Constraints + +- Database services are running properly. +- Users have logged in to the database. +- Users have created a database and data table, and have imported data. + +## Dependencies + +It depends on the plan cache function in the database.
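+
+A minimal sketch of the scenario this feature targets follows; the table, column, and parameter values are hypothetical:
+
+```sql
+-- A prepared statement is executed with parameter values of very different selectivity.
+-- With a single generic cached plan, one of the executions typically gets a poor plan;
+-- adaptive plan selection instead keeps multiple cached plans and matches each incoming
+-- parameter against the cached plan whose selectivity range covers it.
+PREPARE q(int) AS SELECT * FROM orders WHERE status_id = $1;
+EXECUTE q(1);    -- rare value: an index scan is usually preferable
+EXECUTE q(999);  -- common value: a sequential scan may be cheaper
+DEALLOCATE q;
+```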
+ +## Related Pages + [Adaptive Plan Selection](../../../AI-features/ai4db/abo-optimizer/adaptive-plan-selection/ai4db-adaptive-plan-selection.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/characteristic-description-abo-optimizer.md b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/characteristic-description-abo-optimizer.md index fc2984f7..b566d486 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/characteristic-description-abo-optimizer.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/characteristic-description-abo-optimizer.md @@ -1,11 +1,11 @@ ---- -title: ABO Optimizer -summary: ABO Optimizer -author: Guo Huan -date: 2023-05-22 ---- - -# ABO Optimizer - -+ **[Intelligent Cardinality Estimation](intelligent-cardinality-estimation.md)** +--- +title: ABO Optimizer +summary: ABO Optimizer +author: Guo Huan +date: 2023-05-22 +--- + +# ABO Optimizer + ++ **[Intelligent Cardinality Estimation](intelligent-cardinality-estimation.md)** + **[Adaptive Plan Selection](adaptive-plan-selection.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/intelligent-cardinality-estimation.md b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/intelligent-cardinality-estimation.md index af66b4d4..ced4aebc 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/intelligent-cardinality-estimation.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/abo-optimizer/intelligent-cardinality-estimation.md @@ -1,46 +1,46 @@ ---- -title: Intelligent Cardinality Estimation -summary: Intelligent Cardinality Estimation -author: zhang cuiping -date: 2022-10-13 ---- - -# Intelligent Cardinality Estimation - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -Intelligent cardinality estimation uses lightweight algorithms in databases to model multi-column data distribution and provides the capability of multi-column equality cardinality estimation. In scenarios where data skew occurs and columns are closely related, more accurate estimation results can be obtained to provide accurate cost reference for the optimizer, improving plan generation accuracy and database query execution efficiency. - -## Benefits - -Users can create intelligent statistics to improve the accuracy of multi-column statistics and improve the query optimization performance. - -## Description - -The intelligent estimation cardinality first uses data samples in the database to model data distribution, and compresses and stores the model in the database. The optimizer triggers intelligent estimation in the execution plan generation phase to estimate the cost more accurately and generate a better plan. - -## Enhancements - -None - -## Constraints - -- The database is running properly and resources are sufficient. -- Only the following data types are supported: FLOAT8, Double Precision, FlOAT4, REAL, INT16, BIGINT, INTEGER, VARCHAR, CHARACTER VARYING, CHAR, CHARACTER, and NUMERIC. -- Only query cardinality estimation with no more than 64 columns is supported. -- To ensure system performance, model creation uses only a maximum of 200,000 data samples. If the data is too sparse, the estimation result may be inaccurate. 
-- To make full use of the limited memory for model access acceleration, you are advised to create a maximum of 30 AI statistics columns. Otherwise, memory replacement may be triggered. -- If data of the variable-length string type is too long, the creation and estimation performance of cardinality estimation model may be affected. -- In the current version, if both MCV and Bayesian networks are created, the cardinality estimation performance is low. Therefore, you are not advised to create MCV and Bayesian networks. - -## Dependencies - -It depends on the multi-column statistics creation syntax and data sampling algorithms in databases. - -## Related Pages - +--- +title: Intelligent Cardinality Estimation +summary: Intelligent Cardinality Estimation +author: zhang cuiping +date: 2022-10-13 +--- + +# Intelligent Cardinality Estimation + +## Availability + +This feature is available since MogDB 3.1.0. + +## Introduction + +Intelligent cardinality estimation uses lightweight algorithms in databases to model multi-column data distribution and provides the capability of multi-column equality cardinality estimation. In scenarios where data skew occurs and columns are closely related, more accurate estimation results can be obtained to provide accurate cost reference for the optimizer, improving plan generation accuracy and database query execution efficiency. + +## Benefits + +Users can create intelligent statistics to improve the accuracy of multi-column statistics and improve the query optimization performance. + +## Description + +Intelligent cardinality estimation first uses data samples in the database to model data distribution, and compresses and stores the model in the database. The optimizer triggers intelligent estimation in the execution plan generation phase to estimate the cost more accurately and generate a better plan. + +## Enhancements + +None. + +## Constraints + +- The database is running properly and resources are sufficient. +- Only the following data types are supported: FLOAT8, Double Precision, FLOAT4, REAL, INT16, BIGINT, INTEGER, VARCHAR, CHARACTER VARYING, CHAR, CHARACTER, and NUMERIC. +- Only query cardinality estimation with no more than 64 columns is supported. +- To ensure system performance, model creation uses only a maximum of 200,000 data samples. If the data is too sparse, the estimation result may be inaccurate. +- To make full use of the limited memory for model access acceleration, you are advised to create a maximum of 30 AI statistics columns. Otherwise, memory replacement may be triggered. +- If data of the variable-length string type is too long, the creation and estimation performance of the cardinality estimation model may be affected. +- In the current version, if both MCV and Bayesian networks are created, the cardinality estimation performance is low. Therefore, you are advised not to create both. + +## Dependencies + +It depends on the multi-column statistics creation syntax and data sampling algorithms in databases.
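+
+As a minimal sketch of the dependency mentioned above, multi-column statistics can be declared and collected as follows (the table and column names are hypothetical):
+
+```sql
+-- Declare extended statistics on two correlated columns, then collect samples;
+-- the collected samples feed the multi-column distribution model used for estimation.
+ALTER TABLE sales ADD STATISTICS ((region, product_id));
+ANALYZE sales;
+```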
+ +## Related Pages + [Intelligent Cardinality Estimation](../../../AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/ai4db-intelligent-cardinality-estimation.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai-capabilities.md b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai-capabilities.md index 1666ed9c..e4e593ed 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai-capabilities.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai-capabilities.md @@ -1,12 +1,12 @@ ---- -title: AI Capabilities -summary: AI Capabilities -author: Guo Huan -date: 2023-05-22 ---- - -# AI Capabilities - -- **[AI4DB: Autonomous Database O&M](ai4db-autonomous-database-o-m/characteristic-description-ai4db.md)** -- **[DB4AI: Database-driven AI](db4ai-database-driven-ai.md)** +--- +title: AI Capabilities +summary: AI Capabilities +author: Guo Huan +date: 2023-05-22 +--- + +# AI Capabilities + +- **[AI4DB: Autonomous Database O&M](ai4db-autonomous-database-o-m/characteristic-description-ai4db.md)** +- **[DB4AI: Database-driven AI](db4ai-database-driven-ai.md)** - **[ABO Optimizer](abo-optimizer/characteristic-description-abo-optimizer.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/1-database-metric-collection-forecast-and-exception-detection.md b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/1-database-metric-collection-forecast-and-exception-detection.md index 3fa5a09f..cdc20900 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/1-database-metric-collection-forecast-and-exception-detection.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/1-database-metric-collection-forecast-and-exception-detection.md @@ -1,45 +1,45 @@ ---- -title: Database Metric Collection, Forecast, and Exception Detection -summary: Database Metric Collection, Forecast, and Exception Detection -author: Guo Huan -date: 2022-05-10 ---- - -# Database Metric Collection, Forecast, and Exception Detection - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -This feature serves as an AI tool integrated into MogDB and can be used to collect and forecast database metrics, as well as monitor and diagnose exceptions. It is a component in the DBMind suite. Currently, this feature is compatible with the Prometheus platform to collect database system metrics. Prometheus exporters are provided to collect and process database monitoring metrics. By monitoring the time series data of metrics, you can forecast the future load trend and diagnose problems. In addition, you can perform exception detection. - -## Benefits - -- This feature greatly simplifies the work of O&M personnel, releases a large number of labor resources, and reduces costs for the company. -- You can use the metric collection, monitoring, and forecast functions to detect problems in advance, preventing database exceptions from causing greater loss. - -## Description - -Prometheus is a popular open-source monitoring system in the industry. It is also a time series database. The collector of Prometheus is called exporter, which is used to collect metrics of monitored modules. 
To interconnect with the Prometheus platform, DBMind provides two types of exporters: openGauss-exporter for collecting database metrics and reprocessing-exporter for reprocessing the collected metrics. - -This feature supports forecast of collected metrics. You can specify key performance indicators (KPIs) to be forecasted by modifying configuration files. This helps you find metric trends and perform O&M operations in a timely manner. For example, you can forecast the memory usage to detect memory leakage and forecast the disk usage to expand the capacity at a proper time. The AI-based exception detection algorithm can detect the trend fluctuation of metrics, helping users detect problems in time. - -## Enhancements - -This feature is greatly improved in MogDB 3.0.0 and is compatible with the Prometheus platform. Two exporters are used to connect to Prometheus. - -## Constraints - -- The database is normal, and the data directory has been written into environment variables. -- The Python version must be 3.6 or later. -- The Prometheus monitoring platform is configured and the Prometheus service is started so that monitoring data can be collected. - -## Dependencies - -Prometheus - -## Related Pages - +--- +title: Database Metric Collection, Forecast, and Exception Detection +summary: Database Metric Collection, Forecast, and Exception Detection +author: Guo Huan +date: 2022-05-10 +--- + +# Database Metric Collection, Forecast, and Exception Detection + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +This feature serves as an AI tool integrated into MogDB and can be used to collect and forecast database metrics, as well as monitor and diagnose exceptions. It is a component in the DBMind suite. Currently, this feature is compatible with the Prometheus platform to collect database system metrics. Prometheus exporters are provided to collect and process database monitoring metrics. By monitoring the time series data of metrics, you can forecast the future load trend and diagnose problems. In addition, you can perform exception detection. + +## Benefits + +- This feature greatly simplifies the work of O&M personnel, frees up a large amount of labor, and reduces costs for the company. +- You can use the metric collection, monitoring, and forecast functions to detect problems in advance, preventing database exceptions from causing greater loss. + +## Description + +Prometheus is a popular open-source monitoring system in the industry. It is also a time series database. Prometheus collectors are called exporters and are used to collect metrics from monitored modules. To interconnect with the Prometheus platform, DBMind provides two types of exporters: openGauss-exporter for collecting database metrics and reprocessing-exporter for reprocessing the collected metrics. + +This feature supports forecasting of collected metrics. You can specify key performance indicators (KPIs) to be forecasted by modifying configuration files. This helps you find metric trends and perform O&M operations in a timely manner. For example, you can forecast the memory usage to detect memory leakage and forecast the disk usage to expand the capacity at a proper time. The AI-based exception detection algorithm can detect the trend fluctuation of metrics, helping users detect problems in time. + +## Enhancements + +This feature is greatly improved in MogDB 3.0.0 and is compatible with the Prometheus platform. Two exporters are used to connect to Prometheus.
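+
+A minimal sketch of starting the two exporters is shown below; the connection string, ports, and flags are illustrative assumptions and should be checked against the command reference:
+
+```bash
+# Start openGauss-exporter against the monitored instance (collects database metrics).
+gs_dbmind component opengauss_exporter \
+    --url postgresql://monitor_user:password@db_host:26000/postgres \
+    --web.listen-address 0.0.0.0 --web.listen-port 9187 --disable-https
+
+# Start reprocessing-exporter against the Prometheus server (derives secondary metrics).
+gs_dbmind component reprocessing_exporter prometheus_host 9090 \
+    --web.listen-address 0.0.0.0 --web.listen-port 8181 --disable-https
+```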
+ +## Constraints + +- The database is normal, and the data directory has been written into environment variables. +- The Python version must be 3.6 or later. +- The Prometheus monitoring platform is configured and the Prometheus service is started so that monitoring data can be collected. + +## Dependencies + +Prometheus + +## Related Pages + [Prometheus Exporter](../../../AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/2-root-cause-analysis-for-slow-sql-statements.md b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/2-root-cause-analysis-for-slow-sql-statements.md index ece000ec..fa9b5fc7 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/2-root-cause-analysis-for-slow-sql-statements.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/2-root-cause-analysis-for-slow-sql-statements.md @@ -1,42 +1,42 @@ ---- -title: Root Cause Analysis for Slow SQL Statements -summary: Root Cause Analysis for Slow SQL Statements -author: Guo Huan -date: 2022-05-10 ---- - -# Root Cause Analysis for Slow SQL Statements - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -Slow SQL statements have always been a pain point in data O&M. How to effectively diagnose the root causes of slow SQL statements is a big challenge. Based on the characteristics of MogDB and the slow SQL statement diagnosis experience of DBAs on the live network, this tool supports more than 15 root causes of slow SQL statements, outputs multiple root causes based on the possibility, and provides specific solutions. - -## Benefits - -This feature provides customers with fast and reliable slow SQL statement discovery and root cause analysis functions, greatly simplifying the work of O&M personnel. - -## Description - -Based on the Prometheus data collection solution, data required for root cause analysis for slow SQL statements is collected, including system resource information (CPU usage, memory usage, and I/O), load information (QPS), large process information (including external large processes and scheduled database tasks), slow SQL statement text information, start time and end time of slow SQL statement execution, slow SQL statement execution plan, temporary file information, and so on. Then, this feature calculates the most matched root cause of slow SQL statements based on the AI algorithm, and provides suggestions and confidence. - -## Enhancements - -None. - -## Constraints - -- The database is normal, and the client can be connected properly. -- An environment running Python 3.6 or later is available. -- The information about slow SQL statements is obtained from the workload diagnosis report (WDR). In the database WDR, slow SQL statements are marked. The GUC parameter **track_stmt_stat_level** is enabled by default. Otherwise, you need to manually enable it. Generally, the **track_stmt_stat_level** is set to **'off, L0'**. Higher levels will affect the performance. Data collection is implemented by the Prometheus solution. Therefore, you need to configure the Prometheus data collection platform. This feature focuses on algorithms and obtains metric sequence information from Prometheus. - -## Dependencies - -None. 
- -## Related Pages - +--- +title: Root Cause Analysis for Slow SQL Statements +summary: Root Cause Analysis for Slow SQL Statements +author: Guo Huan +date: 2022-05-10 +--- + +# Root Cause Analysis for Slow SQL Statements + +## Availability + +This feature is available since MogDB 3.0.0. + +## Introduction + +Slow SQL statements have always been a pain point in data O&M. How to effectively diagnose the root causes of slow SQL statements is a big challenge. Based on the characteristics of MogDB and the slow SQL statement diagnosis experience of DBAs in production environments, this tool supports more than 15 root causes of slow SQL statements, outputs multiple root causes ranked by likelihood, and provides specific solutions. + +## Benefits + +This feature provides customers with fast and reliable slow SQL statement discovery and root cause analysis functions, greatly simplifying the work of O&M personnel. + +## Description + +Based on the Prometheus data collection solution, data required for root cause analysis for slow SQL statements is collected, including system resource information (CPU usage, memory usage, and I/O), load information (QPS), large process information (including external large processes and scheduled database tasks), slow SQL statement text information, start time and end time of slow SQL statement execution, slow SQL statement execution plan, temporary file information, and so on. Then, this feature calculates the best-matched root causes of slow SQL statements based on the AI algorithm, and provides suggestions and confidence levels. + +## Enhancements + +None. + +## Constraints + +- The database is normal, and the client can be connected properly. +- An environment running Python 3.6 or later is available. +- The information about slow SQL statements is obtained from the workload diagnosis report (WDR). In the database WDR, slow SQL statements are marked. The GUC parameter **track_stmt_stat_level** is enabled by default. Otherwise, you need to manually enable it. Generally, the **track_stmt_stat_level** is set to **'off, L0'**. Higher levels will affect the performance. Data collection is implemented by the Prometheus solution. Therefore, you need to configure the Prometheus data collection platform. This feature focuses on algorithms and obtains metric sequence information from Prometheus. + +## Dependencies + +None. + +## Related Pages + [Slow Query Diagnosis: Root Cause Analysis for Slow SQL Statements](../../../AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/3-index-recommendation.md b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/3-index-recommendation.md index 88a7e150..807bbd6e 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/3-index-recommendation.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/3-index-recommendation.md @@ -1,44 +1,44 @@ ---- -title: Index Recommendation -summary: Index Recommendation -author: Guo Huan -date: 2022-05-10 ---- - -# Index Recommendation - -## Availability - -This feature is available since MogDB 1.1.0.
- -## Introduction - -This feature serves as an intelligent database index recommendation tool that covers multiple task levels and application scenarios. It provides the single-query index recommendation function, virtual index function, and workload-level index recommendation function to provide reliable index recommendations for users. - -## Benefits - -This feature provides the quick and reliable index recommendation function, greatly simplifying the work of O&M personnel. - -## Description - -The single-query index recommendation function allows users to directly perform operations in the database. This feature generates recommended indexes for a single query statement entered by users based on the semantic information of the query statement and the statistics of the database. The virtual index function allows users to directly perform operations in the database. This feature simulates the creation of a real index to avoid the time and space overhead required for creating a real index. Based on the virtual index, users can evaluate the impact of the index on the specified query statement by using the optimizer. The workload-level index recommendation can be used by running scripts outside the database. This feature uses the workload of multiple DML statements as the input to generate a batch of indexes that can optimize the overall workload execution performance. - -## Enhancements - -None. - -## Constraints - -The database is normal, and the client can be connected properly. - -The gsql tool has been installed by the current user, and the tool path has been added to the "_PATH_" environment variable. - -An environment running Python 3.6 or later is available. - -## Dependencies - -None. - -## Related Pages - +--- +title: Index Recommendation +summary: Index Recommendation +author: Guo Huan +date: 2022-05-10 +--- + +# Index Recommendation + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +This feature serves as an intelligent database index recommendation tool that covers multiple task levels and application scenarios. It provides the single-query index recommendation function, virtual index function, and workload-level index recommendation function to provide reliable index recommendations for users. + +## Benefits + +This feature provides the quick and reliable index recommendation function, greatly simplifying the work of O&M personnel. + +## Description + +The single-query index recommendation function allows users to directly perform operations in the database. This feature generates recommended indexes for a single query statement entered by users based on the semantic information of the query statement and the statistics of the database. The virtual index function allows users to directly perform operations in the database. This feature simulates the creation of a real index to avoid the time and space overhead required for creating a real index. Based on the virtual index, users can evaluate the impact of the index on the specified query statement by using the optimizer. The workload-level index recommendation can be used by running scripts outside the database. This feature uses the workload of multiple DML statements as the input to generate a batch of indexes that can optimize the overall workload execution performance. + +## Enhancements + +None. + +## Constraints + +The database is normal, and the client can be connected properly. 
+ +The gsql tool has been installed by the current user, and the tool path has been added to the "_PATH_" environment variable. + +An environment running Python 3.6 or later is available. + +## Dependencies + +None. + +## Related Pages + [Index-advisor: Index Recommendation](../../../AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/index-advisor-index-recommendation.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/4-parameter-tuning-and-diagnosis.md b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/4-parameter-tuning-and-diagnosis.md index 01401b38..f0f7c255 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/4-parameter-tuning-and-diagnosis.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/4-parameter-tuning-and-diagnosis.md @@ -1,50 +1,50 @@ ---- -title: Parameter Tuning and Diagnosis -summary: Parameter Tuning and Diagnosis -author: Guo Huan -date: 2022-05-10 ---- - -# Parameter Tuning and Diagnosis - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -This feature servers as a parameter tuning tool integrated into databases. It uses AI technologies such as deep reinforcement learning and global search algorithms to obtain the optimal database parameter settings without manual intervention. It is not forcibly deployed with the database environment. It can be independently deployed and run without the database installation environment. - -## Benefits - -This tool can quickly provide the parameter adjustment configuration of the current load in any scenario, reducing database administrator's manual intervention, improving the O&M effect, and meeting customer expectations. - -## Description - -The tuning program can run in any of the following modes: - -- **recommend**: Log in to the database using the specified username, obtain the feature information about the running workload, and generate a parameter recommendation report based on the feature information. Report improper parameter settings and potential risks in the current database. Output the currently running workload behavior and characteristics. Output the recommended parameter settings. In this mode, the database does not need to be restarted. In other modes, the database may need to be restarted repeatedly. -- **train**: Modify parameters and execute the benchmark based on the benchmark information provided by users. The reinforcement learning model is trained through repeated iteration so that you can load the model in **tune** mode for optimization. -- **tune**: Use an optimization algorithm to tune database parameters. Currently, two types of algorithms are supported: deep reinforcement learning and global search algorithm (global optimization algorithm). The deep reinforcement learning mode requires **train** mode to generate the optimized model after training. However, the global search algorithm does not need to be trained in advance and can be directly used for search and optimization. - -## Enhancements - -None. - -## Constraints - -- The database is normal, the client can be properly connected, and data can be imported to the database. As a result, the optimization program can perform the benchmark test for optimization effect. 
-- To use this tool, you need to specify the user who logs in to the database. The user who logs in to the database must have sufficient permissions to obtain sufficient database status information. -- If you log in to the database host as a Linux user, add **$GAUSSHOME/bin** to the PATH environment variable so that you can directly run database O&M tools, such as gsql, gs_guc, and gs\_ctl. -- The recommended Python version is Python 3.6 or later. The required dependency has been installed in the operating environment, and the optimization program can be started properly. You can install a Python 3.6+ environment independently without setting it as a global environment variable. You are not advised to install the tool as the root user. If you install the tool as the root user and run the tool as another user, ensure that you have the read permission on the configuration file. -- This tool can run in three modes. In **tune** and **train** modes, you need to configure the benchmark running environment and import data. This tool will iteratively run the benchmark to check whether the performance is improved after the parameters are modified. -- In **recommend** mode, you are advised to run the command when the database is executing the workload to obtain more accurate real-time workload information. -- By default, this tool provides benchmark running script samples of TPC-C, TPC-H, TPC-DS, and sysbench. If you use the benchmarks to perform pressure tests on the database system, you can modify or configure the preceding configuration files. To adapt to your own service scenarios, you need to compile the script file that drives your customized benchmark based on the **template.py** file in the **benchmark** directory. - -## Dependencies - -None. - -## Related Pages - +--- +title: Parameter Tuning and Diagnosis +summary: Parameter Tuning and Diagnosis +author: Guo Huan +date: 2022-05-10 +--- + +# Parameter Tuning and Diagnosis + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +This feature serves as a parameter tuning tool integrated into databases. It uses AI technologies such as deep reinforcement learning and global search algorithms to obtain the optimal database parameter settings without manual intervention. It does not have to be deployed together with the database environment; it can be deployed and run independently of the database installation environment. + +## Benefits + +This tool can quickly provide the parameter adjustment configuration of the current load in any scenario, reducing database administrators' manual intervention, improving the O&M effect, and meeting customer expectations. + +## Description + +The tuning program can run in any of the following modes: + +- **recommend**: Log in to the database using the specified username, obtain the feature information about the running workload, and generate a parameter recommendation report based on the feature information. Report improper parameter settings and potential risks in the current database. Output the currently running workload behavior and characteristics. Output the recommended parameter settings. In this mode, the database does not need to be restarted. In other modes, the database may need to be restarted repeatedly. +- **train**: Modify parameters and execute the benchmark based on the benchmark information provided by users. The reinforcement learning model is trained through repeated iteration so that you can load the model in **tune** mode for optimization.
+- **tune**: Use an optimization algorithm to tune database parameters. Currently, two types of algorithms are supported: deep reinforcement learning and the global search algorithm (global optimization algorithm). The deep reinforcement learning mode requires **train** mode to generate the optimized model after training. However, the global search algorithm does not need to be trained in advance and can be directly used for search and optimization. + +## Enhancements + +None. + +## Constraints + +- The database is normal, the client can be properly connected, and data can be imported to the database, so that the optimization program can run the benchmark to measure the optimization effect. +- To use this tool, you need to specify the user who logs in to the database. The user who logs in to the database must have sufficient permissions to obtain the required database status information. +- If you log in to the database host as a Linux user, add **$GAUSSHOME/bin** to the PATH environment variable so that you can directly run database O&M tools, such as gsql, gs_guc, and gs\_ctl. +- The recommended Python version is Python 3.6 or later. The required dependencies have been installed in the operating environment, and the optimization program can be started properly. You can install a Python 3.6+ environment independently without setting it as a global environment variable. You are not advised to install the tool as the root user. If you install the tool as the root user and run the tool as another user, ensure that you have the read permission on the configuration file. +- This tool can run in three modes. In **tune** and **train** modes, you need to configure the benchmark running environment and import data. This tool will iteratively run the benchmark to check whether the performance is improved after the parameters are modified. +- In **recommend** mode, you are advised to run the command when the database is executing the workload to obtain more accurate real-time workload information. +- By default, this tool provides benchmark running script samples of TPC-C, TPC-H, TPC-DS, and sysbench. If you use the benchmarks to perform pressure tests on the database system, you can modify or configure the preceding configuration files. To adapt to your own service scenarios, you need to write the script file that drives your customized benchmark based on the **template.py** file in the **benchmark** directory. + +## Dependencies + +None. + +## Related Pages + [X-Tuner: Parameter Tuning and Diagnosis](../../../AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-parameter-optimization-and-diagnosis.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/5-slow-sql-statement-discovery.md b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/5-slow-sql-statement-discovery.md index 1b8678e7..1e752152 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/5-slow-sql-statement-discovery.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/5-slow-sql-statement-discovery.md @@ -1,43 +1,43 @@ ---- -title: Slow SQL Statement Discovery -summary: Slow SQL Statement Discovery -author: Guo Huan -date: 2022-05-10 ---- - -# Slow SQL Statement Discovery - -## Availability - -This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-This feature servers as an SQL statement execution time forecast tool. It forecasts the execution time of SQL statements based on the statement logic similarity and historical execution records without obtaining the SQL statement execution plan using a template.
-
-## Benefits
-
-- This feature does not require users to provide SQL execution plans. Therefore, the database performance is not affected.
-- Different from other algorithms in the industry that are limited to OLAP or OLTP, this feature is more widely used.
-
-## Description
-
-The SQLdiag focuses on the historical SQL statements of the database, summarizes the execution performance of the historical SQL statements, and then uses the historical SQL statements to infer unknown services. The execution duration of SQL statements in the database does not differ greatly in a short period of time. SQLdiag can detect the statement result set similar to the executed SQL statements from historical data and predict the execution duration of SQL statements based on the SQL vectorization technology and template-based method.
-
-## Enhancements
-
-None.
-
-## Constraints
-
-- The historical logs and the format of the workload to be predicted meet the requirements. You can use the GUC parameter of the database to enable the collection or use the monitoring tool to collect logs.
-- To ensure the prediction accuracy, the historical statement logs provided by users should be as comprehensive and representative as possible.
-- The Python environment has been configured as required.
-
-## Dependencies
-
-None.
-
-## Related Pages
-
+---
+title: Slow SQL Statement Discovery
+summary: Slow SQL Statement Discovery
+author: Guo Huan
+date: 2022-05-10
+---
+
+# Slow SQL Statement Discovery
+
+## Availability
+
+This feature is available since MogDB 1.1.0.
+
+## Introduction
+
+This feature serves as an SQL statement execution time forecast tool. It forecasts the execution time of SQL statements from statement logic similarity and historical execution records, using a template-based method, without obtaining the SQL statement execution plan.
+
+## Benefits
+
+- This feature does not require users to provide SQL execution plans. Therefore, database performance is not affected.
+- Different from other algorithms in the industry that are limited to OLAP or OLTP, this feature is more widely applicable.
+
+## Description
+
+SQLdiag focuses on the historical SQL statements of the database, summarizes their execution performance, and then uses them to infer unknown workloads. Because the execution duration of SQL statements in a database does not vary greatly over a short period, SQLdiag can find statement result sets similar to the executed SQL statements in historical data and predict the execution duration of SQL statements based on SQL vectorization technology and a template-based method.
+
+## Enhancements
+
+None.
+
+## Constraints
+
+- The historical logs and the format of the workload to be predicted meet the requirements. You can use the GUC parameters of the database to enable collection or use the monitoring tool to collect logs.
+- To ensure prediction accuracy, the historical statement logs provided by users should be as comprehensive and representative as possible.
+- The Python environment has been configured as required.
+
+## Dependencies
+
+None.
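+
+## Example
+
+For illustration only: one way to accumulate the historical statement logs mentioned above is to lower the slow-statement threshold through a GUC parameter. The statement below uses `log_min_duration_statement` as an example (value in milliseconds); the SQLdiag page linked below describes the collection settings recommended for your version.
+
+```sql
+-- Record every statement running longer than 100 ms,
+-- so that historical execution records accumulate for SQLdiag
+ALTER SYSTEM SET log_min_duration_statement = 100;
+```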
+ +## Related Pages + [SQLdiag: Slow SQL Discovery](../../../AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/characteristic-description-ai4db.md b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/characteristic-description-ai4db.md index ec5c4709..de5c7351 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/characteristic-description-ai4db.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/characteristic-description-ai4db.md @@ -1,14 +1,14 @@ ---- -title: AI4DB Autonomous Database O&M -summary: AI4DB Autonomous Database O&M -author: Guo Huan -date: 2023-05-22 ---- - -# AI4DB: Autonomous Database O&M - -+ [Database Metric Collection, Forecast, and Exception Detection](1-database-metric-collection-forecast-and-exception-detection.md) -+ [Root Cause Analysis for Slow SQL Statements](2-root-cause-analysis-for-slow-sql-statements.md) -+ [Index Recommendation](3-index-recommendation.md) -+ [Parameter Tuning and Diagnosis](4-parameter-tuning-and-diagnosis.md) +--- +title: AI4DB Autonomous Database O&M +summary: AI4DB Autonomous Database O&M +author: Guo Huan +date: 2023-05-22 +--- + +# AI4DB: Autonomous Database O&M + ++ [Database Metric Collection, Forecast, and Exception Detection](1-database-metric-collection-forecast-and-exception-detection.md) ++ [Root Cause Analysis for Slow SQL Statements](2-root-cause-analysis-for-slow-sql-statements.md) ++ [Index Recommendation](3-index-recommendation.md) ++ [Parameter Tuning and Diagnosis](4-parameter-tuning-and-diagnosis.md) + [Slow SQL Statement Discovery](5-slow-sql-statement-discovery.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/db4ai-database-driven-ai.md b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/db4ai-database-driven-ai.md index 91a3b4ba..a45df4dd 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/db4ai-database-driven-ai.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/ai-capabilities/db4ai-database-driven-ai.md @@ -1,43 +1,43 @@ ---- -title: DB4AI Database-driven AI -summary: DB4AI Database-driven AI -author: Guo Huan -date: 2022-05-10 ---- - -# DB4AI: Database-driven AI - -## Availability - -This feature is available since MogDB 2.1.0. - -## Introduction - -DB4AI uses database capabilities to drive AI tasks and implement data storage and technology stack isomorphism. By integrating AI algorithms into the database, MogDB supports the native AI computing engine, model management, AI operators, and native AI execution plan, providing users with inclusive AI technologies. Different from the traditional AI modeling process, DB4AI one-stop modeling eliminates repeated data flowing among different platforms, simplifies the development process, and plans the optimal execution path through the database, so that developers can focus on the tuning of specific services and models. It outcompetes similar products in ease-of-use and performance. - -## Benefits - -- With this feature, you do not need to manually compile AI model code. 
Instead, you can use out-of-the-box SQL statements to train and forecast machine learning models, reducing the learning and usage costs.
-- Extra overhead that is caused by fragmented data storage and repeated data migration can be avoided.
-- A higher execution efficiency can be achieved. With this feature, the AI model training efficiency is high. Compared with manual model training, the performance is improved by several times.
-- Stricter security protection prevents data leakage during AI model training.
-
-## Description
-
-MogDB supports the native DB4AI capability. By introducing native AI operators, MogDB simplifies the operation process and fully utilizes the optimization and execution capabilities of the database optimizer and executor to obtain the high-performance model training capability in the database. With a simpler model training and forecast process and higher performance, developers can focus on model tuning and data analysis in a shorter period of time, avoiding fragmented technology stacks and redundant code implementation.
-
-## Enhancements
-
-More algorithms are supported in MogDB 3.0.0.
-
-## Constraints
-
-- The database is running properly.
-
-## Dependencies
-
-None.
-
-## Related Pages
-
+---
+title: DB4AI Database-driven AI
+summary: DB4AI Database-driven AI
+author: Guo Huan
+date: 2022-05-10
+---
+
+# DB4AI: Database-driven AI
+
+## Availability
+
+This feature is available since MogDB 2.1.0.
+
+## Introduction
+
+DB4AI uses database capabilities to drive AI tasks and implement data storage and technology stack isomorphism. By integrating AI algorithms into the database, MogDB supports the native AI computing engine, model management, AI operators, and native AI execution plans, providing users with inclusive AI technologies. Different from the traditional AI modeling process, DB4AI one-stop modeling eliminates repeated data movement among different platforms, simplifies the development process, and plans the optimal execution path through the database, so that developers can focus on tuning specific services and models. It outcompetes similar products in ease of use and performance.
+
+## Benefits
+
+- With this feature, you do not need to manually compile AI model code. Instead, you can use out-of-the-box SQL statements to train and forecast machine learning models, reducing the learning and usage costs.
+- The extra overhead caused by fragmented data storage and repeated data migration is avoided.
+- Higher execution efficiency can be achieved: AI model training in the database is several times faster than manual model training.
+- Stricter security protection prevents data leakage during AI model training.
+
+## Description
+
+MogDB supports the native DB4AI capability. By introducing native AI operators, MogDB simplifies the operation process and fully utilizes the optimization and execution capabilities of the database optimizer and executor to obtain high-performance model training capability in the database. With a simpler model training and forecast process and higher performance, developers can focus on model tuning and data analysis in a shorter period of time, avoiding fragmented technology stacks and redundant code implementation.
+
+## Enhancements
+
+More algorithms are supported in MogDB 3.0.0.
+
+## Constraints
+
+- The database is running properly.
+
+## Dependencies
+
+None.
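+
+## Example
+
+A minimal sketch of the one-stop modeling flow described above. The table `houses`, its columns, and the model name `price_model` are illustrative only; the statements follow the DB4AI SQL-like syntax (see the DB4AI page linked below for the authoritative syntax and the full set of supported algorithms):
+
+```sql
+-- Train a logistic regression model directly in the database
+CREATE MODEL price_model USING logistic_regression
+    FEATURES size, lot TARGET price < 100000 FROM houses;
+
+-- Use the trained model for in-database inference
+SELECT id, PREDICT BY price_model (FEATURES size, lot) AS prediction
+FROM houses;
+```
+
+Because both statements run inside the database, no data leaves the system and no separate AI platform is required.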
+ +## Related Pages + [DB4AI Database-driven AI](../../AI-features/db4ai/db4ai.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/1-standard-sql.md b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/1-standard-sql.md index 28d6d1ef..d372a0fb 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/1-standard-sql.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/1-standard-sql.md @@ -1,42 +1,42 @@ ---- -title: Standard SQL -summary: Standard SQL -author: Guo Huan -date: 2022-05-07 ---- - -# Standard SQL - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -SQL is a standard computer language used to control the access to databases and manage data in databases. SQL standards are classified into core features and optional features. Most databases do not fully support SQL standards. - -MogDB supports most of the core features of SQL:2011 and some optional features, providing a unified SQL interface for users. - -## Benefits - -All database vendors can use a unified SQL interface, reducing the costs of learning languages and migrating applications. - -## Description - -For details, see “SQL Syntax” in the *Reference Guide*. - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - +--- +title: Standard SQL +summary: Standard SQL +author: Guo Huan +date: 2022-05-07 +--- + +# Standard SQL + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +SQL is a standard computer language used to control the access to databases and manage data in databases. SQL standards are classified into core features and optional features. Most databases do not fully support SQL standards. + +MogDB supports most of the core features of SQL:2011 and some optional features, providing a unified SQL interface for users. + +## Benefits + +All database vendors can use a unified SQL interface, reducing the costs of learning languages and migrating applications. + +## Description + +For details, see “SQL Syntax” in the *Reference Guide*. + +## Enhancements + +None + +## Constraints + +None + +## Dependencies + +None + +## Related Pages + [SQL Syntax](../../reference-guide/sql-syntax/sql-syntax.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/2-standard-development-interfaces.md b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/2-standard-development-interfaces.md index cc8face5..dff0f137 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/2-standard-development-interfaces.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/2-standard-development-interfaces.md @@ -1,40 +1,40 @@ ---- -title: Standard Development Interfaces -summary: Standard Development Interfaces -author: Guo Huan -date: 2022-05-07 ---- - -# Standard Development Interfaces - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Standard ODBC 3.5 and JDBC 4.0 APIs are supported. - -## Benefits - -Standard ODBC and JDBC interfaces are provided to ensure quick migration of user services to MogDB. - -## Description - -Currently, the standard ODBC 3.5 and JDBC 4.0 APIs are supported. 
The ODBC interface supports SUSE Linux, Windows 32-bit, and Windows 64-bit platforms. The JDBC API supports all platforms.
-
-## Enhancements
-
-The function of connecting JDBC to a third-party log framework is added. JDBC can interconnect with a third-party log framework to meet users' log management and control requirements.
-
-## Constraints
-
-None
-
-## Dependencies
-
-None
-
-## Related Pages
-
+---
+title: Standard Development Interfaces
+summary: Standard Development Interfaces
+author: Guo Huan
+date: 2022-05-07
+---
+
+# Standard Development Interfaces
+
+## Availability
+
+This feature is available since MogDB 1.1.0.
+
+## Introduction
+
+Standard ODBC 3.5 and JDBC 4.0 APIs are supported.
+
+## Benefits
+
+Standard ODBC and JDBC interfaces are provided to ensure quick migration of user services to MogDB.
+
+## Description
+
+Currently, the standard ODBC 3.5 and JDBC 4.0 APIs are supported. The ODBC interface supports SUSE Linux, Windows 32-bit, and Windows 64-bit platforms. The JDBC API supports all platforms.
+
+## Enhancements
+
+Support for interconnecting JDBC with third-party log frameworks is added, meeting users' log management and control requirements.
+
+## Constraints
+
+None
+
+## Dependencies
+
+None
+
+## Related Pages
+
 [JDBC Interface Reference](../../developer-guide/dev/2-development-based-on-jdbc/15-JDBC/jdbc-interface-reference.md), [ODBC Interface Reference](../../developer-guide/dev/3-development-based-on-odbc/6-ODBC/odbc-interface-reference.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/3-postgresql-api-compatibility.md b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/3-postgresql-api-compatibility.md
index aee35d99..b69c9aeb 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/3-postgresql-api-compatibility.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/3-postgresql-api-compatibility.md
@@ -1,40 +1,40 @@
----
-title: PostgreSQL API Compatibility
-summary: PostgreSQL API Compatibility
-author: Guo Huan
-date: 2022-05-07
----
-
-# PostgreSQL API Compatibility
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-Compatible with PostgreSQL clients and standard APIs.
-
-## Benefits
-
-Compatible with the PostgreSQL clients and standard APIs, and can be seamlessly interconnected with PostgreSQL ecosystem tools.
-
-## Description
-
-Compatible with PostgreSQL clients and standard APIs.
-
-## Enhancements
-
-None
-
-## Constraints
-
-None
-
-## Dependencies
-
-None
-
-## Related Pages
-
+---
+title: PostgreSQL API Compatibility
+summary: PostgreSQL API Compatibility
+author: Guo Huan
+date: 2022-05-07
+---
+
+# PostgreSQL API Compatibility
+
+## Availability
+
+This feature is available since MogDB 1.1.0.
+
+## Introduction
+
+Compatible with PostgreSQL clients and standard APIs.
+
+## Benefits
+
+Compatible with PostgreSQL clients and standard APIs, and can be seamlessly interconnected with PostgreSQL ecosystem tools.
+
+## Description
+
+Compatible with PostgreSQL clients and standard APIs.
+ +## Enhancements + +None + +## Constraints + +None + +## Dependencies + +None + +## Related Pages + [Psycopg API Reference](../../developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/psycopg-api-reference.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/ECPG.md b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/ECPG.md index 6f9f6902..1a2efcb6 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/ECPG.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/ECPG.md @@ -1,113 +1,424 @@ ---- -title: Embedded SQL Preprocessor ECPG -summary: Embedded SQL Preprocessor ECPG -author: Guo Huan -date: 2023-04-04 ---- - -# Embedded SQL Preprocessor ECPG - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -Embedded SQL programs in C language are supported. - -## Benefits - -Embedded SQL programs compiled for other SQL databases can be easily migrated to MogDB, ensuring quick service migration. - -## Description - -An embedded SQL program consists of code written in an ordinary programming language, in this case C, mixed with SQL commands in specially marked sections. To build the program, the source code (*.pgc) is first passed through the embedded SQL preprocessor, which converts it to an ordinary C program (*.c), and afterwards it can be processed by a C compiler. Converted ECPG applications call functions in the libpq library through the embedded SQL library (ecpglib), and communicate with the MogDB server using the normal frontend-backend protocol. The embedded SQL program has an advantage over other methods in processing SQL commands for C code. - -1. It handles the lengthy information transfer between variables in C programs. -2. The SQL code in the program is checked during compilation to ensure syntax correctness. -3. SQL embedded in C is specified in SQL standards and is supported by many other SQL database systems. - -## Enhancements - -None. - -## Constraints - -ECPG supports most of the MogDB SQL syntax. However, the current syntax and lexical of the ECPG do not support the processing of anonymous block statements and package statements. Therefore, anonymous block statements and package creation statements cannot be used as embedded SQL statements. - -## Dependencies - -None. - -## Usage - -Using the following commands to compile ECPG: - -1. `ecpg testecpg.ecpg -o testecpg.c` - -2. `gcc -l$GAUSSHOME/include/postgresql/server/ -l$GAUSSHOME/include -L$GAUSSHOME/lib -lpq -lecpg -o testecpg testecpg.c` - -## Example - -```c -#include -#include - -/* error handlers for the whole program */ -EXEC SQL WHENEVER NOT FOUND DO BREAK; - - -int main(int argc, char **argv) -{ - EXEC SQL BEGIN DECLARE SECTION; - int v_id, v_name_ind; - char v_name[32]; - char *url="tcp:postgresql://127.0.0.1:5432/postgres"; - char *username="ecpg"; - char *password="Test@123"; - EXEC SQL END DECLARE SECTION; - - EXEC SQL DECLARE c CURSOR FOR - SELECT id, name - FROM test_ecpg_tab - ORDER BY 1; - - /* connect to the database */ - EXEC SQL CONNECT TO :url USER :username USING :password; - - /* open a cursor */ - EXEC SQL OPEN c; - - /* loop will be left if the cursor is done */ - for(;;) - { - /* get the next result row */ - EXEC SQL FETCH NEXT FROM c INTO :v_id, :v_name :v_name_ind; - - printf( - "id = %d, name = %s\n", - v_id, - v_name_ind ? 
"(null)" : v_name - ); - } - - EXEC SQL CLOSE c; - EXEC SQL COMMIT; - EXEC SQL DISCONNECT; - - return 0; -} -``` - -1. Create a database user - - ```sql - create user ecpg identified by 'Test@123'; - ``` - -2. Create a test table - - ```sql - drop table if exists ecpg.test_ecpg_tab; - create table ecpg.test_ecpg_tab as select id , ' name '||id name from generate_series(1,20) id; +--- +title: Embedded SQL Preprocessor ECPG +summary: Embedded SQL Preprocessor ECPG +author: Guo Huan +date: 2023-04-04 +--- + +# Embedded SQL Preprocessor ECPG + +## Availability + +This feature is available since MogDB 5.0.0. + +## Introduction + +Embedded SQL programs in C language are supported. + +## Benefits + +Embedded SQL programs compiled for other SQL databases can be easily migrated to MogDB, ensuring quick service migration. + +## Description + +An embedded SQL program consists of code written in an ordinary programming language, in this case C, mixed with SQL commands in specially marked sections. To build the program, the source code (*.pgc) is first passed through the embedded SQL preprocessor, which converts it to an ordinary C program (*.c), and afterwards it can be processed by a C compiler. Converted ECPG applications call functions in the libpq library through the embedded SQL library (ecpglib), and communicate with the MogDB server using the normal frontend-backend protocol. The embedded SQL program has an advantage over other methods in processing SQL commands for C code. + +1. It handles the lengthy information transfer between variables in C programs. +2. The SQL code in the program is checked during compilation to ensure syntax correctness. +3. SQL embedded in C is specified in SQL standards and is supported by many other SQL database systems. + +## Enhancements + +For compatibility with Oracle Pro\*C, to smoothly use ECPG to replace Pro\*C for implementing business logic, MogDB 5.0.8 has implemented the following features: + +1. Support for EXEC SQL FOR FETCH to retrieve multiple rows of results into the SQLDA structure. +2. Support for EXEC SQL EXECUTE IMMEDIATE {:host_string}. +3. Support for dynamic SQL PREPARE host variables. +4. Declaration and sharing of host variables. +5. Array-style indicator variables. +6. Compatibility with SQLDA's DESCRIPTOR. +7. Compatibility with DESCRIBE SELECT LIST FOR and DESCRIBE BIND VARIABLES FOR. +8. Compatibility with Pro\*C-style connection establishment methods. +9. Data type conversion and compatibility. +10. Handling of multi-row data with structure arrays. +11. Limiting the number of rows with the FOR clause. +12. Transaction processing syntax: commit, release, rollback, and release. +13. Support for anonymous block syntax. +14. Compatibility with EXECUTE IMMEDIATE string_literal. +15. Compatibility with PREPARE FROM [SelectStmt|UpdateStmt|InsertStmt|DeleteStmt|MergeStmt]. + +## Constraints + +1. Some OCI types have been implemented, but the MogOCI is immature and prohibited from use. +2. When binding these data types in sqlda, their type codes are consistent with ORACLE. +3. Consistent with PRO\*C, users need to implement binding variables and handle output column types when querying in the pre-compiled code. +4. For EXECUTE IMMEDIATE, only hoststring is supported, not string_literal. +5. PREPARE FROM only supports host variables and does not support SELECT syntax. +6. When using SQLDA to receive data, variable-length strings can only be correctly obtained when a length limit is specified for the column, such as: char[10]. 
+
+## Dependencies
+
+None.
+
+## Usage
+
+Use the following commands to compile an ECPG program:
+
+1. `ecpg testecpg.ecpg -o testecpg.c`
+
+2. `gcc -I$GAUSSHOME/include/postgresql/server -I$GAUSSHOME/include -L$GAUSSHOME/lib -lpq -lecpg -o testecpg testecpg.c`
+
+## Example
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+
+/* error handlers for the whole program */
+EXEC SQL WHENEVER NOT FOUND DO BREAK;
+
+
+int main(int argc, char **argv)
+{
+    EXEC SQL BEGIN DECLARE SECTION;
+    int v_id, v_name_ind;
+    char v_name[32];
+    char *url="tcp:postgresql://127.0.0.1:5432/postgres";
+    char *username="ecpg";
+    char *password="Test@123";
+    EXEC SQL END DECLARE SECTION;
+
+    EXEC SQL DECLARE c CURSOR FOR
+        SELECT id, name
+        FROM test_ecpg_tab
+        ORDER BY 1;
+
+    /* connect to the database */
+    EXEC SQL CONNECT TO :url USER :username USING :password;
+
+    /* open a cursor */
+    EXEC SQL OPEN c;
+
+    /* loop will be left if the cursor is done */
+    for(;;)
+    {
+        /* get the next result row */
+        EXEC SQL FETCH NEXT FROM c INTO :v_id, :v_name :v_name_ind;
+
+        printf(
+            "id = %d, name = %s\n",
+            v_id,
+            v_name_ind ? "(null)" : v_name
+        );
+    }
+
+    EXEC SQL CLOSE c;
+    EXEC SQL COMMIT;
+    EXEC SQL DISCONNECT;
+
+    return 0;
+}
+```
+
+```c
+// Basic operations
+
+// Query
+EXEC SQL SELECT ename,job,sal +2000 into :emp_name , :job_title,:salary from emp where empno = :emp_number;
+
+// Insert
+EXEC SQL INSERT INTO emp (empno,ename,sal,deptno) VALUES (:emp_number,:emp_name,:salary,:dept_number);
+
+// Update
+EXEC SQL UPDATE emp SET sal = :salary , comm = :commission WHERE empno =:emp_number;
+
+// Delete
+EXEC SQL DELETE FROM emp WHERE deptno = :dept_number;
+```
+
+```c
+//For dynamic SQL, one method is to use the SQLDA data structure to store data. The SQLDA structure is defined in the sqlda.h header file. Here we will introduce how to use SQLDA.
+#include <sqlda.h>
+SQLDA *bind_dp;
+SQLDA *select_dp;
+
+//The SQLDA structure can be initialized using the following method
+bind_dp = SQLSQLDAAlloc(runtime_context,size,name_length,ind_name_length);
+
+//In early versions of ORACLE, the sqlald() function was used to allocate a descriptor
+EXEC SQL FETCH ... USING DESCRIPTOR ...
+EXEC SQL OPEN ... USING DESCRIPTOR ...
+```
+
+The constructed SQLDA structure contains several members, and users need to understand the semantics of each member and construct and populate the SQLDA descriptor on their own. The details are as follows:
+
+- The N variable: The maximum number of select-list items or placeholders that can be Described.
+
+- The V variable: A pointer to the array of addresses of data buffers for storing the values of the select-list or bound variables.
+
+  Before using the select-list or bound variables, the corresponding space for V must be allocated and declared.
+
+- The L variable: A pointer to the array of lengths of the data buffers for the select-list or bound variable values.
+
+- The T variable: A pointer to the array of data type codes for the data buffers of the select-list or bound variables.
+
+- The I variable: A pointer to the data buffer for the indicator variables.
+
+- The F variable: The number of select-list items or placeholders actually found by DESCRIBE.
+
+- The S variable: A pointer to the array of names of the data buffers for the select-list or placeholders.
+
+- The M variable: The length of the names of the select-list items or placeholders.
+
+- The C variable: An array of the current lengths of the names of the select-list items or placeholders.
+
+- The X variable: An array for storing the names of the indicator variables.
+
+- The Y variable: An array of the maximum lengths of the names of the indicator variables.
+
+- The Z variable: An array of the current lengths of the names of the indicator variables.
+
+Users need to understand the implementation details of SQLDA mentioned above because they need to handle how to use SQLDA in their pre-compiled C code. The specific process is as follows:
+
+1. Declare a host string in the Declare Section to save the query text.
+2. Declare the SQLDA for select and bind.
+3. Allocate storage space for the select and bind descriptors.
+4. Set the maximum number of select lists and placeholders that can be Described.
+5. Place the query text in the host string.
+6. Prepare the query from the host string.
+7. Declare a cursor for the query.
+8. DESCRIBE the variables to be bound into the bind descriptor.
+9. Reset the number of placeholders to the number actually found by DESCRIBE.
+10. Get the values and allocate space for the bound variables found by DESCRIBE.
+11. Use the bind descriptor to open the cursor.
+12. DESCRIBE the select-list into the descriptor.
+13. Reset the number of select-list items to the number actually found by DESCRIBE.
+14. Reset the length and data type of each select-list column for display.
+15. FETCH a row from the database into the allocated data buffer pointed to by the select descriptor.
+16. Process the select-list values returned by FETCH.
+17. Free the storage space used for select-list items, placeholders, indicator variables, and descriptors.
+18. Close the cursor.
+
+```c
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+// Define the maximum number of columns and bound variables
+#define MAX_ITEMS 40
+// Define the maximum length of column names
+#define MAX_VNAME_LEN 30
+#define MAX_INAME_LEN 30
+
+int alloc_descriptor(int size,int max_vname_len,int max_iname_len);
+void set_bind_v();
+void set_select_v();
+void free_da();
+void sql_error(char *msg);
+
+EXEC SQL INCLUDE sqlca;
+EXEC SQL INCLUDE sqlda;
+EXEC SQL INCLUDE sqlcpr;
+
+// Host variable definitions:
+EXEC SQL BEGIN DECLARE SECTION;
+float f1 = 12.34;
+VARCHAR f2[64];
+char sql_statement[256]= "select * from test_ora";
+char type_statement[256]="select f1,f2 from test_ora where f1 <= :v1 and f2 = :v2"; /* WHERE condition reconstructed: two placeholders to match the two bind variables set in main() */
+EXEC SQL END DECLARE SECTION;
+
+// Global SQLDA descriptors used throughout the example
+SQLDA *bind_p;
+SQLDA *select_p;
+
+int main(int argc, char **argv)
+{
+    unsigned int i;
+
+    /* connect using the same settings as the first example above */
+    EXEC SQL CONNECT TO 'tcp:postgresql://127.0.0.1:5432/postgres' USER 'ecpg' USING 'Test@123';
+
+    // Allocate data for the sqlda type
+    alloc_descriptor(MAX_ITEMS,MAX_VNAME_LEN,MAX_INAME_LEN);
+
+    // Table creation statement
+    EXEC SQL DROP TABLE IF EXISTS TEST_ORA;
+    EXEC SQL CREATE TABLE TEST_ORA(f1 float, f2 text);
+    EXEC SQL INSERT INTO TEST_ORA VALUES(12.34,'abcd123');
+    EXEC SQL INSERT INTO TEST_ORA VALUES(12,'e234d');
+    EXEC SQL INSERT INTO TEST_ORA VALUES(12.34,'abcd123');
+    EXEC SQL INSERT INTO TEST_ORA VALUES(333.33,'abcd');
+    EXEC SQL commit;
+    // Prepare statement
+    EXEC SQL PREPARE S from :type_statement;
+    EXEC SQL DECLARE C1 CURSOR FOR S;
+    set_bind_v();
+
+    strcpy(f2.arr,"abcd123");
+    f2.len = strlen(f2.arr);
+    f2.arr[f2.len] = '\0';
+
+    bind_p->L[0] = sizeof(float);
+    bind_p->V[0] = (char*)malloc(bind_p->L[0]);
+    memcpy(bind_p->V[0], &f1, sizeof(float));
+    bind_p->T[0] = 4; /* EXTERNAL_PROC_FLOAT */
+    bind_p->L[1] = sizeof(char) * 64;
+    bind_p->V[1] = (char*)malloc(bind_p->L[1] + 1);
+    memcpy(bind_p->V[1], &f2, sizeof(char) * 64);
+    bind_p->T[1] = 1; /* EXTERNAL_PROC_VARCHAR2 */
+
+    EXEC SQL OPEN C1 USING DESCRIPTOR bind_p;
+    EXEC SQL DESCRIBE SELECT LIST for S INTO select_p;
+
+    set_select_v();
+    printf("f1\t\tf2\n");
+    printf("----------------------------------------------------------\n");
+    for(;;)
+    {
+        EXEC SQL WHENEVER NOT FOUND DO break;
+        EXEC SQL FETCH C1 USING DESCRIPTOR select_p;
+
+        for(i = 0; i < select_p->F; i++){
+            printf("%s    ",select_p->V[i]);
+        }
+        printf("\n");
+    }
+    free_da();
+    EXEC SQL CLOSE C1;
+    printf("\n-----------------------------------------------------\n");
+    alloc_descriptor(MAX_ITEMS,MAX_VNAME_LEN,MAX_INAME_LEN);
+    EXEC SQL PREPARE S from :sql_statement;
+    EXEC SQL DECLARE C CURSOR FOR S;
+    set_bind_v();
+    EXEC SQL OPEN C USING DESCRIPTOR bind_p;
+    EXEC SQL DESCRIBE SELECT LIST for S INTO select_p;
+    set_select_v();
+    EXEC SQL WHENEVER NOT FOUND DO break;
+    for (;;) {
+        EXEC SQL FETCH C USING DESCRIPTOR select_p;
+        for(i = 0; i < select_p->F; i++){
+            printf("%s    ",select_p->V[i]);
+        }
+        printf("\n");
+    }
+    free_da();
+    EXEC SQL CLOSE C;
+    EXEC SQL DROP TABLE TEST_ORA;
+    EXEC SQL COMMIT WORK RELEASE;
+    exit(0);
+}
+// Allocate descriptor space:
+int alloc_descriptor(int size,int max_vname_len,int max_iname_len)
+{
+    if((bind_p=sqlald(size,max_vname_len,max_iname_len))==(SQLDA*)0)
+    {
+        printf("can't allocate memory for bind_p.");
+        return -1;
+    }
+
+    if((select_p=sqlald(size,max_vname_len,max_iname_len))==(SQLDA*)0)
+    {
+        printf("can't allocate memory for select_p.");
+        return -1;
+    }
+
+    return 0;
+}
+// Setting of binding variables:
+void set_bind_v()
+{
+    unsigned int i;
+    EXEC SQL WHENEVER SQLERROR DO sql_error("");
+    bind_p->N = MAX_ITEMS;
+    EXEC SQL DESCRIBE BIND VARIABLES FOR S INTO bind_p;
+
+    if(bind_p->F<0)
+    {
+        printf("Too many bind variables");
+        return;
+    }
+    bind_p->N = bind_p->F;
+    for(i = 0; i < bind_p->N; i++)
+    {
+        bind_p->T[i] = 1;
+    }
+}
+
+// Processing of select columns
+void set_select_v()
+{
+    unsigned int i;
+    int null_ok,precision,scale;
+    EXEC SQL DESCRIBE SELECT LIST for S INTO select_p;
+
+    if(select_p->F<0)
+    {
+        printf("Too many column variables");
+        return;
+    }
+    select_p->N = select_p->F;
+    // Process the format
+    for(i = 0; i < select_p->N; i++)
+    {
+        sqlnul((short unsigned int*)&(select_p->T[i]), (short unsigned int*)&(select_p->T[i]), &null_ok);// Check whether the column allows NULL and clear the null bit
+        switch (select_p->T[i])
+        {
+        case 1://VARCHAR2
+            break;
+        case 2://NUMBER
+            sqlprc(&(select_p->L[i]), &precision, &scale);
+            if (precision == 0)
+                precision = 40;
+            select_p->L[i] = precision + 2;
+            break;
+        case 8://LONG
+            select_p->L[i] = 240;
+            break;
+        case 11://ROWID
+            select_p->L[i] = 18;
+            break;
+        case 12://DATE
+            select_p->L[i] = 9;
+            break;
+        case 23://RAW
+            break;
+        case 24://LONGRAW
+            select_p->L[i] = 240;
+            break;
+        }
+        select_p->V[i] = (char *)realloc(select_p->V[i], select_p->L[i]+1);
+        select_p->V[i][select_p->L[i]] ='\0';// Add terminator
+        select_p->T[i] = 1;// Convert all types to character type
+    }
+}
+// Function to free SQLDA memory:
+void free_da()
+{
+    sqlclu(bind_p);
+    sqlclu(select_p);
+}
+
+// Error handling
+void sql_error(char *msg)
+{
+    printf("\n%s %s\n", msg,(char *)sqlca.sqlerrm.sqlerrmc);
+    EXEC SQL WHENEVER SQLERROR CONTINUE;
+    EXEC SQL ROLLBACK RELEASE;
+    exit(0);
+}
+```
+
+1. Create a database user
+
+   ```sql
+   create user ecpg identified by 'Test@123';
+   ```
+
+2. Create a test table
+
+   ```sql
+   drop table if exists ecpg.test_ecpg_tab;
+   create table ecpg.test_ecpg_tab as select id , ' name '||id name from generate_series(1,20) id;
    ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/MogDB-MySQL-compatibility.md b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/MogDB-MySQL-compatibility.md
index b7ef0d51..0b7d7e49 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/MogDB-MySQL-compatibility.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/MogDB-MySQL-compatibility.md
@@ -1,30 +1,30 @@
----
-title: MogDB-MySQL Compatibility
-summary: MogDB-MySQL Compatibility
-author: Zhang Cuiping
-date: 2022-06-21
----
-
-# MogDB-MySQL Compatibility
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-This feature mainly enhances the compatibility of MogDB with MySQL in the following three aspects. At the same time, the `insert` syntax is supported, and `insert into` can be abbreviated as `insert`.
-
-User lock allows users to add custom locks through SQL, which allows multiple programs to complete the lock-related interaction process, making the client access from any location to get a consistent lock view.
-
-When data is inserted into a table to be created, the current time is inserted by default. During data update, if the update time is not specified, the time when the data is updated is displayed by default.
-
-Session-level SQL mode can be set, allowing change in running, global change, and intra-session change.
-
-## Benefits
-
-By setting user locks, data, data structures or certain strings are protected from interfering with each other between sessions, ensuring consistency and security of information. It solves the problem of recording the timestamp of users' operation when their business data is written and modified. By setting SQL mode, it can solve the compatibility between the legacy problems of earlier versions and later versions.
-
-## Related Pages
-
+---
+title: MogDB-MySQL Compatibility
+summary: MogDB-MySQL Compatibility
+author: Zhang Cuiping
+date: 2022-06-21
+---
+
+# MogDB-MySQL Compatibility
+
+## Availability
+
+This feature is available since MogDB 3.0.0.
+
+## Introduction
+
+This feature enhances the compatibility of MogDB with MySQL mainly in the following three aspects. In addition, the `insert` syntax is supported: `insert into` can be abbreviated as `insert`.
+
+User locks allow users to add custom locks through SQL, so that multiple programs can coordinate lock-related interactions and clients connecting from any location get a consistent view of the locks.
+
+When data is inserted into a newly created table, the current time is inserted by default. During data update, if the update time is not specified, the time when the data was updated is recorded by default.
+
+The SQL mode can be set at session level, allowing changes at runtime, globally, or within a session.
+
+## Benefits
+
+By setting user locks, data, data structures, or certain strings are protected from interference between sessions, ensuring the consistency and security of information. Default timestamps solve the problem of recording when users' business data is written and modified.
Setting the SQL mode resolves compatibility issues between earlier and later versions.
+
+## Related Pages
+
 [Dolphin Extension](../../developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-extension.md), [CREATE TABLE](../../reference-guide/sql-syntax/CREATE-TABLE.md), [ALTER TABLE](../../reference-guide/sql-syntax/ALTER-TABLE.md), [INSERT](../../reference-guide/sql-syntax/INSERT.md), [Advisory Lock Functions](../../reference-guide/functions-and-operators/system-management-functions/advisory-lock-functions.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/MogDB-Oracle-compatibility.md b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/MogDB-Oracle-compatibility.md
index c8f90d67..c259c2d0 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/MogDB-Oracle-compatibility.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/MogDB-Oracle-compatibility.md
@@ -1,30 +1,30 @@
----
-title: MogDB-Oracle Compatibility
-summary: MogDB-Oracle Compatibility
-author: Zhang Cuiping
-date: 2022-06-17
---

-
-# MogDB-Oracle Compatibility
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-MogDB is compatible with Oracle's related functions and package functions using whale extension.
-
-In the function part, it mainly adds instrb, nls_charset_id, nls_charset_name, nls_lower, nls_upper, ora_hash, remainder, replace, show, show_parameter, to_timestamp, to_yminterval, tz_offset, nullif, ratio_to_report, etc.
-
-packages are generally only used in stored procedures, and according to ORACLE data package rules, new packages are placed under the corresponding schema. The supported Oracle management packages are dbms_random, dbms_output, dbms_lock, dbms_application_info, dbms_metadata, dbms_job, dbms_utility.
-
-For more information about the functions and the packages, please see the [whale](../../developer-guide/extension/whale.md).
-
-## Benefits
-
-MogDB's compatibility with Oracle is enhanced by using the whale extension to enhance MogDB functions.
-
-## Related Pages
-
+---
+title: MogDB-Oracle Compatibility
+summary: MogDB-Oracle Compatibility
+author: Zhang Cuiping
+date: 2022-06-17
+---
+
+# MogDB-Oracle Compatibility
+
+## Availability
+
+This feature is available since MogDB 3.0.0.
+
+## Introduction
+
+MogDB is compatible with Oracle's related functions and package functions through the whale extension.
+
+In the function part, it mainly adds instrb, nls_charset_id, nls_charset_name, nls_lower, nls_upper, ora_hash, remainder, replace, show, show_parameter, to_timestamp, to_yminterval, tz_offset, nullif, ratio_to_report, etc.
+
+Packages are generally used only in stored procedures. According to Oracle package rules, new packages are placed under the corresponding schema. The supported Oracle management packages are dbms_random, dbms_output, dbms_lock, dbms_application_info, dbms_metadata, dbms_job, and dbms_utility.
+
+For more information about the functions and the packages, please see [whale](../../developer-guide/extension/whale.md).
+
+## Benefits
+
+MogDB's compatibility with Oracle is enhanced through the functions provided by the whale extension.
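+
+## Example
+
+A brief illustration of calling a few of the whale-provided functions named in this document (`nls_upper`, `ora_hash`, and `tz_offset`); the argument values are arbitrary, and the results depend on the server's character set and time zone configuration:
+
+```sql
+-- Oracle-style string and hash functions provided by whale
+SELECT nls_upper('abcde');
+SELECT ora_hash('some text');
+
+-- Time zone offset for a named region
+SELECT tz_offset('US/Pacific');
+```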
+ +## Related Pages + [whale](../../developer-guide/extension/whale.md), [Character Processing Functions and Operators](../../reference-guide/functions-and-operators/character-processing-functions-and-operators.md), [Mathematical Functions and Operators](../../reference-guide/functions-and-operators/mathematical-functions-and-operators.md), [Date and Time Processing Functions and Operators](../../reference-guide/functions-and-operators/date-and-time-processing-functions-and-operators.md), [HLL Functions and Operators](../../reference-guide/functions-and-operators/hll-functions-and-operators.md), [Window Functions](../../reference-guide/functions-and-operators/window-functions.md), [System Information Functions](../../reference-guide/functions-and-operators/system-information-functions/system-information-functions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/application-development-interfaces.md b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/application-development-interfaces.md index 8b6a93f0..4301d737 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/application-development-interfaces.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/application-development-interfaces/application-development-interfaces.md @@ -1,15 +1,15 @@ ---- -title: Application Development Interfaces -summary: Application Development Interfaces -author: Guo Huan -date: 2023-05-22 ---- - -# Application Development Interfaces - -+ **[Standard SQL](1-standard-sql.md)** -+ **[Standard Development Interfaces](2-standard-development-interfaces.md)** -+ **[PostgreSQL API Compatibility](3-postgresql-api-compatibility.md)** -+ **[MogDB-Oracle Compatibility](MogDB-Oracle-compatibility.md)** -+ **[MogDB-MySQL Compatibility](MogDB-MySQL-compatibility.md)** +--- +title: Application Development Interfaces +summary: Application Development Interfaces +author: Guo Huan +date: 2023-05-22 +--- + +# Application Development Interfaces + ++ **[Standard SQL](1-standard-sql.md)** ++ **[Standard Development Interfaces](2-standard-development-interfaces.md)** ++ **[PostgreSQL API Compatibility](3-postgresql-api-compatibility.md)** ++ **[MogDB-Oracle Compatibility](MogDB-Oracle-compatibility.md)** ++ **[MogDB-MySQL Compatibility](MogDB-MySQL-compatibility.md)** + **[Embedded SQL Preprocessor ECPG](ECPG.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/characteristic-description-overview.md b/product/en/docs-mogdb/v5.0/characteristic-description/characteristic-description-overview.md index a733e828..0cda3b91 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/characteristic-description-overview.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/characteristic-description-overview.md @@ -30,6 +30,9 @@ MogDB 5.0 has the following characteristics. 
+ [Ordering Operator Optimization](./high-performance/ordering-operator-optimization.md) + [OCK-accelerated Data Transmission](./high-performance/ock-accelerated-data-transmission.md) + [OCK SCRLock Accelerate Distributed Lock](./high-performance/ock-scrlock-accelerate-distributed-lock.md) + + [Enhancement of Dirty Pages Flushing Performance](./high-performance/enhancement-of-dirty-pages-flushing-performance.md) + + [Sequential Scan Prefetch](./high-performance/seqscan-prefetch.md) + + [Ustore SMP Parallel Scanning](./high-performance/ustore-smp.md) + High Availability (HA) + [Primary/Standby](./high-availability/1-primary-standby.md) + [Logical Replication](./high-availability/2-logical-replication.md) @@ -49,6 +52,9 @@ MogDB 5.0 has the following characteristics. + [Using a Standby Node to Build a Standby Node](./high-availability/16-using-a-standby-node-to-build-a-standby-node.md) + [Two-City Three-DC DR](./high-availability/17-two-city-three-dc-dr.md) + [CM Cluster Management Component Supporting Two Node Deployment](./high-availability/cm-cluster-management-component-supporting-two-node-deployment.md) + + [Query of the Original DDL Statement for a View](./high-availability/ddl-query-of-view.md) + + [MogDB/CM/PTK Dual Network Segment Support](./high-availability/cm-dual-network-segment-deployment.md) + + [Enhanced Efficiency of Logical Backup and Restore](./high-availability/enhanced-efficiency-of-logical-backup-and-restore.md) + Maintainability + [Workload Diagnosis Report (WDR)](./maintainability/2-workload-diagnosis-report.md) + [Slow SQL Diagnosis](./maintainability/3-slow-sql-diagnosis.md) @@ -58,6 +64,12 @@ MogDB 5.0 has the following characteristics. + [Extension-Splitting](./maintainability/extension-splitting.md) + [Built-in Stack Tool](./maintainability/built-in-stack-tool.md) + [SQL PATCH](./maintainability/sql-patch.md) + + [Lightweight Lock Export and Analysis](./maintainability/light-lock-export-and-analysis.md) + + [DCF Module Tracing](./maintainability/dcf-module-tracing.md) + + [Error When Writing Illegal Characters](./maintainability/error-when-writing-illegal-characters.md) + + [Support For Pageinspect & Pagehack](./maintainability/pageinspect-pagehack.md) + + [Autonomous Transaction Management View and Termination](./maintainability/autonomous-transaction-management.md) + + [Corrupt Files Handling](./maintainability/corrupt-files-handling.md) + Compatibility + [Add %rowtype Attribute To The View](./compatibility/add-rowtype-attribute-to-the-view.md) + [Aggregate Functions Distinct Performance Optimization](./compatibility/aggregate-functions-distinct-performance-optimization.md) @@ -82,6 +94,14 @@ MogDB 5.0 has the following characteristics. 
+ [Support PLPGSQL subtype](./compatibility/support-plpgsql-subtype.md) + [Support Synonym Calls Without Parentheses For Function Without Parameters](./compatibility/support-synonym-calls-without-parentheses-for-function-without-parameters.md) + [Support For dbms_utility.format_error_backtrace](./compatibility/format-error-backtrace.md) + + [Support for PIVOT and UNPIVOT Syntax](./compatibility/pivot-and-unpivot.md) + + [Mod function compatibility](./compatibility/mod-function-float-to-int.md) + + [Support for Nesting of Aggregate Functions](./compatibility/nesting-of-aggregate-functions.md) + + [ORDER BY/GROUP BY Scenario Expansion](./compatibility/order-by-group-by-scenario-expansion.md) + + [Support for Modifying Table Log Properties After Table Creation](./compatibility/modify-table-log-property.md) + + [Support for INSERT ON CONFLICT Clause](./compatibility/insert-on-conflict.md) + + [Support for AUTHID CURRENT_USER](./compatibility/authid-current-user.md) + + [Support for Stored Procedure OUT Parameters in PBE Mode](./compatibility/stored-procedure-out-parameters-in-pbe-mode.md) + Database Security + [Access Control Model](./database-security/1-access-control-model.md) + [Separation of Control and Access Permissions](./database-security/2-separation-of-control-and-access-permissions.md) @@ -124,6 +144,11 @@ MogDB 5.0 has the following characteristics. + [BRIN Index](./enterprise-level-features/24-brin-index.md) + [BLOOM Index](./enterprise-level-features/25-bloom-index.md) + [Event Trigger](./enterprise-level-features/event-trigger.md) + + [Scrollable Cursor Support for Reverse Retrieval](./enterprise-level-features/scroll-cursor.md) + + [Support for Pruning Subquery Projection Columns](./enterprise-level-features/support-for-pruning-subquery-projection-columns.md) + + [Pruning ORDER BY in Subqueries](./enterprise-level-features/pruning-order-by-in-subqueries.md) + + [Automatic Creation of Indexes Supporting Fuzzy Matching](./enterprise-level-features/index-support-fuzzy-matching.md) + + [Support for Importing and Exporting Specific Objects](./enterprise-level-features/import-export-specific-objects.md) + Application Development Interfaces + [Standard SQL](./application-development-interfaces/1-standard-sql.md) + [Standard Development Interfaces](./application-development-interfaces/2-standard-development-interfaces.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/characteristic-description.md b/product/en/docs-mogdb/v5.0/characteristic-description/characteristic-description.md index 916dc2b0..2492d1be 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/characteristic-description.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/characteristic-description.md @@ -1,20 +1,20 @@ ---- -title: Characteristic Description -summary: Characteristic Description -author: Guo Huan -date: 2023-05-22 ---- - -# Characteristic Description - -- **[Overview](characteristic-description-overview.md)** -- **[High Performance](high-performance/high-performance.md)** -- **[High Availability](high-availability/high-availability.md)** -- **[Maintainability](maintainability/maintainability.md)** -- **[Compatibility](compatibility/compatibility.md)** -- **[Database Security](database-security/database-security.md)** -- **[Enterprise-Level Features](enterprise-level-features/enterprise-level-features.md)** -- **[Application Development Interfaces](application-development-interfaces/application-development-interfaces.md)** -- **[AI 
Capabilities](ai-capabilities/ai-capabilities.md)**
-- **[Middleware](middleware/middleware.md)**
+---
+title: Characteristic Description
+summary: Characteristic Description
+author: Guo Huan
+date: 2023-05-22
+---
+
+# Characteristic Description
+
+- **[Overview](characteristic-description-overview.md)**
+- **[High Performance](high-performance/high-performance.md)**
+- **[High Availability](high-availability/high-availability.md)**
+- **[Maintainability](maintainability/maintainability.md)**
+- **[Compatibility](compatibility/compatibility.md)**
+- **[Database Security](database-security/database-security.md)**
+- **[Enterprise-Level Features](enterprise-level-features/enterprise-level-features.md)**
+- **[Application Development Interfaces](application-development-interfaces/application-development-interfaces.md)**
+- **[AI Capabilities](ai-capabilities/ai-capabilities.md)**
+- **[Middleware](middleware/middleware.md)**
 - **[Workload Management](workload-management/workload-management.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/authid-current-user.md b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/authid-current-user.md
new file mode 100644
index 00000000..95f318c1
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/authid-current-user.md
@@ -0,0 +1,98 @@
+---
+title: Support for AUTHID CURRENT_USER
+summary: Support for AUTHID CURRENT_USER
+author: Guo Huan, Han Xu
+date: 2024-01-29
+---
+
+# Support for AUTHID CURRENT_USER
+
+## Availability
+
+This feature is available since MogDB version 5.0.6.
+
+## Introduction
+
+This feature supports the specification of the AUTHID CURRENT_USER keyword when creating functions, stored procedures, and packages, allowing stored procedures or functions to execute SQL statements dynamically based on the caller's identity and with the search path set to the caller's schema.
+
+## Benefits
+
+Enhances compatibility with Oracle, database security, and flexibility, better adapting to the permission requirements of different users or roles.
+
+## Description
+
+AUTHID CURRENT_USER is a keyword used to specify the execution permissions for stored procedures, functions, packages, and so on. When the AUTHID CURRENT_USER keyword is used when creating a function or stored procedure, the function or procedure executes under the identity of the current caller, rather than that of the creator of the procedure, function, or package.
+
+For example, users A and B have a table with the same name. A stored procedure with the AUTHID CURRENT_USER attribute is created under user A and is granted to user B for execution. When user A executes the stored procedure, the data is inserted into A's table, and when user B executes the procedure, the data is inserted into B's table.
+
+The main purpose of using the AUTHID CURRENT_USER keyword is to create stored procedures or functions with more flexible permission control, allowing access permissions to be determined by the executor's identity and different logic to be executed based on the context, rather than strictly using the definer's permissions. Additionally, when using DBMS_OUTPUT.PUT_LINE or RAISE INFO to output information in stored procedures or functions, the current user's username can be obtained through the USER function.
+
+It should also be noted that the [behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options) = 'plsql_security_definer' switch affects the default execution permissions of procedures, functions, packages, and so on when the keyword is omitted: if this switch is turned on and the keyword is omitted, the creator (definer) mode applies; if this switch is turned off and the keyword is omitted, the caller (invoker) mode applies.
+
+## Syntax Description
+
+```sql
+CREATE [OR REPLACE] {PROCEDURE | FUNCTION | PACKAGE } object_name
+    [[ EXTERNAL ] SECURITY INVOKER | AUTHID CURRENT_USER]
+    {IS | AS}
+    [declaration_section]
+BEGIN
+    -- body_section
+END [object_name];
+```
+
+> Note: SECURITY INVOKER and AUTHID CURRENT_USER have the same functionality.
+
+## Constraints
+
+- Only the default A compatibility mode is supported.
+- This keyword must be used together with the [behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options) = 'set_procedure_current_schema' parameter.
+
+## Example
+
+```sql
+-- Create users and grant privileges
+drop user if exists authid_user1 cascade;
+drop user if exists authid_user2 cascade;
+CREATE USER authid_user1 IDENTIFIED BY 'user1@123';
+CREATE USER authid_user2 IDENTIFIED BY 'user2@123';
+GRANT ALL PRIVILEGES to authid_user1;
+GRANT ALL PRIVILEGES to authid_user2;
+
+-- Switch to user 1 and create a stored procedure under user 1
+gsql -d postgres -U authid_user1 -W 'user1@123'
+
+create table authid_tab01(col1 integer, col2 integer);
+create or replace procedure authid_proc01(val integer) AUTHID CURRENT_USER as
+begin
+    insert into authid_tab01 values(val, val);
+    raise info 'insert values (%, %)', val, val;
+    raise info 'current user %', USER;
+end;
+/
+
+GRANT EXECUTE ON procedure authid_proc01(val in integer) TO authid_user2;
+
+-- Switch to user 2, create a table with the same name, and call the stored procedure under user 1
+gsql -d postgres -U authid_user2 -W 'user2@123'
+
+SET behavior_compat_options = 'set_procedure_current_schema';
+
+create table authid_tab01(col1 integer, col2 integer);
+
+call authid_user1.authid_proc01(2);
+
+-- Check the execution results
+select * from authid_tab01;
+ col1 | col2
+------+------
+    2 |    2
+(1 row)
+
+select * from authid_user1.authid_tab01;
+(0 rows)
+```
+
+## Related Pages
+
+[CREATE FUNCTION](../../reference-guide/sql-syntax/CREATE-FUNCTION.md), [CREATE PROCEDURE](../../reference-guide/sql-syntax/CREATE-PROCEDURE.md), [CREATE PACKAGE](../../reference-guide/sql-syntax/CREATE-PACKAGE.md), [behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options)
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/compatibility.md b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/compatibility.md
index 1e019764..ac5ad299 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/compatibility.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/compatibility.md
@@ -1,32 +1,40 @@
----
-title: Compatibility
-summary: Compatibility
-author: Guo Huan
-date: 2023-06-20
----
-
-# Compatibility
-
-- **[Add %rowtype Attribute To The View](add-rowtype-attribute-to-the-view.md)**
-- **[Aggregate Functions Distinct Performance
Optimization](aggregate-functions-distinct-performance-optimization.md)** -- **[Aggregate Functions Support Keep Clause](aggregate-functions-support-keep-clause.md)** -- **[Aggregate Functions Support Scenario Extensions](aggregate-functions-support-scenario-extensions.md)** -- **[Compatible With MySQL Alias Support For Single Quotes](compatible-with-mysql-alias-support-for-single-quotes.md)** -- **[current_date/current_time Keywords As Field Name](current_date-current_time-keywords-as-field-name.md)** -- **[Custom Type Array](custom-type-array.md)** -- **[For Update Support Outer Join](for-update-supports-outer-join.md)** -- **[MogDB Supports Insert All](mogdb-supports-insert-all.md)** -- **[Oracle DBLink Syntax Compatibility](oracle-dblink-syntax-compatibility.md)** -- **[Remove Type Conversion Hint When Creating PACKAGE/FUNCTION/PROCEDURE](remove-type-conversion-hint-when-creating-package-function-procedure.md)** -- **[Support Bypass Method When Merge Into Hit Index](support-bypass-method-when-merge-into-hit-index.md)** -- **[Support For Adding Nocopy Attributes To Procedure And Function Parameters](support-for-adding-nocopy-attributes-to-procedure-and-function-parameters.md)** -- **[Support For Passing The Count Attribute Of An Array As A Parameter Of The Array Extend](support-passing-the-count-attribute.md)** -- **[Support Q Quote Escape Character](support-q-quote-escape-character.md)** -- **[Support Subtracting Two Date Types To Return Numeric Type](support-subtracting-two-date-types-to-return-numeric-type.md)** -- **[Support table()](support-table-function.md)** -- **[Support To Keep The Same Name After The End With Oracle](support-to-keep-the-same-name-after-the-end-with-oracle.md)** -- **[Support Where Current Of](support-where-current-of.md)** -- **[Support For Constants In Package As Default Values](support-for-constants-in-package-as-default-values.md)** -- **[Support PLPGSQL subtype](support-plpgsql-subtype.md)** -- **[Support Synonym Calls Without Parentheses For Function Without Parameters](support-synonym-calls-without-parentheses-for-function-without-parameters.md)** -- **[Support For dbms_utility.format_error_backtrace](format-error-backtrace.md)** \ No newline at end of file +--- +title: Compatibility +summary: Compatibility +author: Guo Huan +date: 2023-06-20 +--- + +# Compatibility + +- **[Add %rowtype Attribute To The View](add-rowtype-attribute-to-the-view.md)** +- **[Aggregate Functions Distinct Performance Optimization](aggregate-functions-distinct-performance-optimization.md)** +- **[Aggregate Functions Support Keep Clause](aggregate-functions-support-keep-clause.md)** +- **[Aggregate Functions Support Scenario Extensions](aggregate-functions-support-scenario-extensions.md)** +- **[Compatible With MySQL Alias Support For Single Quotes](compatible-with-mysql-alias-support-for-single-quotes.md)** +- **[current_date/current_time Keywords As Field Name](current_date-current_time-keywords-as-field-name.md)** +- **[Custom Type Array](custom-type-array.md)** +- **[For Update Support Outer Join](for-update-supports-outer-join.md)** +- **[MogDB Supports Insert All](mogdb-supports-insert-all.md)** +- **[Oracle DBLink Syntax Compatibility](oracle-dblink-syntax-compatibility.md)** +- **[Remove Type Conversion Hint When Creating PACKAGE/FUNCTION/PROCEDURE](remove-type-conversion-hint-when-creating-package-function-procedure.md)** +- **[Support Bypass Method When Merge Into Hit Index](support-bypass-method-when-merge-into-hit-index.md)** +- **[Support For Adding Nocopy 
Attributes To Procedure And Function Parameters](support-for-adding-nocopy-attributes-to-procedure-and-function-parameters.md)** +- **[Support For Passing The Count Attribute Of An Array As A Parameter Of The Array Extend](support-passing-the-count-attribute.md)** +- **[Support Q Quote Escape Character](support-q-quote-escape-character.md)** +- **[Support Subtracting Two Date Types To Return Numeric Type](support-subtracting-two-date-types-to-return-numeric-type.md)** +- **[Support table()](support-table-function.md)** +- **[Support To Keep The Same Name After The End With Oracle](support-to-keep-the-same-name-after-the-end-with-oracle.md)** +- **[Support Where Current Of](support-where-current-of.md)** +- **[Support For Constants In Package As Default Values](support-for-constants-in-package-as-default-values.md)** +- **[Support PLPGSQL subtype](support-plpgsql-subtype.md)** +- **[Support Synonym Calls Without Parentheses For Function Without Parameters](support-synonym-calls-without-parentheses-for-function-without-parameters.md)** +- **[Support For dbms_utility.format_error_backtrace](format-error-backtrace.md)** +- **[Support for PIVOT and UNPIVOT Syntax](pivot-and-unpivot.md)** +- **[Mod function compatibility](mod-function-float-to-int.md)** +- **[Support for Nesting of Aggregate Functions](nesting-of-aggregate-functions.md)** +- **[ORDER BY/GROUP BY Scenario Expansion](order-by-group-by-scenario-expansion.md)** +- **[Support for Modifying Table Log Properties After Table Creation](modify-table-log-property.md)** +- **[Support for INSERT ON CONFLICT Clause](insert-on-conflict.md)** +- **[Support for AUTHID CURRENT_USER](authid-current-user.md)** +- **[Support for Stored Procedure OUT Parameters in PBE Mode](stored-procedure-out-parameters-in-pbe-mode.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/insert-on-conflict.md b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/insert-on-conflict.md new file mode 100644 index 00000000..2e0ac2bb --- /dev/null +++ b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/insert-on-conflict.md @@ -0,0 +1,115 @@ +--- +title: Support for INSERT ON CONFLICT Clause +summary: Support for INSERT ON CONFLICT Clause +author: 郭欢 张沫 蒋兆恒 +date: 2024-01-29 +--- + +# Support for INSERT ON CONFLICT Clause + +## Availability + +This feature is available since MogDB 5.0.6. + +## Introduction + +MogDB supports the syntax and functionality of INSERT ON CONFLICT DO UPDATE/DO NOTHING. The ON CONFLICT clause specifies that when a unique constraint conflict arises, the statement following ON CONFLICT is executed, changing the INSERT action to UPDATE or DO NOTHING to avoid errors. + +## Benefits + +It is compatible with the syntax newly added in PostgreSQL 9.5, reducing the migration cost for applications. + +## Syntax Description + +The INSERT statement has a new ON CONFLICT clause: + +```sql +[ WITH [ RECURSIVE ] with_query [, ...] ] +INSERT INTO table_name [ AS alias ] [ ( column_name [, ...] ) ] +[ ON CONFLICT [conflict_target] DO { NOTHING | { UPDATE SET column_name = { expression | DEFAULT } } [, ...] [ WHERE condition ] } ] +``` + +Where conflict_target can be one of the following: + +```sql +( { index_column_name | ( index_expression ) } [ COLLATE collation ] [ opclass ] [, ...] 
) [ WHERE index_predicate ] + ON CONSTRAINT constraint_name +``` + +The optional ON CONFLICT clause provides a method for handling insert conflicts, mainly used to resolve insert failures caused by unique constraints or primary key constraints. When attempting to insert a row of data, if the unique constraint or primary key constraint already has the same value of data, the ON CONFLICT clause can specify the behavior when a conflict occurs, such as performing an update operation instead of inserting new data, or ignoring the conflict and taking no action. + +- conflict_target + + By selecting an index, it specifies which conflicts the ON CONFLICT should take alternative actions for. It either performs unique index inference or explicitly names a constraint. For ON CONFLICT DO NOTHING, conflict_target is optional. When omitted, conflicts with all valid constraints (and unique indexes) are handled. For ON CONFLICT DO UPDATE, conflict_target must be provided. + +- index_column_name + + The name of the index column. + +- index_expression + + Similar to index_column_name, but used for expressions of columns that appear in the index (not simple columns). + +- collation + + When specified, it requires the corresponding index_column_name or index_expression to match using a specific collation. It is usually omitted because collation usually does not affect whether a constraint is violated. + +- opclass + + When specified, it requires the corresponding index_column_name or index_expression to match using a specific operator class. It is usually omitted. + +- index_predicate + + Used to allow inference of partial unique indexes. Any index that satisfies this predicate (not necessarily a partial index) can be inferred. + +- constraint_name + + Explicitly specifies an arbitrator constraint by name, instead of inferring a constraint or index. + +- condition + + An expression that returns a Boolean value; only records for which this expression returns true will be updated. + +## Constraints + +- The target table does not support external tables. +- The target table does not support views, which is inconsistent with PostgreSQL behavior. +- SQL Bypass is not supported when there is a query statement in the INSERT. +- Column-store tables are not supported. +- Applicable to A and PG compatible modes. +- The [allow_concurrent_tuple_update](../../reference-guide/guc-parameters/MogDB-transaction.md#allow_concurrent_tuple_update) parameter needs to be set to off for scenarios that require compatibility with PG. +- The [lockwait_timeout](../../reference-guide/guc-parameters/lock-management.md#lockwait_timeout) needs to be set to 0 for scenarios that require compatibility with PG. 
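+
+Before the full example below, a minimal sketch of the DO NOTHING form described above (table and column names are hypothetical):
+
+```sql
+-- Hypothetical sketch: ignore a duplicate key instead of raising an error.
+CREATE TABLE t_conf(id int PRIMARY KEY, val text);
+INSERT INTO t_conf VALUES (1, 'first');
+INSERT INTO t_conf VALUES (1, 'second') ON CONFLICT (id) DO NOTHING;  -- no error; the existing row is kept
+SELECT * FROM t_conf;  -- returns a single row: (1, 'first')
+```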
+ +## Example + +```sql +drop index if exists i_upsert; +drop table if exists t_upsert cascade; + +-- Create a table and insert data +CREATE TABLE t_upsert( + id int , + name text, + price numeric +); +insert into t_upsert select generate_series,'test' || generate_series,generate_series*10 from generate_series(1,10); +select * from t_upsert order by 1 limit 10; + +-- Create an index +create unique index i_upsert on t_upsert(id); +select a.relname,b.indnatts,b.indisusable,b.indisunique,b.indisprimary +from pg_class a,pg_index b +where a.oid = b.indexrelid and b.indrelid = (select oid from pg_class where relname = 't_upsert') order by 1; + +-- Insert duplicate values, the conflict_target in the insert on conflict statement is the index column name +insert into t_upsert values(3,'gram',5.5) on conflict(id) do update set name='gram'; +select * from t_upsert order by 1; + +-- Clean up data +drop index if exists i_upsert; +drop table if exists t_upsert cascade; +``` + +## Related Pages + +[INSERT](../../reference-guide/sql-syntax/INSERT.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/mod-function-float-to-int.md b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/mod-function-float-to-int.md new file mode 100644 index 00000000..7b5a73af --- /dev/null +++ b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/mod-function-float-to-int.md @@ -0,0 +1,44 @@ +--- +title: Mod Function Compatibility +summary: Mod Function Compatibility +author: 谢海滨 郭欢 +date: 2024-01-03 +--- + +# Mod Function Compatibility + +## Availability + +This feature is available since MogDB 5.0.6. + +## Introduction + +This feature is compatible with the behavior of the mod function. For mod(float, int), a call that mixes float and integer types, the return type is numeric, not integer. + +## Benefits + +Enhance MogDB compatibility with Oracle to reduce application migration costs. + +## Description + +In the old version of MogDB, the mod(float, int) function would convert the first parameter to int type, and then use mod(int, int) to calculate the result, resulting in the final result being inconsistent with the expectation. To address this situation, MogDB 5.0.6 converts both arguments to numeric type and calculates the result using mod(numeric, numeric) to ensure the correctness of the result. + +## Example + +```sql +select mod(31::int8,16.02::float4); + mod +------- + 14.98 +(1 row) + +select mod(31.415::float4,16::int4); + mod +-------- + 15.415 +(1 row) +``` + +## Related Pages + +[Mathematical Functions and Operators](../../reference-guide/functions-and-operators/mathematical-functions-and-operators.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/modify-table-log-property.md b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/modify-table-log-property.md new file mode 100644 index 00000000..36c9be85 --- /dev/null +++ b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/modify-table-log-property.md @@ -0,0 +1,105 @@ +--- +title: Support for Modifying Table Log Properties After Table Creation +summary: Support for Modifying Table Log Properties After Table Creation +author: 郭欢 孙久武 +date: 2024-02-21 +--- + +# Support for Modifying Table Log Properties After Table Creation + +## Availability + +This feature is available since MogDB 5.0.6. + +## Introduction + +This feature supports the modification of table log properties via the ALTER TABLE statement after the table has been created. 
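+
+A quick illustration of the workflow this enables (the table name is hypothetical; the exact syntax and a full example are given below):
+
+```sql
+-- Hypothetical sketch: turn logging off for a bulk load, then turn it back on.
+ALTER TABLE staging_tab SET UNLOGGED;  -- subsequent writes to staging_tab skip the WAL
+-- ... bulk import and computation ...
+ALTER TABLE staging_tab SET LOGGED;    -- restore WAL-logged, crash-safe behavior
+```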
+
+## Benefits
+
+Enhances compatibility with Oracle, reduces the migration cost for applications, improves product usability, and enhances performance in certain scenarios.
+
+## Description
+
+Modifying a regular table to an unlogged table can improve import efficiency when temporarily importing a large amount of data for computation. Data written to an unlogged table is not written to the write-ahead log (WAL), but unlogged tables carry a risk of data loss in the event of conflicts, operating system restarts, database restarts, primary-standby switchovers, power-off operations, or abnormal shutdowns.
+
+The ALTER TABLE syntax change is as follows:
+
+```sql
+ALTER TABLE [ IF EXISTS ] table_name {
+    SET { LOGGED | UNLOGGED }  -- PG
+    | LOGGING | NOLOGGING      -- ORACLE
+}
+```
+
+## Constraints
+
+- Partitioned tables, column-store tables, temporary tables, and global temporary tables do not support the modification of log properties.
+- A regular table that is referenced by a foreign key constraint of another regular table cannot be changed to an unlogged table.
+- An unlogged table whose foreign key constraint references another unlogged table cannot be changed to a regular table.
+- After the table log property is modified, the log property of its indexes is automatically modified as well.
+- This feature is applicable to both A mode and PG mode.
+
+## Example
+
+```sql
+-- Create a logged regular table
+MogDB=# create table t_logged(a integer,b text);
+CREATE TABLE
+MogDB=# insert into t_logged values (generate_series(1, 3), 'a'|| generate_series(1, 3));
+INSERT 0 3
+MogDB=# select * from t_logged order by a,b;
+ a | b
+---+----
+ 1 | a1
+ 2 | a2
+ 3 | a3
+(3 rows)
+
+MogDB=# select relname, relpersistence from pg_class where relname='t_logged';
+ relname  | relpersistence
+----------+----------------
+ t_logged | p
+(1 row)
+
+-- Modify to an unlogged/logged table
+MogDB=# alter table t_logged set unlogged;
+ALTER TABLE
+MogDB=# select relname, relpersistence from pg_class where relname='t_logged';
+ relname  | relpersistence
+----------+----------------
+ t_logged | u
+(1 row)
+
+MogDB=# alter table t_logged logging;
+ALTER TABLE
+MogDB=# select relname, relpersistence from pg_class where relname='t_logged';
+ relname  | relpersistence
+----------+----------------
+ t_logged | p
+(1 row)
+
+MogDB=# alter table t_logged nologging;
+ALTER TABLE
+MogDB=# select relname, relpersistence from pg_class where relname='t_logged';
+ relname  | relpersistence
+----------+----------------
+ t_logged | u
+(1 row)
+
+MogDB=# alter table t_logged set logged;
+ALTER TABLE
+MogDB=# select relname, relpersistence from pg_class where relname='t_logged';
+ relname  | relpersistence
+----------+----------------
+ t_logged | p
+(1 row)
+
+-- Clean up environment
+MogDB=# drop table if exists t_logged cascade;
+DROP TABLE
+```
+
+## Related Pages
+
+[ALTER TABLE](../../reference-guide/sql-syntax/ALTER-TABLE.md)
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/nesting-of-aggregate-functions.md b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/nesting-of-aggregate-functions.md
new file mode 100644
index 00000000..d518fb64
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/nesting-of-aggregate-functions.md
@@ -0,0 +1,112 @@
+---
+title: Support for Nesting of Aggregate Functions
+summary: Support for Nesting of Aggregate Functions
+author: 郭欢 孙久武
+date: 2024-02-04
+---
+
+# Support for Nesting of Aggregate Functions
+
+## Availability
+
+This feature is available since MogDB 5.0.6.
+
+## Introduction
+
+Aggregate functions are used to calculate a single result from multiple input rows. This feature supports nesting of aggregate functions, with at most two levels of nesting; such a statement is equivalent to applying the outer aggregate function to the results produced by the inner (first-level) aggregate.
+
+## Benefits
+
+Enhances compatibility with Oracle, reduces the migration cost for applications, and improves product usability.
+
+## Constraints
+
+- The query statement must include a GROUP BY clause and supports all GROUP BY extended grouping functions, such as GROUPING SETS, ROLLUP, and CUBE functions.
+- Nesting is limited to a maximum of two levels; deeper nesting is not supported.
+- Projection columns cannot include table columns directly, even if they appear in the GROUP BY clause.
+- In the context of nested aggregate functions, ORDER BY has no practical effect; ordering by expressions not in the GROUP BY clause does not raise an error.
+- This feature is only applicable in A compatibility mode.
+
+## Example
+
+```sql
+-- Create base table
+MogDB=# CREATE TABLE t1 (a INT, b INT, c INT, d INT);
+CREATE TABLE
+
+-- Nesting of aggregate functions
+MogDB=# EXPLAIN (VERBOSE ON, COSTS OFF) SELECT sum(count(a)) FROM t1 GROUP BY a;
+            QUERY PLAN
+-----------------------------------
+ Aggregate
+   Output: sum((count(t1.a)))
+   ->  HashAggregate
+         Output: t1.a, count(t1.a)
+         Group By Key: t1.a
+         ->  Seq Scan on public.t1
+               Output: t1.a
+(7 rows)
+
+-- Multiple aggregate functions
+MogDB=# EXPLAIN (VERBOSE ON, COSTS OFF) SELECT sum(count(c)), min(count(c)) FROM t1 GROUP BY b;
+                    QUERY PLAN
+--------------------------------------------------
+ Aggregate
+   Output: sum((count(t1.c))), min((count(t1.c)))
+   ->  HashAggregate
+         Output: t1.b, count(t1.c)
+         Group By Key: t1.b
+         ->  Seq Scan on public.t1
+               Output: t1.b, t1.c
+(7 rows)
+
+-- Different nesting levels of aggregate functions
+MogDB=# EXPLAIN (VERBOSE ON, COSTS OFF) SELECT sum(count(a)), count(a) FROM t1 GROUP BY a;
+                 QUERY PLAN
+-------------------------------------------
+ Aggregate
+   Output: sum((count(t1.a))), count(t1.a)
+   ->  HashAggregate
+         Output: t1.a, count(t1.a)
+         Group By Key: t1.a
+         ->  Seq Scan on public.t1
+               Output: t1.a
+(7 rows)
+
+-- Complex aggregate functions
+MogDB=# EXPLAIN (VERBOSE ON, COSTS OFF) SELECT LISTAGG(count(a), ',') WITHIN group(ORDER BY a) FROM t1 GROUP BY a;
+                                 QUERY PLAN
+----------------------------------------------------------------------------
+ Aggregate
+   Output: listagg((count(t1.a)), ','::text ) WITHIN GROUP ( ORDER BY t1.a)
+   ->  HashAggregate
+         Output: t1.a, count(t1.a)
+         Group By Key: t1.a
+         ->  Seq Scan on public.t1
+               Output: t1.a
+(7 rows)
+
+-- GROUP BY using CUBE function
+MogDB=# EXPLAIN (VERBOSE ON, COSTS OFF) SELECT sum(count(a)), count(a) FROM t1 GROUP BY CUBE(a,b);
+                 QUERY PLAN
+-------------------------------------------
+ Aggregate
+   Output: sum((count(t1.a))), count(t1.a)
+   ->  GroupAggregate
+         Output: t1.a, t1.b, count(t1.a)
+         Group By Key: t1.a, t1.b
+         Group By Key: t1.a
+         Group By Key: ()
+         Sort Key: t1.b
+           Group By Key: t1.b
+         ->  Sort
+               Output: t1.a, t1.b
+               Sort Key: t1.a, t1.b
+               ->  Seq Scan on public.t1
+                     Output: t1.a, t1.b
+(14 rows)
+```
+
+## Related Pages
+
+[Aggregate Functions](../../reference-guide/functions-and-operators/aggregate-functions.md), [SELECT](../../reference-guide/sql-syntax/SELECT.md)
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/order-by-group-by-scenario-expansion.md b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/order-by-group-by-scenario-expansion.md
new file mode 100644
index 00000000..33d96383
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/order-by-group-by-scenario-expansion.md
@@ -0,0 +1,94 @@
+---
+title: ORDER BY/GROUP BY Scenario Expansion
+summary: ORDER BY/GROUP BY Scenario Expansion
+author: 郭欢 孙久武
+date: 2024-02-04
+---
+
+# ORDER BY/GROUP BY Scenario Expansion
+
+## Availability
+
+This feature is available since MogDB 5.0.6.
+
+## Introduction
+
+This feature expands the scenarios supported by the ORDER BY and GROUP BY clauses of query statements, making them compatible with certain Oracle behaviors.
+
+## Benefits
+
+Enhances compatibility with Oracle, reduces the migration cost for applications, and improves product usability.
+
+## Description
+
+### ORDER BY Scenario Compatibility
+
+- Compatible with Oracle's use of ORDER BY, supporting data de-duplication through the DISTINCT keyword.
+- Supports using numbers in ORDER BY to refer to projection columns.
+- Supports using constant strings in ORDER BY; they have no practical significance and do not affect the sorting result.
+- Supports ORDER BY with multiple columns, expressions, and aggregate functions.
+
+### GROUP BY Scenario Compatibility
+
+- Supports an empty string in GROUP BY.
+- Supports using integer constants in GROUP BY; they have no practical significance and do not affect the aggregation result.
+- Supports using constant strings and expressions in GROUP BY; they have no practical significance and do not affect the sorting or aggregation results.
+
+The GUC parameter [behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options) has a new option `compat_sort_group_column`, which controls the behavior of GROUP/ORDER BY. By default, the behavior is consistent with PG. After this option is set, the behavior is consistent with Oracle, and constants no longer affect the GROUP/ORDER BY result set. The option only takes effect when the value of the [sql_compatibility](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#sql_compatibility) parameter is A.
+
+## Constraints
+
+Only the A compatibility mode is supported.
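+
+A minimal sketch of the positional and constant ORDER BY entries described above (it reuses the t1/t2 tables created in the example below):
+
+```sql
+-- Hypothetical sketch: a position number and a constant string in ORDER BY (A compatibility mode).
+set behavior_compat_options to compat_sort_group_column;
+select aid, bid from t1 join t2 on aid = bid
+order by 1, 'noop';  -- "1" refers to the first projection column; the constant string does not affect ordering
+```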
+ +## Example + +```sql +MogDB=# CREATE TABLE t1(aid INT); +CREATE TABLE +MogDB=# CREATE TABLE t2(bid INT); +CREATE TABLE + +MogDB=# EXPLAIN (VERBOSE ON, COSTS OFF) SELECT DISTINCT aid FROM t1 JOIN t2 ON aid = bid ORDER BY t1.aid; + QUERY PLAN +----------------------------------------------- + Sort + Output: t1.aid, t1.aid + Sort Key: t1.aid + -> HashAggregate + Output: t1.aid, t1.aid + Group By Key: t1.aid, t1.aid + -> Hash Join + Output: t1.aid, t1.aid + Hash Cond: (t1.aid = t2.bid) + -> Seq Scan on public.t1 + Output: t1.aid + -> Hash + Output: t2.bid + -> Seq Scan on public.t2 + Output: t2.bid +(15 rows) + + +MogDB=# set behavior_compat_options to compat_sort_group_column; +SET +MogDB=# EXPLAIN (VERBOSE ON, COSTS OFF) SELECT aid, bid, '', count(1) FROM t1 JOIN t2 ON aid = bid GROUP BY aid, bid, ''; + QUERY PLAN +-------------------------------------------------- + HashAggregate + Output: t1.aid, t2.bid, (NULL::text), count(1) + Group By Key: t1.aid, t2.bid, NULL::text + -> Hash Join + Output: t1.aid, t2.bid, NULL::text + Hash Cond: (t1.aid = t2.bid) + -> Seq Scan on public.t1 + Output: t1.aid + -> Hash + Output: t2.bid + -> Seq Scan on public.t2 + Output: t2.bid +(12 rows) +``` + +## Related Pages + +[behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options), [SELECT](../../reference-guide/sql-syntax/SELECT.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/pivot-and-unpivot.md b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/pivot-and-unpivot.md new file mode 100644 index 00000000..0e88445b --- /dev/null +++ b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/pivot-and-unpivot.md @@ -0,0 +1,669 @@ +--- +title: Support for PIVOT and UNPIVOT Syntax +summary: Support for PIVOT and UNPIVOT Syntax +author: Guo Huan +date: 2023-12-18 +--- + +# Support for PIVOT and UNPIVOT Syntax + +## Availability + +This feature is available since MogDB 5.0.4. + +## Introduction + +This feature is compatible with Oracle's PIVOT and UNPIVOT syntax and functionality. + +## Benefits + +Enhance MogDB compatibility with Oracle to reduce application migration costs. + +## Description + +The PIVOT clause is used to transform the values of specified fields from rows into columns, while the UNPIVOT clause is used to transform the values of specified fields from columns into rows. + +## Syntax Description + +### PIVOT + +``` +pivot_clause::= PIVOT ( aggregate_function ( expr ) [[AS] alias ][, ...] + pivot_for_clause + pivot_in_clause + ) +``` + +``` +pivot_for_clause::= FOR (column [, ...]) +``` + +``` +pivot_in_clause::= IN ({{{ expr | (expr [, ...])} [[AS] alias] [, ...]} | subquery [, ...]}) +``` + +### UNPIVOT + +``` +unpivot_clause::= UNPIVOT [ {INCLUDE | EXCLUDE} NULLS ] +({column | (column [, ...])} + pivot_for_clause + unpivot_in_clause +) +``` + +``` +pivot_for_clause::= FOR (column [, ...]) +``` + +``` +unpivot_in_clause::= IN ({column | (column [, ...])} [AS {literal | (literal [, ...])}] [ {column | (column [, ...])} [AS {literal | (literal [, ...])}]]) +``` + +## Constraints + +- PIVOT and UNPIVOT only support query statements. +- PIVOT and UNPIVOT support regular tables, temporary tables, column store tables, partitioned tables, subqueries, and WITH clauses, etc. They support multiple PIVOTs, multiple UNPIVOTs, joins, and parallel processing. +- PIVOT and UNPIVOT support CREATE VIEW, CREATE TABLE AS, and SELECT INTO statements. 
+- PIVOT supports hashAgg and sortAgg.
+- The PIVOT clause does not support XML.
+- The PIVOT IN clause does not support subqueries and ANY.
+- PIVOT and UNPIVOT do not support vectorization at the moment.
+- PIVOT and UNPIVOT do not support nesting.
+
+## Example
+
+### PIVOT
+
+```sql
+# PIVOT usage example with a regular table
+MogDB=# create table emp_phone(name varchar2(50), type char, phone varchar2(50));
+CREATE TABLE
+MogDB=# insert into emp_phone values('aaa', '1', '1234-5678');
+INSERT 0 1
+MogDB=# insert into emp_phone values('aaa', '2', '3219-6066');
+INSERT 0 1
+MogDB=# insert into emp_phone values('aaa', '3', '5365-9583');
+INSERT 0 1
+MogDB=# insert into emp_phone values('bbb', '1', '6837-2745');
+INSERT 0 1
+MogDB=# insert into emp_phone values('bbb', '3', '2649-5820');
+INSERT 0 1
+MogDB=# insert into emp_phone values('ccc', '1', '5838-9002');
+INSERT 0 1
+MogDB=# insert into emp_phone values('ccc', '2', '2749-5580');
+INSERT 0 1
+MogDB=# insert into emp_phone values('ddd', '2', '9876-3453');
+INSERT 0 1
+MogDB=# insert into emp_phone values('aaa', '3', '5365-9599');
+INSERT 0 1
+MogDB=# insert into emp_phone values('aaa', '3', '1111-9599');
+INSERT 0 1
+MogDB=# select * from emp_phone pivot(max(phone) for type in (1 home, 2 office, 3 mobile)) order by 1;
+ name |   home    |  office   |  mobile
+------+-----------+-----------+-----------
+ aaa  | 1234-5678 | 3219-6066 | 5365-9599
+ bbb  | 6837-2745 |           | 2649-5820
+ ccc  | 5838-9002 | 2749-5580 |
+ ddd  |           | 9876-3453 |
+(4 rows)
+
+# If there is no alias in the IN clause, the value itself is used as the column name
+MogDB=# select * from emp_phone pivot(max(phone) for type in (1, 2, 3));
+ name |     1     |     2     |     3
+------+-----------+-----------+-----------
+ aaa  | 1234-5678 | 3219-6066 | 5365-9599
+ bbb  | 6837-2745 |           | 2649-5820
+ ccc  | 5838-9002 | 2749-5580 |
+ ddd  |           | 9876-3453 |
+(4 rows)
+
+# Delete table
+MogDB=# drop table emp_phone;
+DROP TABLE
+
+# Example of using PIVOT with a partitioned table
+MogDB=# create table emp_phone(name varchar2(50), type char, phone varchar2(50))
+partition by list(type)
+(
+    PARTITION p1 VALUES ('1', '2'),
+    PARTITION p2 VALUES ('3')
+);
+CREATE TABLE
+MogDB=# insert into emp_phone values('aaa', '1', '2234-5678');
+INSERT 0 1
+MogDB=# insert into emp_phone values('aaa', '2', '3219-6066');
+INSERT 0 1
+MogDB=# insert into emp_phone values('aaa', '3', '5365-9583');
+INSERT 0 1
+MogDB=# insert into emp_phone values('bbb', '1', '6837-2745');
+INSERT 0 1
+MogDB=# insert into emp_phone values('bbb', '3', '2649-5820');
+INSERT 0 1
+MogDB=# insert into emp_phone values('ccc', '1', '5838-9002');
+INSERT 0 1
+MogDB=# insert into emp_phone values('ccc', '2', '2749-5580');
+INSERT 0 1
+MogDB=# insert into emp_phone values('ddd', '2', '9876-3453');
+INSERT 0 1
+MogDB=# insert into emp_phone values('aaa', '3', '5365-9599');
+INSERT 0 1
+MogDB=# insert into emp_phone values('aaa', '3', '1111-9599');
+INSERT 0 1
+MogDB=# select * from emp_phone pivot(max(phone) for type in (1 home, 2 office, 3 mobile)) order by 1;
+ name |   home    |  office   |  mobile
+------+-----------+-----------+-----------
+ aaa  | 2234-5678 | 3219-6066 | 5365-9599
+ bbb  | 6837-2745 |           | 2649-5820
+ ccc  | 5838-9002 | 2749-5580 |
+ ddd  |           | 9876-3453 |
+(4 rows)
+
+MogDB=# explain(verbose, costs off) select * from emp_phone partition(p1) pivot(max(phone) for type in (1 home, 2 office, 3 mobile)) order by 1;
+                                                          QUERY PLAN
+
+----------------------------------------------------------------------------------------------------------------------------
+ Sort
+   Output: 
emp_phone.name, (max((CASE WHEN ((emp_phone.type)::bigint = 1) THEN emp_phone.phone ELSE NULL::character varying END)::text)), (max((CASE WHEN ((emp_phone.type)::bigint = 2) THEN + emp_phone.phone ELSE NULL::character varying END)::text)), (max((CASE WHEN ((emp_phone.type)::bigint = 3) THEN emp_phone.phone ELSE NULL::character varying END)::text)) + Sort Key: emp_phone.name + -> HashAggregate + Output: emp_phone.name, max((CASE WHEN ((emp_phone.type)::bigint = 1) THEN emp_phone.phone ELSE NULL::character varying END)::text), max((CASE WHEN ((emp_phone.type)::bigint = 2) T +HEN emp_phone.phone ELSE NULL::character varying END)::text), max((CASE WHEN ((emp_phone.type)::bigint = 3) THEN emp_phone.phone ELSE NULL::character varying END)::text) + Group By Key: emp_phone.name + -> Partition Iterator + Output: emp_phone.name, emp_phone.type, emp_phone.phone + Iterations: 1 + Selected Partitions: 1 + -> Partitioned Seq Scan on public.emp_phone + Output: emp_phone.name, emp_phone.type, emp_phone.phone +(12 rows) + +# PIVOT supports join operations +MogDB=# explain (verbose) select * from emp_phone pivot(max(phone) for type in (1 as home, 2 as office, 3 as mobile)) as p1, emp_phone pivot(max(phone) for type in (1 as home, 2 as office, 3 as mobile)) as p2 where p1.name=p2.name; + + QUERY PLAN + +---------------------------------------------------------------------------------------------------------------------------- + + Hash Join (cost=47.36..54.11 rows=200 distinct=[200, 200] width=428) + Output: public.emp_phone.name, (max((CASE WHEN ((public.emp_phone.type)::bigint = 1) THEN public.emp_phone.phone ELSE NULL::character varying END)::text)), (max((CASE WHEN ((public.emp_p +hone.type)::bigint = 2) THEN public.emp_phone.phone ELSE NULL::character varying END)::text)), (max((CASE WHEN ((public.emp_phone.type)::bigint = 3) THEN public.emp_phone.phone ELSE NULL::c +haracter varying END)::text)), public.emp_phone.name, (max((CASE WHEN ((public.emp_phone.type)::bigint = 1) THEN public.emp_phone.phone ELSE NULL::character varying END)::text)), (max((CASE + WHEN ((public.emp_phone.type)::bigint = 2) THEN public.emp_phone.phone ELSE NULL::character varying END)::text)), (max((CASE WHEN ((public.emp_phone.type)::bigint = 3) THEN public.emp_phon +e.phone ELSE NULL::character varying END)::text)) + Hash Cond: ((public.emp_phone.name)::text = (public.emp_phone.name)::text) + -> HashAggregate (cost=20.43..22.43 rows=200 width=340) + Output: public.emp_phone.name, max((CASE WHEN ((public.emp_phone.type)::bigint = 1) THEN public.emp_phone.phone ELSE NULL::character varying END)::text), max((CASE WHEN ((public.em +p_phone.type)::bigint = 2) THEN public.emp_phone.phone ELSE NULL::character varying END)::text), max((CASE WHEN ((public.emp_phone.type)::bigint = 3) THEN public.emp_phone.phone ELSE NULL:: +character varying END)::text) + Group By Key: public.emp_phone.name + -> Partition Iterator (cost=0.00..12.98 rows=298 width=244) + Output: public.emp_phone.name, public.emp_phone.type, public.emp_phone.phone + Iterations: 2 + Selected Partitions: 1..2 + -> Partitioned Seq Scan on public.emp_phone (cost=0.00..12.98 rows=298 width=244) + Output: public.emp_phone.name, public.emp_phone.type, public.emp_phone.phone + -> Hash (cost=24.43..24.43 rows=200 width=214) + Output: public.emp_phone.name, (max((CASE WHEN ((public.emp_phone.type)::bigint = 1) THEN public.emp_phone.phone ELSE NULL::character varying END)::text)), (max((CASE WHEN ((public +.emp_phone.type)::bigint = 2) THEN public.emp_phone.phone ELSE NULL::character 
varying END)::text)), (max((CASE WHEN ((public.emp_phone.type)::bigint = 3) THEN public.emp_phone.phone ELSE N +ULL::character varying END)::text)) + -> HashAggregate (cost=20.43..22.43 rows=200 width=340) + Output: public.emp_phone.name, max((CASE WHEN ((public.emp_phone.type)::bigint = 1) THEN public.emp_phone.phone ELSE NULL::character varying END)::text), max((CASE WHEN ((pub +lic.emp_phone.type)::bigint = 2) THEN public.emp_phone.phone ELSE NULL::character varying END)::text), max((CASE WHEN ((public.emp_phone.type)::bigint = 3) THEN public.emp_phone.phone ELSE +NULL::character varying END)::text) + Group By Key: public.emp_phone.name + -> Partition Iterator (cost=0.00..12.98 rows=298 width=244) + Output: public.emp_phone.name, public.emp_phone.type, public.emp_phone.phone + Iterations: 2 + Selected Partitions: 1..2 + -> Partitioned Seq Scan on public.emp_phone (cost=0.00..12.98 rows=298 width=244) + Output: public.emp_phone.name, public.emp_phone.type, public.emp_phone.phone +(23 rows) + +# The pivot_for clause supports multiple columns. +MogDB=# create table cust_sales_category(location varchar(20),prod_category varchar(50),customer_id int,sale_amount int); +CREATE TABLE +MogDB=# insert into cust_sales_category (location,prod_category,customer_id,sale_amount) values +MogDB-# ('north','furniture',2,875), +MogDB-# ('south','electronics',2,378), +MogDB-# ('east','gardening',4,136), +MogDB-# ('west','electronics',3,236), +MogDB-# ('central','furniture',3,174), +MogDB-# ('north','electronics',1,729), +MogDB-# ('east','gardening',2,147), +MogDB-# ('west','electronics',3,200), +MogDB-# ('north','furniture',4,987), +MogDB-# ('central','gardening',4,584), +MogDB-# ('south','electronics',3,714), +MogDB-# ('east','furniture',1,192), +MogDB-# ('west','gardening',3,946), +MogDB-# ('east','electronics',4,649), +MogDB-# ('south','furniture',2,503), +MogDB-# ('north','electronics',1,399), +MogDB-# ('central','gardening',3,259), +MogDB-# ('east','electronics',3,407), +MogDB-# ('west','furniture',1,545); +INSERT 0 19 +MogDB=# SELECT * FROM (SELECT location, prod_category, customer_id, sale_amount FROM cust_sales_category) PIVOT (SUM(sale_amount) FOR (customer_id, prod_category)IN ((1, 'furniture') AS furn1, (2, 'furniture') AS furn2, (1, 'electronics') AS elec1, (2, 'electronics') AS elec2)) order by 1; + location | furn1 | furn2 | elec1 | elec2 +----------+-------+-------+-------+------- + central | | | | + east | 192 | | | + north | | 875 | 1128 | + south | | 503 | | 378 + west | 545 | | | +(5 rows) + +MogDB=# explain(verbose, analyze) SELECT * FROM (SELECT location, prod_category, customer_id, sale_amount FROM cust_sales_category) PIVOT (SUM(sale_amount) FOR (customer_id, prod_category)IN ((1, 'furniture') AS furn1, (2, 'furniture') AS furn2, (1, 'electronics') AS elec1, (2, 'electronics') AS elec2)) order by 1; + QUERY PLAN + +---------------------------------------------------------------------------------------------------------------------------- + Sort (cost=37.88..38.38 rows=200 width=90) (actual time=0.075..0.076 rows=5 loops=1) + Output: cust_sales_category.location, (sum(CASE WHEN ((cust_sales_category.customer_id = 1) AND ((cust_sales_category.prod_category)::text = 'furniture'::text)) THEN cust_sales_category.sale_amount ELSE NULL +::integer END)), (sum(CASE WHEN ((cust_sales_category.customer_id = 2) AND ((cust_sales_category.prod_category)::text = 'furniture'::text)) THEN cust_sales_category.sale_amount ELSE NULL::integer END)), (sum(CA +SE WHEN ((cust_sales_category.customer_id = 1) 
AND ((cust_sales_category.prod_category)::text = 'electronics'::text)) THEN cust_sales_category.sale_amount ELSE NULL::integer END)), (sum(CASE WHEN ((cust_sales_c +ategory.customer_id = 2) AND ((cust_sales_category.prod_category)::text = 'electronics'::text)) THEN cust_sales_category.sale_amount ELSE NULL::integer END)) + Sort Key: cust_sales_category.location + Sort Method: quicksort Memory: 25kB + -> HashAggregate (cost=26.23..28.23 rows=200 width=216) (actual time=0.060..0.060 rows=5 loops=1) + Output: cust_sales_category.location, sum(CASE WHEN ((cust_sales_category.customer_id = 1) AND ((cust_sales_category.prod_category)::text = 'furniture'::text)) THEN cust_sales_category.sale_amount ELSE + NULL::integer END), sum(CASE WHEN ((cust_sales_category.customer_id = 2) AND ((cust_sales_category.prod_category)::text = 'furniture'::text)) THEN cust_sales_category.sale_amount ELSE NULL::integer END), sum(C +ASE WHEN ((cust_sales_category.customer_id = 1) AND ((cust_sales_category.prod_category)::text = 'electronics'::text)) THEN cust_sales_category.sale_amount ELSE NULL::integer END), sum(CASE WHEN ((cust_sales_ca +tegory.customer_id = 2) AND ((cust_sales_category.prod_category)::text = 'electronics'::text)) THEN cust_sales_category.sale_amount ELSE NULL::integer END) + Group By Key: cust_sales_category.location + -> Seq Scan on public.cust_sales_category (cost=0.00..13.82 rows=382 width=184) (actual time=0.019..0.022 rows=19 loops=1) + Output: cust_sales_category.location, cust_sales_category.customer_id, cust_sales_category.prod_category, cust_sales_category.sale_amount + Total runtime: 0.179 ms +(10 rows) + +# pivot_for supports with clauses +MogDB=# with a as ( +MogDB(# select 'Jack' Name ,'sex' Key,'male' Value union all +MogDB(# select 'Jack' ,'country','USA' union all +MogDB(# select 'Jack' ,'hobby','sing' union all +MogDB(# select 'Jack' ,'age','19' union all +MogDB(# select 'Bob' ,'country','UK' union all +MogDB(# select 'Bob' ,'age','20' union all +MogDB(# select 'Bob' ,'weight','70' union all +MogDB(# select 'Maria' ,'sex','female' union all +MogDB(# select 'Maria' ,'weight','50') +MogDB-# select * from a pivot (max(value) for key in ('sex' sex,'country' country,'hobby' hobby,'age' age,'weight' weight)) order by 1,2; + name | sex | country | hobby | age | weight +-------+--------+---------+-------+-----+-------- + Bob | | UK | | 20 | 70 + Jack | male | USA | sing | 19 | + Maria | female | | | | 50 +(3 rows) + +# PIVOT supports multiple aggregation functions +MogDB=# create table t_demo(id int, name text, nums int); +CREATE TABLE +MogDB=# insert into t_demo values(1,'aa',1000),(2,'aa',2000),(3,'aa',4000),(4,'bb',5000),(5,'bb',3000),(6,'cc',3500),(7,'dd',4200),(8,'dd',5500); +INSERT 0 8 +MogDB=# select * from (select name, nums from t_demo) pivot (sum(nums) total,min(nums) min for name in ('aa' as apple, 'bb' as orange, 'cc' as grape, 'dd' as mango)); + apple_total | apple_min | orange_total | orange_min | grape_total | grape_min | mango_total | mango_min +-------------+-----------+--------------+------------+-------------+-----------+-------------+----------- + 7000 | 1000 | 8000 | 3000 | 3500 | 3500 | 9700 | 4200 +(1 row) + +# PIVOT supports multi-column and multi-aggregate functions +MogDB=# create table tab1(type varchar2(50), weight int, height int); +CREATE TABLE +MogDB=# insert into tab1 values('A',50,10),('A',60,12),('B',40,8),('C',30,15); +INSERT 0 4 +MogDB=# select * from tab1 pivot (count(type) as ct, sum(weight) as wt, sum(height) as ht for type in ('A' as A, 'B' as B, 
'C' as C)); + a_ct | a_wt | a_ht | b_ct | b_wt | b_ht | c_ct | c_wt | c_ht +------+------+------+------+------+------+------+------+------ + 2 | 110 | 22 | 1 | 40 | 8 | 1 | 30 | 15 +(1 row) + +# PIVOT aggregation function supports expr +MogDB=# select * from emp_phone pivot(max(phone||'xxx') for type in (1 home, 2 office, 3 mobile)) order by 1; + name | home | office | mobile +------+--------------+--------------+-------------- + aaa | 2234-5678xxx | 3219-6066xxx | 5365-9599xxx + bbb | 6837-2745xxx | | 2649-5820xxx + ccc | 5838-9002xxx | 2749-5580xxx | + ddd | | 9876-3453xxx | +(4 rows) + +MogDB=# explain(verbose, analyze) select * from emp_phone pivot(max(phone||'xxx') for type in (1 home, 2 office, 3 mobile)) order by 1; + QUERY PLAN + +---------------------------------------------------------------------------------------------------------------------------- + Sort (cost=34.31..34.81 rows=200 width=214) (actual time=0.088..0.088 rows=4 loops=1) + Output: emp_phone.name, (max(CASE WHEN ((emp_phone.type)::bigint = 1) THEN ((emp_phone.phone)::text || 'xxx'::text) ELSE NULL::text END)), (max(CASE WHEN ((emp_phone.type)::bigint = 2) THEN ((emp_phone.phone +)::text || 'xxx'::text) ELSE NULL::text END)), (max(CASE WHEN ((emp_phone.type)::bigint = 3) THEN ((emp_phone.phone)::text || 'xxx'::text) ELSE NULL::text END)) + Sort Key: emp_phone.name + Sort Method: quicksort Memory: 25kB + -> HashAggregate (cost=22.67..24.67 rows=200 width=340) (actual time=0.073..0.075 rows=4 loops=1) + Output: emp_phone.name, max(CASE WHEN ((emp_phone.type)::bigint = 1) THEN ((emp_phone.phone)::text || 'xxx'::text) ELSE NULL::text END), max(CASE WHEN ((emp_phone.type)::bigint = 2) THEN ((emp_phone.ph +one)::text || 'xxx'::text) ELSE NULL::text END), max(CASE WHEN ((emp_phone.type)::bigint = 3) THEN ((emp_phone.phone)::text || 'xxx'::text) ELSE NULL::text END) + Group By Key: emp_phone.name + -> Partition Iterator (cost=0.00..12.98 rows=298 width=244) (actual time=0.012..0.022 rows=11 loops=1) + Output: emp_phone.name, emp_phone.type, emp_phone.phone + Iterations: 2 + Selected Partitions: 1..2 + -> Partitioned Seq Scan on public.emp_phone (cost=0.00..12.98 rows=298 width=244) (actual time=0.007..0.010 rows=11 loops=2) + Output: emp_phone.name, emp_phone.type, emp_phone.phone + Total runtime: 0.201 ms +(14 rows) + +# PIVOT supports create table as +MogDB=# create table test1 as select * from emp_phone pivot(max(phone) for type in (1 home, 2 office, 3 mobile)) order by 1; +INSERT 0 4 +MogDB=# select * from test1; + name | home | office | mobile +------+-----------+-----------+----------- + aaa | 2234-5678 | 3219-6066 | 5365-9599 + bbb | 6837-2745 | | 2649-5820 + ccc | 5838-9002 | 2749-5580 | + ddd | | 9876-3453 | +(4 rows) + +# PIVOT supports select into +MogDB=# select * into test2 from emp_phone pivot(max(phone) for type in (1 home, 2 office, 3 mobile)) order by 1; +INSERT 0 4 +MogDB=# select * from test2; + name | home | office | mobile +------+-----------+-----------+----------- + aaa | 2234-5678 | 3219-6066 | 5365-9599 + bbb | 6837-2745 | | 2649-5820 + ccc | 5838-9002 | 2749-5580 | + ddd | | 9876-3453 | +(4 rows) + +# PIVOT supports view +MogDB=# create view tv1 as select * from emp_phone pivot(max(phone) for type in (1 home, 2 office, 3 mobile)); +CREATE VIEW +MogDB=# \d+ tv1; + View "public.tv1" + Column | Type | Modifiers | Storage | Description +--------+-----------------------+-----------+----------+------------- + name | character varying(50) | | extended | + home | text | | extended | + office | text | | 
extended |
+ mobile | text                  |           | extended |
+View definition:
+ SELECT  *
+   FROM ( SELECT emp_phone.name,
+            max(
+                CASE
+                    WHEN emp_phone.type::bigint = 1 THEN emp_phone.phone
+                    ELSE NULL::character varying
+                END::text) AS home,
+            max(
+                CASE
+                    WHEN emp_phone.type::bigint = 2 THEN emp_phone.phone
+                    ELSE NULL::character varying
+                END::text) AS office,
+            max(
+                CASE
+                    WHEN emp_phone.type::bigint = 3 THEN emp_phone.phone
+                    ELSE NULL::character varying
+                END::text) AS mobile
+           FROM emp_phone
+          GROUP BY emp_phone.name) unnamed_pivot;
+
+MogDB=# select * from tv1;
+ name |   home    |  office   |  mobile
+------+-----------+-----------+-----------
+ aaa  | 2234-5678 | 3219-6066 | 5365-9599
+ bbb  | 6837-2745 |           | 2649-5820
+ ccc  | 5838-9002 | 2749-5580 |
+ ddd  |           | 9876-3453 |
+(4 rows)
+```
+
+### UNPIVOT
+
+```sql
+# UNPIVOT usage examples
+MogDB=# create table emp_phone1(name varchar2(50), home varchar2(50), office varchar2(50), mobile varchar2(50));
+CREATE TABLE
+MogDB=# insert into emp_phone1 values('aaa','1234-5678','3219-6066','5365-9583');
+INSERT 0 1
+MogDB=# insert into emp_phone1 values('bbb','5838-9002','2749-5580','');
+INSERT 0 1
+MogDB=# insert into emp_phone1 values('ccc','','9876-3453','');
+INSERT 0 1
+MogDB=# insert into emp_phone1 values('ddd','6837-2745','','2649-5820');
+INSERT 0 1
+MogDB=# insert into emp_phone1 values('eee','','','2649-5820');
+INSERT 0 1
+# In unpivot_in, implicit conversion of the IN types uses the implicit type conversion of the default list
+MogDB=# select * from emp_phone1 unpivot(phone for type in (home as 1, office as 2, mobile as 3));
+ name | type |   phone
+------+------+-----------
+ aaa  | 1    | 1234-5678
+ aaa  | 2    | 3219-6066
+ aaa  | 3    | 5365-9583
+ bbb  | 1    | 5838-9002
+ bbb  | 2    | 2749-5580
+ ccc  | 2    | 9876-3453
+ ddd  | 1    | 6837-2745
+ ddd  | 3    | 2649-5820
+ eee  | 3    | 2649-5820
+(9 rows)
+
+MogDB=# explain(verbose, analyze) select * from emp_phone1 unpivot(phone for type in (home as 1, office as 2, mobile as 3));
+                                                       QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------
+ Unpivot (cost=0.00..12.85 rows=487 width=154) (actual time=0.010..0.012 rows=9 loops=1)
+   Output: unnamed_unpivot.name, unnamed_unpivot.type, unnamed_unpivot.phone
+   Project 1: emp_phone1.name, 1, emp_phone1.home
+   Project 2: emp_phone1.name, 2, emp_phone1.office
+   Project 3: emp_phone1.name, 3, emp_phone1.mobile
+   Filter 1: (emp_phone1.home IS NOT NULL)
+   Filter 2: (emp_phone1.office IS NOT NULL)
+   Filter 3: (emp_phone1.mobile IS NOT NULL)
+   ->  Seq Scan on public.emp_phone1 (cost=0.00..11.63 rows=163 width=472) (actual time=0.007..0.007 rows=5 loops=1)
+         Output: emp_phone1.name, emp_phone1.home, emp_phone1.office, emp_phone1.mobile
+ Total runtime: 0.067 ms
+(11 rows)
+
+MogDB=# select * from emp_phone1 unpivot include nulls (phone for type in (home as 1, office as 2, mobile as 3));
+ name | type |   phone
+------+------+-----------
+ aaa  | 1    | 1234-5678
+ aaa  | 2    | 3219-6066
+ aaa  | 3    | 5365-9583
+ bbb  | 1    | 5838-9002
+ bbb  | 2    | 2749-5580
+ bbb  | 3    |
+ ccc  | 1    |
+ ccc  | 2    | 9876-3453
+ ccc  | 3    |
+ ddd  | 1    | 6837-2745
+ ddd  | 2    |
+ ddd  | 3    | 2649-5820
+ eee  | 1    |
+ eee  | 2    |
+ eee  | 3    | 2649-5820
+(15 rows)
+
+# UNPIVOT supports parallelism
+MogDB=# set query_dop = 4;
+SET
+MogDB=# set smp_thread_cost = 0;
+SET
+MogDB=# explain(verbose,analyze) select * from emp_phone1 unpivot include nulls (phone for type in (home as 1, office as 2, mobile as 3));
+                                                              QUERY PLAN 
+--------------------------------------------------------------------------------------------------------------------------------------- + Streaming(type: LOCAL GATHER dop: 1/4) (cost=0.00..23.04 rows=489 width=154) (actual time=[18.733,32.127]..[18.733,32.127], rows=15) + Output: unnamed_unpivot.name, unnamed_unpivot.type, unnamed_unpivot.phone + -> Unpivot (cost=0.00..3.21 rows=489 width=154) (actual time=[0.001,0.001]..[0.008,0.015], rows=15) + Output: unnamed_unpivot.name, unnamed_unpivot.type, unnamed_unpivot.phone + Project 1: emp_phone1.name, 1, emp_phone1.home + Project 2: emp_phone1.name, 2, emp_phone1.office + Project 3: emp_phone1.name, 3, emp_phone1.mobile + -> Seq Scan on public.emp_phone1 (cost=0.00..2.91 rows=163 width=472) (actual time=[0.000,0.000]..[0.005,0.006], rows=5) + Output: emp_phone1.name, emp_phone1.home, emp_phone1.office, emp_phone1.mobile + Total runtime: 33.168 ms +(10 rows) + +# UNPIVOT supports join +MogDB=# explain(verbose,analyze) select * from emp_phone1 unpivot(phone for type in (home as 1, office as 2, mobile as 3)) as p1, emp_phone1 unpivot(phone for type in (home as 1, office as 2, mobile as 3)) as p2 where p1.name=p2.name; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Hash Join (cost=18.94..46.08 rows=1186 distinct=[200, 200] width=308) (actual time=0.162..0.170 rows=19 loops=1) + Output: p1.name, p1.type, p1.phone, p2.name, p2.type, p2.phone + Hash Cond: ((p1.name)::text = (p2.name)::text) + -> Unpivot (cost=0.00..12.85 rows=487 width=154) (actual time=0.009..0.013 rows=9 loops=1) + Output: p1.name, p1.type, p1.phone + Project 1: public.emp_phone1.name, 1, public.emp_phone1.home + Project 2: public.emp_phone1.name, 2, public.emp_phone1.office + Project 3: public.emp_phone1.name, 3, public.emp_phone1.mobile + Filter 1: (public.emp_phone1.home IS NOT NULL) + Filter 2: (public.emp_phone1.office IS NOT NULL) + Filter 3: (public.emp_phone1.mobile IS NOT NULL) + -> Seq Scan on public.emp_phone1 (cost=0.00..11.63 rows=163 width=472) (actual time=0.006..0.006 rows=5 loops=1) + Output: public.emp_phone1.name, public.emp_phone1.home, public.emp_phone1.office, public.emp_phone1.mobile + -> Hash (cost=17.72..17.72 rows=487 width=154) (actual time=0.024..0.024 rows=9 loops=1) + Output: p2.name, p2.type, p2.phone + Buckets: 32768 Batches: 1 Memory Usage: 257kB + -> Unpivot (cost=0.00..12.85 rows=487 width=154) (actual time=0.001..0.014 rows=9 loops=1) + Output: p2.name, p2.type, p2.phone + Project 1: public.emp_phone1.name, 1, public.emp_phone1.home + Project 2: public.emp_phone1.name, 2, public.emp_phone1.office + Project 3: public.emp_phone1.name, 3, public.emp_phone1.mobile + Filter 1: (public.emp_phone1.home IS NOT NULL) + Filter 2: (public.emp_phone1.office IS NOT NULL) + Filter 3: (public.emp_phone1.mobile IS NOT NULL) + -> Seq Scan on public.emp_phone1 (cost=0.00..11.63 rows=163 width=472) (actual time=0.001..0.013 rows=5 loops=1) + Output: public.emp_phone1.name, public.emp_phone1.home, public.emp_phone1.office, public.emp_phone1.mobile + Total runtime: 0.290 ms +(27 rows) + +MogDB=# explain(verbose,analyze) select * from emp_phone1 unpivot(phone for type in (home as 1, office as 2, mobile as 3)) as p1, emp_phone as p2 where p1.name=p2.name; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------- + Hash Join (cost=16.71..38.64 rows=726 distinct=[200, 
200] width=398) (actual time=0.187..0.194 rows=26 loops=1) + Output: p1.name, p1.type, p1.phone, p2.name, p2.type, p2.phone + Hash Cond: ((p1.name)::text = (p2.name)::text) + -> Unpivot (cost=0.00..12.85 rows=487 width=154) (actual time=0.009..0.011 rows=9 loops=1) + Output: p1.name, p1.type, p1.phone + Project 1: emp_phone1.name, 1, emp_phone1.home + Project 2: emp_phone1.name, 2, emp_phone1.office + Project 3: emp_phone1.name, 3, emp_phone1.mobile + Filter 1: (emp_phone1.home IS NOT NULL) + Filter 2: (emp_phone1.office IS NOT NULL) + Filter 3: (emp_phone1.mobile IS NOT NULL) + -> Seq Scan on public.emp_phone1 (cost=0.00..11.63 rows=163 width=472) (actual time=0.005..0.005 rows=5 loops=1) + Output: emp_phone1.name, emp_phone1.home, emp_phone1.office, emp_phone1.mobile + -> Hash (cost=12.98..12.98 rows=298 width=244) (actual time=0.040..0.040 rows=11 loops=1) + Output: p2.name, p2.type, p2.phone + Buckets: 32768 Batches: 1 Memory Usage: 257kB + -> Partition Iterator (cost=0.00..12.98 rows=298 width=244) (actual time=0.025..0.033 rows=11 loops=1) + Output: p2.name, p2.type, p2.phone + Iterations: 2 + Selected Partitions: 1..2 + -> Partitioned Seq Scan on public.emp_phone p2 (cost=0.00..12.98 rows=298 width=244) (actual time=0.005..0.005 rows=11 loops=2) + Output: p2.name, p2.type, p2.phone + Total runtime: 0.299 ms +(23 rows) + +# UNPIVOT supports multiple columns +MogDB=# create table emp_phone2(name varchar2(50), home varchar2(50), office varchar2(50), mobile varchar2(50), extra varchar2(50)); +CREATE TABLE +MogDB=# insert into emp_phone2 values('aaa','1234-5678','3219-6066','5365-9583','11111'); +INSERT 0 1 +MogDB=# insert into emp_phone2 values('bbb','5838-9002','2749-5580','','22222'); +INSERT 0 1 +MogDB=# insert into emp_phone2 values('ccc','','9876-3453','','333333'); +INSERT 0 1 +MogDB=# insert into emp_phone2 values('ddd','6837-2745','','2649-5820','44444'); +INSERT 0 1 +MogDB=# insert into emp_phone2 values('eee','','','2649-5820','44444'); +INSERT 0 1 +MogDB=# select * from emp_phone2 unpivot((phone,phone1) for (type1,type2) in ((home,office) as (1,11), (mobile,extra) as (3,33))); + name | type1 | type2 | phone | phone1 +------+-------+-------+-----------+----------- + aaa | 1 | 11 | 1234-5678 | 3219-6066 + aaa | 3 | 33 | 5365-9583 | 11111 + bbb | 1 | 11 | 5838-9002 | 2749-5580 + bbb | 3 | 33 | | 22222 + ccc | 1 | 11 | | 9876-3453 + ccc | 3 | 33 | | 333333 + ddd | 1 | 11 | 6837-2745 | + ddd | 3 | 33 | 2649-5820 | 44444 + eee | 3 | 33 | 2649-5820 | 44444 +(9 rows) + +MogDB=# explain(verbose,analyze) select * from emp_phone2 unpivot((phone,phone1) for (type1,type2) in ((home,office) as (1,11), (mobile,extra) as (3,33))); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Unpivot (cost=0.00..11.98 rows=264 width=190) (actual time=0.009..0.012 rows=9 loops=1) + Output: unnamed_unpivot.name, unnamed_unpivot.type1, unnamed_unpivot.type2, unnamed_unpivot.phone, unnamed_unpivot.phone1 + Project 1: emp_phone2.name, 1, 11, emp_phone2.home, emp_phone2.office + Project 2: emp_phone2.name, 3, 33, emp_phone2.mobile, emp_phone2.extra + Filter 1: ((emp_phone2.home IS NOT NULL) OR (emp_phone2.office IS NOT NULL)) + Filter 2: ((emp_phone2.mobile IS NOT NULL) OR (emp_phone2.extra IS NOT NULL)) + -> Seq Scan on public.emp_phone2 (cost=0.00..11.32 rows=132 width=590) (actual time=0.006..0.007 rows=5 loops=1) + Output: emp_phone2.name, emp_phone2.home, emp_phone2.office, emp_phone2.mobile, emp_phone2.extra + 
Total runtime: 0.078 ms +(9 rows) + +# UNPIVOT supports the with clause +MogDB=# with t as (select 0 a,1 b,2 c,3 d) select * from t unpivot (val for col in (A,B,C,D)); + col | val +-----+----- + a | 0 + b | 1 + c | 2 + d | 3 +(4 rows) + +MogDB=# explain(verbose,analyze) with t as (select 0 a,1 b,2 c,3 d) select * from t unpivot (val for col in (A,B,C,D)); + QUERY PLAN +------------------------------------------------------------------------------------------ + Unpivot (cost=0.00..0.02 rows=1 width=36) (actual time=0.004..0.005 rows=4 loops=1) + Output: unnamed_unpivot.col, unnamed_unpivot.val + Project 1: 'a'::text, (0) + Project 2: 'b'::text, (1) + Project 3: 'c'::text, (2) + Project 4: 'd'::text, (3) + Filter 1: ((0) IS NOT NULL) + Filter 2: ((1) IS NOT NULL) + Filter 3: ((2) IS NOT NULL) + Filter 4: ((3) IS NOT NULL) + -> Result (cost=0.00..0.01 rows=1 width=0) (actual time=0.002..0.002 rows=1 loops=1) + Output: 0, 1, 2, 3 + Total runtime: 0.047 ms +(13 rows) + +# UNPIVOT supports view +MogDB=# create view tv2 as select * from emp_phone2 unpivot include nulls((phone,phone1) for (type1,type2) in ((home,office) as (1,11), (mobile,extra) as (3,33))); +CREATE VIEW +MogDB=# \d+ tv2; + View "public.tv2" + Column | Type | Modifiers | Storage | Description +--------+-----------------------+-----------+----------+------------- + name | character varying(50) | | extended | + type1 | integer | | plain | + type2 | integer | | plain | + phone | character varying | | extended | + phone1 | character varying | | extended | +View definition: + SELECT * + FROM emp_phone2 UNPIVOT INCLUDE NULLS ((phone,phone1) FOR (type1,type2) IN ((home,office) AS (1,11),(mobile,extra) AS (3,33))); +MogDB=# select * from tv2; + name | type1 | type2 | phone | phone1 +------+-------+-------+-----------+----------- + aaa | 1 | 11 | 1234-5678 | 3219-6066 + aaa | 3 | 33 | 5365-9583 | 11111 + bbb | 1 | 11 | 5838-9002 | 2749-5580 + bbb | 3 | 33 | | 22222 + ccc | 1 | 11 | | 9876-3453 + ccc | 3 | 33 | | 333333 + ddd | 1 | 11 | 6837-2745 | + ddd | 3 | 33 | 2649-5820 | 44444 + eee | 1 | 11 | | + eee | 3 | 33 | 2649-5820 | 44444 +(10 rows) +``` + +## Related Pages + +[SELECT](../../reference-guide/sql-syntax/SELECT.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/stored-procedure-out-parameters-in-pbe-mode.md b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/stored-procedure-out-parameters-in-pbe-mode.md new file mode 100644 index 00000000..e4d0a812 --- /dev/null +++ b/product/en/docs-mogdb/v5.0/characteristic-description/compatibility/stored-procedure-out-parameters-in-pbe-mode.md @@ -0,0 +1,179 @@ +--- +title: Support for Stored Procedure OUT Parameters in PBE Mode +summary: When using PBE connection methods such as JDBC, support is provided for returning values from anonymous blocks with OUT/INOUT type parameters in stored procedures to the corresponding driver side. The use of this feature requires the `enable_outparams_override` parameter to be enabled. +author: 任建成 韩旭 +date: 2024-06-24 +--- + +# Support for Stored Procedure OUT Parameters in PBE Mode + +## Availability + +This feature is available since MogDB 5.0.8. + +## Introduction + +When using JDBC and other PBE connection methods, support is provided for returning values from the left side of expressions in anonymous blocks (including OUT/INOUT type data in stored procedure parameter lists) to the corresponding driver side. 
To use this feature, set the `proc_outparam_override` option in the `behavior_compat_options` parameter. The method is as follows: + +```sql +set behavior_compat_options = 'proc_outparam_override'; +``` + +Alternatively, set this parameter in the `postgresql.conf` file and restart the database. + +## Benefits + +Improves compatibility, aligning with Oracle's corresponding usage. Enhances usability, facilitating the writing of JDBC and other programs that connect to and operate the database. + +## Description + +When using JDBC and other PBE connection methods, support is provided for returning values from the left side of expressions in anonymous blocks (including OUT/INOUT type data in stored procedure parameter lists) to the corresponding driver side. Taking JDBC as an example, when writing and executing anonymous blocks, users may want to return the OUT parameters of expressions to the Java program and process the data there. Replace the variable at the corresponding position in the anonymous block with “?”, register its data type in the subsequent program, and then use the corresponding `get` method to retrieve the data. + +The supported data types include the basic types covered by the default JDBC type mapping, as well as `tableof`, `object`, `array`, `refcursor`, `composite`, and so on. + +For the default JDBC data type mapping, see: + +get: [java.sql.CallableStatement](../../developer-guide/dev/2-development-based-on-jdbc/15-JDBC/2-java-sql-CallableStatement.md) + +set: [java.sql.PreparedStatement](../../developer-guide/dev/2-development-based-on-jdbc/15-JDBC/5-java-sql-PreparedStatement.md) + +Supported scenarios include direct execution of anonymous blocks; calling functions, stored procedures, and packages within anonymous blocks; and internal calls to `execute immediate`, `select into`, `bulk into`, `fetch into`, and other scenarios within anonymous blocks. + +## Constraints + +1. This feature can only be used when the database compatibility mode is Oracle (that is, the mode is not specified when the database is created, or DBCOMPATIBILITY='A'); it cannot be used in other database compatibility modes. + +2. The JDBC version should be greater than or equal to 5.0.0.4, and the JDBC end should be correctly connected to the database. + +3. It is only applicable to the left side of expressions (including OUT/INOUT type data in the stored procedure parameter list). + +4. In the anonymous block executed by the user, variables should not be named with a “$” prefix, such as “$1”, as this may cause inaccurate data issues. + +5. The `execute immediate` scenario does not support the direct use of anonymous block OUT parameters, because a “?” inside the dynamic SQL string is not treated as a variable to be replaced; however, it can be used in conjunction with `using`. + +## Example + +```java +public static void test_case_0001_output_mutil(Connection conn) throws Exception { + // enable the compatibility option for this session + String baseSQLStrings = "set behavior_compat_options='proc_outparam_override';"; + // each fragment keeps a trailing space so the concatenated anonymous block parses correctly + String baseSQLString = "DECLARE " + + "baselen integer := 199; " + + "BEGIN " + + "? := baselen; " + + "? := baselen*2; " + + "END;"; + try { + CallableStatement pstmt = conn.prepareCall(baseSQLStrings); + pstmt.execute(); + pstmt.close(); + + pstmt = conn.prepareCall(baseSQLString); + System.out.println("Prepare param out SQL succeed!"); + + pstmt.registerOutParameter(1, Types.INTEGER); + System.out.println("Register succeed!"); + + pstmt.registerOutParameter(2, Types.INTEGER); + System.out.println("Register succeed!"); + + pstmt.execute(); + System.out.println("Execute succeed!"); + + + if (199 == pstmt.getInt(1)) { + System.out.println("answer true"); + } else { + System.out.println("answer false"); + } + + if (398 == pstmt.getInt(2)) { + System.out.println("answer true"); + } else { + System.out.println("answer false"); + } + + System.out.println("Get succeed!"); + + pstmt.close(); + System.out.println("Run succeed!"); + } + catch (Exception e) { + String exceptionStr = e.toString(); + System.out.println(exceptionStr); + } +} +``` + +In the above example, we use the anonymous block OUT parameter feature in `baseSQLString`, which returns the values of two expression left-hand sides. The results can be obtained and processed on the Java side. + +```java +public static void t02_base_test(Connection conn) throws Exception { + String createPackageHead = + "CREATE OR REPLACE PACKAGE testuser.pck2 AS" + + " PROCEDURE get_IN_OUT(output1 OUT varchar(26), output2 OUT bool, output3 OUT TINYINT, output4 OUT smallint, ret1 IN OUT DOUBLE PRECISION);" + + "END pck2;"; + + String createPackageBody = + "CREATE OR REPLACE PACKAGE BODY testuser.pck2 AS" + + " PROCEDURE get_IN_OUT(output1 OUT varchar(26), output2 OUT bool, output3 OUT TINYINT, output4 OUT smallint, ret1 IN OUT DOUBLE PRECISION) IS" + + " BEGIN" + + " output1 := 'abcdefghigklmnopqrstuvwxyz';" + + " output2 := false;" + + " output3 := 2;" + + " output4 := 12;" + + " ret1 := ret1 + 10;" + + " END get_IN_OUT;" + + "END pck2;"; + + String baseSQLString = + "BEGIN" + + " testuser.pck2.get_IN_OUT(?, ?, ?, ?, ?);" + + "END;"; + try { + CallableStatement pstmt = conn.prepareCall(createPackageHead); + pstmt.execute(); + pstmt.close(); + System.out.println("HEAD Prepare succeed!"); + + pstmt = conn.prepareCall(createPackageBody); + pstmt.execute(); + pstmt.close(); + System.out.println("BODY Prepare succeed!"); + + pstmt = conn.prepareCall(baseSQLString); + + // set the IN value of the INOUT parameter (placeholder index 5) + pstmt.setDouble(5, 99.99999999); + + // register the five OUT/INOUT parameters in placeholder order + pstmt.registerOutParameter(1, Types.VARCHAR); + pstmt.registerOutParameter(2, Types.BOOLEAN); + pstmt.registerOutParameter(3, Types.TINYINT); + pstmt.registerOutParameter(4, Types.SMALLINT); + pstmt.registerOutParameter(5, Types.DOUBLE); + System.out.println("Register succeed!"); + + pstmt.execute(); + System.out.println("Execute succeed!"); + + System.out.println(pstmt.getString(1)); + System.out.println(pstmt.getBoolean(2)); + System.out.println(pstmt.getByte(3)); + System.out.println(pstmt.getShort(4)); + System.out.println(pstmt.getDouble(5)); + + System.out.println("Get succeed!"); + + pstmt.close(); + System.out.println("Run succeed!"); + } + catch (Exception e) { + String exceptionStr = e.toString(); + System.out.println(exceptionStr); + } +} +``` + +In the above example, we use the anonymous block OUT parameter feature in `baseSQLString`, which returns five basic-type values through a package call and uses both OUT and INOUT parameters. The results can be obtained and processed on the Java side.
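+ +As a quick check before running the JDBC examples above, you can confirm on the gsql side that the compatibility option is active (a minimal sketch; the displayed value should contain `proc_outparam_override`): + +```sql +-- show the compatibility options in effect for the current session +show behavior_compat_options; +``` + +Note that the 1-based indexes passed to `registerOutParameter`, the `set` methods, and the `get` methods follow the left-to-right order of the “?” placeholders in the anonymous block.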
+ +## Related Pages + +[behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options), [Development Based on JDBC](../../developer-guide/dev/2-development-based-on-jdbc/development-based-on-jdbc.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/1-access-control-model.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/1-access-control-model.md index a7ffdf43..179c7926 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/1-access-control-model.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/1-access-control-model.md @@ -1,46 +1,46 @@ ---- -title: Access Control Model -summary: Access Control Model -author: Guo Huan -date: 2022-05-07 ---- - -# Access Control Model - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -The access control model can be used to manage users' access permissions and grant them the minimum permissions required for completing a task. - -## Benefits - -You can create users and grant permissions to them as needed to minimize risks. - -## Description - -The database provides a role-based access control model and an access control model based on the separation of duties. In the role-based access control model, database roles are classified into system administrator, monitoring administrator, O&M administrator, security policy administrator, and common user. The security administrator creates roles or user groups and grant permissions to roles. The monitoring administrator views the monitoring views or functions in **dbe_perf** mode. The O&M administrator uses the Roach tool to back up and restore the database. The security policy administrator creates resource labels, anonymization policies, and unified audit policies. A user who is assigned a role has the role's permissions. - -In the access control model based on the separation of duties, database roles are classified into system administrator, security administrator, audit administrator, monitoring administrator, O&M administrator, security policy administrator, and common user. The security administrator creates users, the system administrator grants permissions to users, and the audit administrator audits all user behavior. - -By default, the role-based access control model is used. To switch to another mode, set the GUC parameter **enableSeparationOfDuty** to **on**. - -## Enhancements - -None - -## Constraints - -- The permissions of the system administrator are controlled by the GUC parameter **enableSeparationOfDuty**. - -- The database needs to be restarted when the separation of duties is enabled, disabled or switched. In addition, improper user permissions in the new model cannot be automatically identified. The database administrator needs to manually identify and rectify the fault. - -## Dependencies - -None - -## Related Pages - +--- +title: Access Control Model +summary: Access Control Model +author: Guo Huan +date: 2022-05-07 +--- + +# Access Control Model + +## Availability + +This feature is available as of MogDB 1.1.0. + +## Introduction + +The access control model can be used to manage users' access permissions and grant them the minimum permissions required for completing a task. + +## Benefits + +You can create users and grant permissions to them as needed to minimize risks. 
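+ +For example, a minimal sketch of granting least privilege (the user and table names here are hypothetical): + +```sql +-- create a common user and grant only read access to a single table +CREATE USER analyst PASSWORD 'Analyst@1234'; +GRANT SELECT ON finance_report TO analyst; +```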
+ +## Description + +The database provides a role-based access control model and an access control model based on the separation of duties. In the role-based access control model, database roles are classified into system administrator, monitoring administrator, O&M administrator, security policy administrator, and common user. The security administrator creates roles or user groups and grants permissions to roles. The monitoring administrator views the monitoring views or functions in the **dbe_perf** schema. The O&M administrator uses the Roach tool to back up and restore the database. The security policy administrator creates resource labels, anonymization policies, and unified audit policies. A user who is assigned a role has the role's permissions. + +In the access control model based on the separation of duties, database roles are classified into system administrator, security administrator, audit administrator, monitoring administrator, O&M administrator, security policy administrator, and common user. The security administrator creates users, the system administrator grants permissions to users, and the audit administrator audits all user behavior. + +By default, the role-based access control model is used. To switch to the separation-of-duties model, set the GUC parameter **enableSeparationOfDuty** to **on**. + +## Enhancements + +None + +## Constraints + +- The permissions of the system administrator are controlled by the GUC parameter **enableSeparationOfDuty**. + +- The database needs to be restarted when the separation of duties is enabled, disabled, or switched. In addition, improper user permissions in the new model cannot be automatically identified. The database administrator needs to manually identify and rectify the fault. + +## Dependencies + +None + +## Related Pages + 
- -Row-level access control is used to control the visibility of row-level data in tables. By predefining filters for data tables, the expressions that meet the specified condition can be applied to execution plans in the query optimization phase, which will affect the final execution result. Currently, the SQL statements that can be affected include **SELECT**, **UPDATE**, and **DELETE**. - -## Enhancements - -None - -## Constraints - -- Row-level access control policies can be applied only to **SELECT**, **UPDATE**, and **DELETE** operations and cannot be applied to **INSERT** and **MERGE** operations. -- Row-level access control policies can be defined for row-store tables, row-store partitioned tables, column-store tables, column-store partitioned tables, replication tables, unlogged tables, and hash tables. Row-level access control policies cannot be defined for foreign tables, and temporary tables. -- Row-level access control policies cannot be defined for views. -- A maximum of 100 row-level access control policies can be defined for a table. -- Initial users and system administrators are not affected by row-level access control policies. -- If a dynamic data anonymization policy is configured for a table that has the row-level access control policies defined, grant the trigger permission of the table to other users with caution to prevent other users from using the trigger to bypass the anonymization policy. - -## Dependencies - -None - -## Related Pages - +--- +title: Row-Level Access Control +summary: Row-Level Access Control +author: Guo Huan +date: 2022-05-07 +--- + +# Row-Level Access Control + +## Availability + +This feature is available as of MogDB 1.1.0. + +## Introduction + +The row-level access control feature enables database access control to be accurate to each row of data tables. When different users perform the same SQL query operation, the read results may be different. + +## Benefits + +When different users perform the same SQL query operation, the read results may be different. + +## Description + +You can create a row-level access control policy for a data table. The policy defines an expression that takes effect only for specific database users and SQL operations. When a database user accesses the data table, if a SQL statement meets the specified row-level access control policy of the data table, the expressions that meet the specified condition will be combined by using **AND** or **OR** based on the attribute type (**PERMISSIVE** or **RESTRICTIVE**) and applied to the execution plan in the query optimization phase. + +Row-level access control is used to control the visibility of row-level data in tables. By predefining filters for data tables, the expressions that meet the specified condition can be applied to execution plans in the query optimization phase, which will affect the final execution result. Currently, the SQL statements that can be affected include **SELECT**, **UPDATE**, and **DELETE**. + +## Enhancements + +None + +## Constraints + +- Row-level access control policies can be applied only to **SELECT**, **UPDATE**, and **DELETE** operations and cannot be applied to **INSERT** and **MERGE** operations. +- Row-level access control policies can be defined for row-store tables, row-store partitioned tables, column-store tables, column-store partitioned tables, replication tables, unlogged tables, and hash tables. Row-level access control policies cannot be defined for foreign tables, and temporary tables. 
+- Row-level access control policies cannot be defined for views. +- A maximum of 100 row-level access control policies can be defined for a table. +- Initial users and system administrators are not affected by row-level access control policies. +- If a dynamic data anonymization policy is configured for a table that has the row-level access control policies defined, grant the trigger permission of the table to other users with caution to prevent other users from using the trigger to bypass the anonymization policy. + +## Dependencies + +None + +## Related Pages + [Row-Level Access Control](../../security-guide/security/2-managing-users-and-their-permissions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/11-password-strength-verification.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/11-password-strength-verification.md index c1a742ce..0e8ce16e 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/11-password-strength-verification.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/11-password-strength-verification.md @@ -1,75 +1,75 @@ ---- -title: Password Strength Verification -summary: Password Strength Verification -author: Guo Huan -date: 2022-05-07 ---- - -# Password Strength Verification - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -Verifies the password strength when users access the database. - -## Benefits - -Users cannot set passwords with low strength to enhance customer data security. - -## Description - -You need to specify a password when initializing a database, creating a user, or modifying a user. The password must meet the strength requirements. Otherwise, the system prompts you to enter the password again. Password complexity requirements: - -- Minimum number of uppercase letters (A-Z) (**password_min_uppercase**) -- Minimum number of lowercase letters (a-z) (**password_min_lowercase**) -- Minimum number of digits (0-9) (**password_min_digital**) -- Minimum number of special characters (**password_min_special**) -- Minimum password length (**password_min_length**) -- Maximum password length (**password_max_length**) -- A password must contain at least three types of the characters (uppercase letters, lowercase letters, digits, and special characters). -- A password is case insensitive and cannot be the username or the username spelled backwards. -- A new password cannot be the current password and the current password spelled backwards. -- It must be a strong password. - - Weak passwords are weak passwords that are easy to crack. The definition of weak passwords may vary with users or user groups. Users can define their own weak passwords. - -If parameter **password_policy** is set to **1**, the default password complexity rule is used to check passwords. - -Passwords in the weak password dictionary are stored in the **gs_global_config** system catalog (the record whose name field is **weak_password** is the stored weak password). When a user is created or modified, the password set by the user is compared with the password stored in the weak password dictionary. If the password is matched, a message is displayed, indicating that the password is weak and the password fails to be set. - -The weak password dictionary is empty by default. 
You can add or delete weak passwords using the following syntax: - -```sql -CREATE WEAK PASSWORD DICTIONARY WITH VALUES ('password1'), ('password2'); - -DROP WEAK PASSWORD DICTIONARY; -``` - -In the preceding statement, **password1** and **password2** are weak passwords prepared by users. After the statement is executed successfully, the passwords are saved to the weak password system catalog. - -When a user attempts to run the CREATE WEAK PASSWORD DICTIONARY statement to insert a weak password that already exists in the table, only one weak password is retained in the table. - -The DROP WEAK PASSWORD DICTIONARY statement clears weak passwords in the entire system catalog. - -The gs_global_config system catalog does not have a unique index. You are not advised to use the COPY FROM statement to copy the same data to the gs_global_config system catalog. - -To audit weak password operations, set the third bit of the value of the **audit_system_object** parameter to **1**. - -## Enhancements - -In MogDB, the weak password dictionary function is implemented. - -## Constraints - -- Initial users, system administrators, and security administrators can view, add, and delete weak password dictionaries. -- Common users can view but cannot add or delete weak password dictionaries. - -## Dependencies - -None - -## Related Pages - -[CREATE WEAK PASSWORD DICTIONARY](../../reference-guide/sql-syntax/CREATE-WEAK-PASSWORD-DICTIONARY.md) +--- +title: Password Strength Verification +summary: Password Strength Verification +author: Guo Huan +date: 2022-05-07 +--- + +# Password Strength Verification + +## Availability + +This feature is available as of MogDB 1.1.0. + +## Introduction + +Verifies the password strength when users access the database. + +## Benefits + +Users cannot set low-strength passwords, which enhances customer data security. + +## Description + +You need to specify a password when initializing a database, creating a user, or modifying a user. The password must meet the strength requirements. Otherwise, the system prompts you to enter the password again. Password complexity requirements: + +- Minimum number of uppercase letters (A-Z) (**password_min_uppercase**) +- Minimum number of lowercase letters (a-z) (**password_min_lowercase**) +- Minimum number of digits (0-9) (**password_min_digital**) +- Minimum number of special characters (**password_min_special**) +- Minimum password length (**password_min_length**) +- Maximum password length (**password_max_length**) +- A password must contain at least three of the four character types (uppercase letters, lowercase letters, digits, and special characters). +- A password is case insensitive and cannot be the username or the username spelled backwards. +- A new password cannot be the current password or the current password spelled backwards. +- It must be a strong password. + - Weak passwords are passwords that are easy to crack. The definition of weak passwords may vary with users or user groups. Users can define their own weak passwords. + +If parameter **password_policy** is set to **1**, the default password complexity rule is used to check passwords. + +Passwords in the weak password dictionary are stored in the **gs_global_config** system catalog (the record whose name field is **weak_password** is the stored weak password). When a user is created or modified, the password set by the user is compared with the password stored in the weak password dictionary.
If the password is matched, a message is displayed, indicating that the password is weak and the password fails to be set. + +The weak password dictionary is empty by default. You can add or delete weak passwords using the following syntax: + +```sql +CREATE WEAK PASSWORD DICTIONARY WITH VALUES ('password1'), ('password2'); + +DROP WEAK PASSWORD DICTIONARY; +``` + +In the preceding statement, **password1** and **password2** are weak passwords prepared by users. After the statement is executed successfully, the passwords are saved to the weak password system catalog. + +When a user attempts to run the CREATE WEAK PASSWORD DICTIONARY statement to insert a weak password that already exists in the table, only one weak password is retained in the table. + +The DROP WEAK PASSWORD DICTIONARY statement clears weak passwords in the entire system catalog. + +The gs_global_config system catalog does not have a unique index. You are not advised to use the COPY FROM statement to copy the same data to the gs_global_config system catalog. + +To audit weak password operations, set the third bit of the value of the **audit_system_object** parameter to **1**. + +## Enhancements + +In MogDB, the weak password dictionary function is implemented. + +## Constraints + +- Initial users, system administrators, and security administrators can view, add, and delete weak password dictionaries. +- Common users can view but cannot add or delete weak password dictionaries. + +## Dependencies + +None + +## Related Pages + +[CREATE WEAK PASSWORD DICTIONARY](../../reference-guide/sql-syntax/CREATE-WEAK-PASSWORD-DICTIONARY.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/12-equality-query-in-a-fully-encrypted-database.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/12-equality-query-in-a-fully-encrypted-database.md index b1916cd8..cf14afe5 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/12-equality-query-in-a-fully-encrypted-database.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/12-equality-query-in-a-fully-encrypted-database.md @@ -1,94 +1,94 @@ ---- -title: Equality Query in a Fully-encrypted Database -summary: Equality Query in a Fully-encrypted Database -author: Guo Huan -date: 2022-05-07 ---- - -# Equality Query in a Fully-encrypted Database - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The encrypted database aims to protect privacy throughout the data lifecycle. In this way, data is always in ciphertext during transmission, computing, and storage regardless of the service scenario and environment. After the data owner encrypts data on the client and sends the encrypted data to the server, no attacker can obtain valuable information even if the attacker steals user data by exploiting system vulnerabilities. In this way, data privacy is protected. - -## Benefits - -The entire service data flow is in ciphertext during data processing, so the following can be implemented by using a fully-encrypted database: - -1. Protect data privacy and security throughout the lifecycle on the cloud. Attackers cannot obtain valid information from the database server regardless of the data status. -2. Help cloud service providers obtain third-party trust. 
Users, including service administrators and O&M administrators in enterprise service scenarios and application developers in consumer cloud services, can keep keys by themselves so that users with high permissions cannot obtain valid data. -3. Enable cloud databases to better comply with personal privacy protection laws and regulations with the help of the fully-encrypted database. - -## Description - -From the perspective of users, the encrypted equality query functions are divided into three parts, which are implemented by the newly added KeyTool and the enhanced MogDB gsql client. - -First, this feature provides the client key management function. Users can use KeyTool to generate, destroy, and update CMKs, and import and export keys. With the import and export functions of KeyTool, CMKs can be transmitted between different clients. In addition, the KeyTool implements key management on a single client. By configuring management files, you can store and update keys. - -In addition, this feature provides the key creation and encrypted table creation functions. The SQL syntax CREATE CLINET MASTER KEY and CREATE COLUMN ENCRYPTION KEY are added to record and manage CMK and CEK metadata in the database. The CMK and CEK information is recorded in the new system catalog. The CREATE TABLE syntax is extended to specify a column encryption key and encryption algorithm for each sensitive information column in a table, facilitating subsequent ciphertext data storage. - -This feature supports the encrypted equality query function, which is the core of the entire feature. Although users are unaware of the ciphertext query, the query of sensitive data is restricted by the specifications of the current encrypted equality query. - -From the overall perspective, this feature is used to store and manage data based on sensitive data protection requirements and implement query tasks based on ciphertext data. - -## Enhancements - -None. - -## Constraints - -- Data is encrypted at the column level, and encryption policies cannot be differentiated by row level. -- Except the RENAME operation, the ALTER TABLE syntax cannot be used to change columns in an encrypted table (including the conversion between encrypted and unencrypted columns). The ADD and DROP operations can be used to add and delete encrypted columns, respectively. -- The CHECK(COLUMN IS NOT NULL) syntax can be used, but most check constraint syntax cannot be set for encrypted columns. -- When **support_extended_features** is set to **off**, primary key and unique cannot be used for encrypted columns. When **support_extended_features** is set to **on**, only primary key and unique can be used for encrypted columns. -- Different data types cannot be implicitly converted. -- The set operation cannot be performed between ciphertexts of different data types. -- Range partitioning cannot be created for encrypted columns. -- Only the repeat and empty_blob() functions can be used to encrypt columns. -- The current version supports only gsql and JDBC (deployed on a Linux OS) clients. Other clients such as ODBC do not support encrypted equality query. -- Data can only be imported to the encrypted table by running **copy from stdin**, **\copy**, or **insert into values (…)** on the client. -- Copying an encrypted table to a file is not supported. -- The system does not support encrypted queries, such as sorting, range query, and fuzzy query, except equality query. -- The encrypted syntax of stored procedures for some functions is supported. 
For details about the constraints, see “Encrypted Functions and Stored Procedures” in the *Developer Guide*. -- Non-encrypted table data cannot be inserted into encrypted table data using the **INSERT INTO… SELECT…** or **MERGE INTO** syntax. -- For a request in connection state, the CEK information change on the server can be detected only after the cache update operation is triggered (for example, the user is changed or the encrypted column fails to be decrypted) and the connection is re-established. -- Encrypted equality query is not supported on columns encrypted using the random encryption algorithm. -- An error is reported if the two attribute conditions used for comparison in the encrypted equality query use different data encryption keys. -- Encrypted equality query is not supported in time series tables and foreign tables. The ustore storage engine is not supported. -- If the database service configuration (such as the pg_settings system catalog, permission, key, and encrypted column) is changed, you need to re-establish a JDBC connection to make the configuration take effect. -- Multiple SQL statements cannot be executed at the same time. This constraint does not apply to the scenario where the INSERT INTO statement is executed in multiple batches. -- The encrypted database does not encrypt empty strings of zero length. -- Deterministic encryption is prone to frequency attacks. Therefore, it is not recommended that deterministic encryption be used in scenarios where the plaintext frequency is obviously distributed. -- Encrypted equality query supports the following data types: - -| Category | Type | Description | -| -------------------- | ------------------ | ------------------------------------------------------------ | -| Integer types | tinyint/tinyint(n) | Tiny integer, which is the same as int1. | -| | smallint | Small integer, which is the same as int2. | -| | int4 | Common integer. | -| | binary_integer | Oracle compatibility type. Generally, the value is an integer. | -| | bigint/bigint(n) | Big integer, which is the same as int8. | -| Numeric data types | numeric(p,s) | A number with the precision **p**. | -| | number | Oracle compatibility type, which is the same as numeric(p,s). | -| Floating point types | float4 | Single-precision floating point. | -| | float8 | Double-precision floating point. | -| | double precision | Double-precision floating point. | -| Character data types | char/char(n) | Fixed-length character string. If the length is insufficient, add spaces. The default precision is **1**. | -| | varchar(n) | Variable-length character string, where **n** indicates the maximum number of bytes. | -| | text | Text type. | -| | varchar2(n) | Oracle compatibility type, which is the same as varchar(n). | -| | clob | Character large object. | -| Binary data types | bytea | Variable-length binary string. | -| | blob | Binary large object. | - -## Dependencies - -None. - -## Related Pages - -[Setting Encrypted Equality Query](../../security-guide/security/4-setting-encrypted-equality-query.md) +--- +title: Equality Query in a Fully-encrypted Database +summary: Equality Query in a Fully-encrypted Database +author: Guo Huan +date: 2022-05-07 +--- + +# Equality Query in a Fully-encrypted Database + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +The encrypted database aims to protect privacy throughout the data lifecycle. 
In this way, data is always in ciphertext during transmission, computing, and storage regardless of the service scenario and environment. After the data owner encrypts data on the client and sends the encrypted data to the server, no attacker can obtain valuable information even if the attacker steals user data by exploiting system vulnerabilities. Thus, data privacy is protected. + +## Benefits + +The entire service data flow is in ciphertext during data processing, so the following can be implemented by using a fully-encrypted database: + +1. Protect data privacy and security throughout the lifecycle on the cloud. Attackers cannot obtain valid information from the database server regardless of the data status. +2. Help cloud service providers obtain third-party trust. Users, including service administrators and O&M administrators in enterprise service scenarios and application developers in consumer cloud services, can keep keys by themselves so that users with high permissions cannot obtain valid data. +3. Enable cloud databases to better comply with personal privacy protection laws and regulations with the help of the fully-encrypted database. + +## Description + +From the perspective of users, the encrypted equality query functions are divided into three parts, which are implemented by the newly added KeyTool and the enhanced MogDB gsql client. + +First, this feature provides the client key management function. Users can use KeyTool to generate, destroy, and update CMKs, and import and export keys. With the import and export functions of KeyTool, CMKs can be transmitted between different clients. In addition, the KeyTool implements key management on a single client. By configuring management files, you can store and update keys. + +In addition, this feature provides the key creation and encrypted table creation functions. The SQL syntax CREATE CLIENT MASTER KEY and CREATE COLUMN ENCRYPTION KEY are added to record and manage CMK and CEK metadata in the database. The CMK and CEK information is recorded in the new system catalog. The CREATE TABLE syntax is extended to specify a column encryption key and encryption algorithm for each sensitive information column in a table, facilitating subsequent ciphertext data storage. + +This feature supports the encrypted equality query function, which is the core of the entire feature. Although users are unaware of the ciphertext query, the query of sensitive data is restricted by the specifications of the current encrypted equality query. + +From the overall perspective, this feature is used to store and manage data based on sensitive data protection requirements and implement query tasks based on ciphertext data. + +## Enhancements + +None. + +## Constraints + +- Data is encrypted at the column level, and encryption policies cannot be differentiated by row level. +- Except the RENAME operation, the ALTER TABLE syntax cannot be used to change columns in an encrypted table (including the conversion between encrypted and unencrypted columns). The ADD and DROP operations can be used to add and delete encrypted columns, respectively. +- The CHECK(COLUMN IS NOT NULL) syntax can be used, but most check constraint syntax cannot be set for encrypted columns. +- When **support_extended_features** is set to **off**, primary key and unique cannot be used for encrypted columns. When **support_extended_features** is set to **on**, only primary key and unique can be used for encrypted columns. +- Different data types cannot be implicitly converted.
+- The set operation cannot be performed between ciphertexts of different data types. +- Range partitioning cannot be created for encrypted columns. +- Only the repeat and empty_blob() functions can be used to encrypt columns. +- The current version supports only gsql and JDBC (deployed on a Linux OS) clients. Other clients such as ODBC do not support encrypted equality query. +- Data can only be imported to the encrypted table by running **copy from stdin**, **\copy**, or **insert into values (…)** on the client. +- Copying an encrypted table to a file is not supported. +- The system does not support encrypted queries, such as sorting, range query, and fuzzy query, except equality query. +- The encrypted syntax of stored procedures for some functions is supported. For details about the constraints, see “Encrypted Functions and Stored Procedures” in the *Developer Guide*. +- Non-encrypted table data cannot be inserted into encrypted table data using the **INSERT INTO… SELECT…** or **MERGE INTO** syntax. +- For a request in connection state, the CEK information change on the server can be detected only after the cache update operation is triggered (for example, the user is changed or the encrypted column fails to be decrypted) and the connection is re-established. +- Encrypted equality query is not supported on columns encrypted using the random encryption algorithm. +- An error is reported if the two attribute conditions used for comparison in the encrypted equality query use different data encryption keys. +- Encrypted equality query is not supported in time series tables and foreign tables. The ustore storage engine is not supported. +- If the database service configuration (such as the pg_settings system catalog, permission, key, and encrypted column) is changed, you need to re-establish a JDBC connection to make the configuration take effect. +- Multiple SQL statements cannot be executed at the same time. This constraint does not apply to the scenario where the INSERT INTO statement is executed in multiple batches. +- The encrypted database does not encrypt empty strings of zero length. +- Deterministic encryption is prone to frequency attacks. Therefore, it is not recommended that deterministic encryption be used in scenarios where the plaintext frequency is obviously distributed. +- Encrypted equality query supports the following data types: + +| Category | Type | Description | +| -------------------- | ------------------ | ------------------------------------------------------------ | +| Integer types | tinyint/tinyint(n) | Tiny integer, which is the same as int1. | +| | smallint | Small integer, which is the same as int2. | +| | int4 | Common integer. | +| | binary_integer | Oracle compatibility type. Generally, the value is an integer. | +| | bigint/bigint(n) | Big integer, which is the same as int8. | +| Numeric data types | numeric(p,s) | A number with the precision **p**. | +| | number | Oracle compatibility type, which is the same as numeric(p,s). | +| Floating point types | float4 | Single-precision floating point. | +| | float8 | Double-precision floating point. | +| | double precision | Double-precision floating point. | +| Character data types | char/char(n) | Fixed-length character string. If the length is insufficient, add spaces. The default precision is **1**. | +| | varchar(n) | Variable-length character string, where **n** indicates the maximum number of bytes. | +| | text | Text type. | +| | varchar2(n) | Oracle compatibility type, which is the same as varchar(n). 
| +| | clob | Character large object. | +| Binary data types | bytea | Variable-length binary string. | +| | blob | Binary large object. | + +## Dependencies + +None. + +## Related Pages + +[Setting Encrypted Equality Query](../../security-guide/security/4-setting-encrypted-equality-query.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/13-ledger-database-mechanism.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/13-ledger-database-mechanism.md index 02b688ad..95289c7a 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/13-ledger-database-mechanism.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/13-ledger-database-mechanism.md @@ -1,48 +1,48 @@ ---- -title: Ledger Database Mechanism -summary: Ledger Database Mechanism -author: Guo Huan -date: 2022-05-07 ---- - -# Ledger Database Mechanism - -## Availability - -This feature is available since MogDB 2.1.0. - -## Introduction - -The ledger database feature includes adding the verification information to a tamper-proof table specified by a user and recording the user's data operation history. The consistency between the data and operation history is checked to ensure that the user data cannot be maliciously tampered with. When a user performs DML operations on a tamper-proof table, the system adds a small amount of additional row-level verification information to the table and records the SQL statements and data change history. The feature provides a verification API for users to check whether the data in the tamper-proof table is consistent with the operation information recorded by the system. - -## Benefits - -The ledger database provides user data operation records, historical data change records, and easy-to-use consistency verification API to help users check whether sensitive information in the database is maliciously tampered with at any time, effectively improving the tamper-proof capability of the database. - -## Description - -The ledger database uses the ledger schema to isolate common tables from tamper-proof user tables. If a row-store table created in the ledger schema has the tamper-proof attribute, it is a tamper-proof user table. When data is inserted into a tamper-proof user table, the system automatically generates a small amount of row-level verification information. When a user executes DML, the system records user operations in the global blockchain table (GS_GLOBAL_CHAIN) and records data changes in the historical table corresponding to the user table. The data in operation records, data change records, and the user table must be the same. The ledger database provides a high-performance verification API for users to verify data consistency. If the consistency verification fails, the data may be tampered with. In this case, contact the audit administrator to trace the operation history. - -## Enhancements - -None. - -## Constraints - -- In tamper-proof schema, row-store tables are tamper-proofing, whereas temporary tables, unlogged tables, column-store tables, and time series tables are not. -- The structure of the tamper-proof user table cannot be modified. The tamper-proof tables cannot be truncated. The tamper-proof user table cannot be switched to a common schema. The non-tamper-proof table cannot be switched to the tamper-proof schema. -- If the tamper-proof table is a partitioned table, operations such as exchange partition, drop partition and truncate partition are not supported. 
- -Functions and triggers cannot be used to modify data in a tamper-proof user table. -When a tamper-proof user table is created, the column named **hash** cannot exist. -Common users can call the tampering verification API to verify only tables that they have the permission to query. -Only the audit administrator and initial user can query the global blockchain table and tables in BLOCKCHAIN schema. Common users do not have the permission to access and all users do not have the permission to modify the tables. -According to the naming rules of historical tables, if the name of the schema or table to be created ends or starts with an underscore (_), the name of the corresponding historical table may conflict with that of an existing table. In this case, you need to rename the table. -Currently, the hash digest of user row-level data in the ledger database is used only to ensure data consistency. It cannot prevent attackers from directly tampering with data files. - -## Dependencies - -None. - -## Related Pages - +--- +title: Ledger Database Mechanism +summary: Ledger Database Mechanism +author: Guo Huan +date: 2022-05-07 +--- + +# Ledger Database Mechanism + +## Availability + +This feature is available since MogDB 2.1.0. + +## Introduction + +The ledger database feature includes adding the verification information to a tamper-proof table specified by a user and recording the user's data operation history. The consistency between the data and operation history is checked to ensure that the user data cannot be maliciously tampered with. When a user performs DML operations on a tamper-proof table, the system adds a small amount of additional row-level verification information to the table and records the SQL statements and data change history. The feature provides a verification API for users to check whether the data in the tamper-proof table is consistent with the operation information recorded by the system. + +## Benefits + +The ledger database provides user data operation records, historical data change records, and an easy-to-use consistency verification API to help users check whether sensitive information in the database is maliciously tampered with at any time, effectively improving the tamper-proof capability of the database. + +## Description + +The ledger database uses the ledger schema to isolate common tables from tamper-proof user tables. If a row-store table created in the ledger schema has the tamper-proof attribute, it is a tamper-proof user table. When data is inserted into a tamper-proof user table, the system automatically generates a small amount of row-level verification information. When a user executes DML, the system records user operations in the global blockchain table (GS_GLOBAL_CHAIN) and records data changes in the historical table corresponding to the user table. The data in operation records, data change records, and the user table must be the same. The ledger database provides a high-performance verification API for users to verify data consistency. If the consistency verification fails, the data may be tampered with. In this case, contact the audit administrator to trace the operation history. + +## Enhancements + +None. + +## Constraints + +- In a tamper-proof schema, row-store tables are tamper-proof, whereas temporary tables, unlogged tables, column-store tables, and time series tables are not. +- The structure of the tamper-proof user table cannot be modified. The tamper-proof tables cannot be truncated.
The tamper-proof user table cannot be switched to a common schema. The non-tamper-proof table cannot be switched to the tamper-proof schema. +- If the tamper-proof table is a partitioned table, operations such as exchange partition, drop partition and truncate partition are not supported. +- Functions and triggers cannot be used to modify data in a tamper-proof user table. +- When a tamper-proof user table is created, the column named **hash** cannot exist. +- Common users can call the tampering verification API to verify only tables that they have the permission to query. +- Only the audit administrator and initial user can query the global blockchain table and tables in BLOCKCHAIN schema. Common users do not have the permission to access and all users do not have the permission to modify the tables. +- According to the naming rules of historical tables, if the name of the schema or table to be created ends or starts with an underscore (_), the name of the corresponding historical table may conflict with that of an existing table. In this case, you need to rename the table. +- Currently, the hash digest of user row-level data in the ledger database is used only to ensure data consistency. It cannot prevent attackers from directly tampering with data files. + +## Dependencies + +None. + +## Related Pages + [Setting a Ledger Database](../../security-guide/security/5-setting-a-ledger-database.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/14-transparent-data-encryption.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/14-transparent-data-encryption.md index 71a199b1..795577e1 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/14-transparent-data-encryption.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/14-transparent-data-encryption.md @@ -1,57 +1,57 @@ ---- -title: Transparent Data Encryption -summary: Transparent Data Encryption -author: Guo Huan -date: 2022-05-07 ---- - -# Transparent Data Encryption - -## Availability - -This feature is available since MogDB 2.1.0. - -## Introduction - -Transparent data encryption (TDE) encrypts data when the database writes the data to the storage medium and automatically decrypts the data when reading the data from the storage medium. This prevents attackers from reading data in the data file without database authentication, solving the static data leakage problem. This function is almost transparent to the application layer. You can determine whether to enable the transparent data encryption function as required. - -## Benefits - -To prevent attackers from reading data files without authentication, you can use the transparent data encryption function to encrypt data files in the database. This ensures that users can read decrypted data only after starting and connecting to the database properly. - -## Description - -The three-layer key structure is used to implement the key management mechanism, including the root key (RK), cluster master key (CMK), and data encryption key (DEK). CMKs are encrypted and protected by RKs, and DEKs are encrypted and protected by CMKs. DEKs are used to encrypt and decrypt user data. Each table corresponds to a DEK. - -Table-level encryption is supported. When creating a table, you can specify whether to encrypt the table and the encryption algorithm to be used. The encryption algorithm can be AES_128_CTR or SM4_CTR, which cannot be changed once specified. 
If an encrypted table is created, the database automatically applies for a DEK for the table and saves the encryption algorithm, key ciphertext, and corresponding CMK ID in the **reloptions** column of the pg_class system catalog in keyword=value format. - -You can switch an encrypted table to a non-encrypted table or switch a non-encrypted table to an encrypted table. If the encryption function is not enabled when a table is created, the table cannot be switched to an encrypted table. - -For encrypted tables, DEK rotation is supported. After the key rotation, the data encrypted using the old key is decrypted using the old key, and the newly written data is encrypted using the new key. The encryption algorithm is not changed during key rotation. - -## Enhancements - -None. - -## Constraints - -The current version interconnects with HUAWEI CLOUD KMS to support table-level key storage and row-store table encryption. The specifications are as follows: - -- Encryption of a row-store table stored as a heap is supported. -- Column-store encryption, materialized view encryption, and ustore storage engine encryption are not supported. -- Indexes, sequences, Xlogs, MOTs, and system catalogs cannot be encrypted. -- You can specify an encryption algorithm when creating a table. Once specified, the encryption algorithm cannot be changed. If **enable_tde** is set to **on** but the encryption algorithm **encrypt_algo** is not specified when a table is created, the AES-128-CTR encryption algorithm is used by default. -- If the encryption function is not enabled or the encryption algorithm is not specified when a table is created, the table cannot be switched to an encrypted table. -- For a table that has been assigned an encryption key, switching between the encrypted and unencrypted states of the table does not change the key or encryption algorithm. -- Data key rotation is supported only when the table encryption function is enabled. -- Cross-region primary/standby synchronization of multiple copies in a single cluster is not supported. Cross-region scaling of a single cluster is not supported. Cross-region backup and restoration, cluster DR, and data migration are not supported. -- In hybrid cloud scenarios, if the HUAWEI CLOUD KMS and management plane functions are used, transparent data encryption is supported. Other KMS services are not supported if their APIs are incompatible. -- The query performance of encrypted tables is lower than that of non-encrypted tables. If high performance is required, exercise caution when enabling the encryption function. - -## Dependencies - -The key management service is provided by the external KMS. The current version can interconnect with HUAWEI CLOUD KMS. - -## Related Pages - +--- +title: Transparent Data Encryption +summary: Transparent Data Encryption +author: Guo Huan +date: 2022-05-07 +--- + +# Transparent Data Encryption + +## Availability + +This feature is available since MogDB 2.1.0. + +## Introduction + +Transparent data encryption (TDE) encrypts data when the database writes the data to the storage medium and automatically decrypts the data when reading the data from the storage medium. This prevents attackers from reading data in the data file without database authentication, solving the static data leakage problem. This function is almost transparent to the application layer. You can determine whether to enable the transparent data encryption function as required. 
+ +## Benefits + +To prevent attackers from reading data files without authentication, you can use the transparent data encryption function to encrypt data files in the database. This ensures that users can read decrypted data only after starting and connecting to the database properly. + +## Description + +The three-layer key structure is used to implement the key management mechanism, including the root key (RK), cluster master key (CMK), and data encryption key (DEK). CMKs are encrypted and protected by RKs, and DEKs are encrypted and protected by CMKs. DEKs are used to encrypt and decrypt user data. Each table corresponds to a DEK. + +Table-level encryption is supported. When creating a table, you can specify whether to encrypt the table and the encryption algorithm to be used. The encryption algorithm can be AES_128_CTR or SM4_CTR, which cannot be changed once specified. If an encrypted table is created, the database automatically applies for a DEK for the table and saves the encryption algorithm, key ciphertext, and corresponding CMK ID in the **reloptions** column of the pg_class system catalog in keyword=value format. + +You can switch an encrypted table to a non-encrypted table or switch a non-encrypted table to an encrypted table. If the encryption function is not enabled when a table is created, the table cannot be switched to an encrypted table. + +For encrypted tables, DEK rotation is supported. After the key rotation, the data encrypted using the old key is decrypted using the old key, and the newly written data is encrypted using the new key. The encryption algorithm is not changed during key rotation. + +## Enhancements + +None. + +## Constraints + +The current version interconnects with HUAWEI CLOUD KMS to support table-level key storage and row-store table encryption. The specifications are as follows: + +- Encryption of a row-store table stored as a heap is supported. +- Column-store encryption, materialized view encryption, and ustore storage engine encryption are not supported. +- Indexes, sequences, Xlogs, MOTs, and system catalogs cannot be encrypted. +- You can specify an encryption algorithm when creating a table. Once specified, the encryption algorithm cannot be changed. If **enable_tde** is set to **on** but the encryption algorithm **encrypt_algo** is not specified when a table is created, the AES-128-CTR encryption algorithm is used by default. +- If the encryption function is not enabled or the encryption algorithm is not specified when a table is created, the table cannot be switched to an encrypted table. +- For a table that has been assigned an encryption key, switching between the encrypted and unencrypted states of the table does not change the key or encryption algorithm. +- Data key rotation is supported only when the table encryption function is enabled. +- Cross-region primary/standby synchronization of multiple copies in a single cluster is not supported. Cross-region scaling of a single cluster is not supported. Cross-region backup and restoration, cluster DR, and data migration are not supported. +- In hybrid cloud scenarios, if the HUAWEI CLOUD KMS and management plane functions are used, transparent data encryption is supported. Other KMS services are not supported if their APIs are incompatible. +- The query performance of encrypted tables is lower than that of non-encrypted tables. If high performance is required, exercise caution when enabling the encryption function. 
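+ +To illustrate the table-level options described above, here is a minimal sketch (it assumes TDE and the KMS interconnection are already configured; the table name is hypothetical, and the key rotation statement follows the openGauss-style syntax): + +```sql +-- create an encrypted row-store table with an explicitly chosen algorithm +CREATE TABLE credit_card_info (cc_no text, holder text) WITH (enable_tde = on, encrypt_algo = 'AES_128_CTR'); + +-- rotate the table's DEK: existing data stays readable with the old key, new writes use the new key +ALTER TABLE credit_card_info ENCRYPTION KEY ROTATION; +```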
+ +## Dependencies + +The key management service is provided by the external KMS. The current version can interconnect with HUAWEI CLOUD KMS. + +## Related Pages + [Configuring TDE](../../security-guide/security/6-transparent-data-encryption.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/2-separation-of-control-and-access-permissions.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/2-separation-of-control-and-access-permissions.md index 168747a7..85fe2a32 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/2-separation-of-control-and-access-permissions.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/2-separation-of-control-and-access-permissions.md @@ -1,50 +1,50 @@ ---- -title: Separation of Control and Access Permissions -summary: Separation of Control and Access Permissions -author: Guo Huan -date: 2022-05-07 ---- - -# Separation of Control and Access Permissions - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -The control permissions and the access permissions can be separated. - -## Benefits - -The control permissions of database administrators for tables need to be isolated from their access permissions to improve the data security of common users. - -## Description - -If multiple business departments use different database users to perform service operations and a database maintenance department at the same level uses database administrators to perform O&M operations, the business departments may require that database administrators can only perform control operations (**DROP**, **ALTER**, and **TRUNCATE**) and cannot perform access operations (**INSERT**, **DELETE**, **UPDATE**, **SELECT**, and **COPY**) without authorization. That is, the control permissions of database administrators for tables need to be isolated from their access permissions to improve the data security of common users. - -In separation-of-duties mode, a database administrator does not have permissions for the tables in schemas of other users. In this case, database administrators have neither control permissions nor access permissions. This does not meet the requirements of the business departments mentioned above. Therefore, MogDB provides private users to solve the problem. That is, create private users with the **INDEPENDENT** attribute in non-separation-of-duties mode. Users with the CREATEROLE permission or the system administrator permission can create private users or change the attributes of common users to private users. Common users can also change their own attributes to private users. - -```sql -MogDB=# CREATE USER user_independent WITH INDEPENDENT IDENTIFIED BY "1234@abc"; -``` - -System administrators can manage (**DROP**, **ALTER**, and **TRUNCATE**) table objects of private users but cannot access (**INSERT**, **DELETE**, **SELECT**, **UPDATE**, **COPY**, **GRANT**, **REVOKE**, and **ALTER OWNER**) the objects before being authorized. - -## Enhancements - -None - -## Constraints - -For a table owned by a private user, grant the trigger permission of the table to other users with caution to prevent other users from using the trigger to view the data of the private user. - -If permissions related to private user tables are granted to non-private users, the system administrator will obtain the same permissions. 
- -## Dependencies - -None - -## Related Pages - +--- +title: Separation of Control and Access Permissions +summary: Separation of Control and Access Permissions +author: Guo Huan +date: 2022-05-07 +--- + +# Separation of Control and Access Permissions + +## Availability + +This feature is available as of MogDB 1.1.0. + +## Introduction + +The control permissions and the access permissions can be separated. + +## Benefits + +The control permissions of database administrators for tables need to be isolated from their access permissions to improve the data security of common users. + +## Description + +If multiple business departments use different database users to perform service operations and a database maintenance department at the same level uses database administrators to perform O&M operations, the business departments may require that database administrators can only perform control operations (**DROP**, **ALTER**, and **TRUNCATE**) and cannot perform access operations (**INSERT**, **DELETE**, **UPDATE**, **SELECT**, and **COPY**) without authorization. That is, the control permissions of database administrators for tables need to be isolated from their access permissions to improve the data security of common users. + +In separation-of-duties mode, a database administrator does not have permissions for the tables in schemas of other users. In this case, database administrators have neither control permissions nor access permissions. This does not meet the requirements of the business departments mentioned above. Therefore, MogDB provides private users to solve the problem. That is, create private users with the **INDEPENDENT** attribute in non-separation-of-duties mode. Users with the CREATEROLE permission or the system administrator permission can create private users or change the attributes of common users to private users. Common users can also change their own attributes to private users. + +```sql +MogDB=# CREATE USER user_independent WITH INDEPENDENT IDENTIFIED BY "1234@abc"; +``` + +System administrators can manage (**DROP**, **ALTER**, and **TRUNCATE**) table objects of private users but cannot access (**INSERT**, **DELETE**, **SELECT**, **UPDATE**, **COPY**, **GRANT**, **REVOKE**, and **ALTER OWNER**) the objects before being authorized. + +## Enhancements + +None + +## Constraints + +For a table owned by a private user, grant the trigger permission of the table to other users with caution to prevent other users from using the trigger to view the data of the private user. + +If permissions related to private user tables are granted to non-private users, the system administrator will obtain the same permissions. 
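+ +A hypothetical sketch of this behavior, assuming the private user **user_independent** owns a table named **priv_tab** and **admin_user** is a system administrator (object names are illustrative): + +```sql +-- As admin_user: control operations succeed without authorization. +TRUNCATE TABLE user_independent.priv_tab; +-- As admin_user: access operations are rejected until authorized. +SELECT * FROM user_independent.priv_tab; -- fails with a permission error +-- As user_independent: explicitly authorize read access. +GRANT SELECT ON user_independent.priv_tab TO admin_user; +```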
+ +## Dependencies + +None + +## Related Pages + [CREATE USER](../../reference-guide/sql-syntax/CREATE-USER.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/3-database-encryption-authentication.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/3-database-encryption-authentication.md index bbebd2e3..d7f30ab4 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/3-database-encryption-authentication.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/3-database-encryption-authentication.md @@ -1,40 +1,40 @@ ---- -title: Database Encryption Authentication -summary: Database Encryption Authentication -author: Guo Huan -date: 2022-05-07 ---- - -# Database Encryption Authentication - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The client/server (C/S) mode-based client connection authentication mechanism is provided. - -## Benefits - -The unidirectional, irreversible hash encryption algorithm PBKDF2 is used for encryption and authentication, effectively defending against rainbow attacks. - -## Description - -MogDB uses a basic client connection authentication mechanism. After a client initiates a connection request, the server verifies the information and sends the information required for authentication to the client based on the verification result. The authentication information includes the salt, token, and server signature. The client responds to the request and sends the authentication information to the server. The server calls the authentication module to authenticate the client authentication information. The user password is encrypted and stored in the memory. During the entire authentication process, passwords are encrypted for storage and transmission. When the user logs in to the system next time, the hash value is calculated and compared with the key value stored on the server to verify the correctness. - -## Enhancements - -The message processing flow in the unified encryption and authentication process effectively prevents attackers from cracking the username or password by capturing packets. - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - +--- +title: Database Encryption Authentication +summary: Database Encryption Authentication +author: Guo Huan +date: 2022-05-07 +--- + +# Database Encryption Authentication + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +The client/server (C/S) mode-based client connection authentication mechanism is provided. + +## Benefits + +The unidirectional, irreversible hash encryption algorithm PBKDF2 is used for encryption and authentication, effectively defending against rainbow attacks. + +## Description + +MogDB uses a basic client connection authentication mechanism. After a client initiates a connection request, the server verifies the information and sends the information required for authentication to the client based on the verification result. The authentication information includes the salt, token, and server signature. The client responds to the request and sends the authentication information to the server. The server calls the authentication module to authenticate the client authentication information. The user password is encrypted and stored in the memory. During the entire authentication process, passwords are encrypted for storage and transmission. 
When the user logs in to the system next time, the hash value is calculated and compared with the key value stored on the server to verify its correctness. + +## Enhancements + +The message processing flow in the unified encryption and authentication process effectively prevents attackers from cracking the username or password by capturing packets. + +## Constraints + +None + +## Dependencies + +None + +## Related Pages + [Client Access Authentication](../../security-guide/security/1-client-access-authentication.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/4-data-encryption-and-storage.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/4-data-encryption-and-storage.md index aa6d2b18..cde3aa4f 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/4-data-encryption-and-storage.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/4-data-encryption-and-storage.md @@ -1,52 +1,52 @@ ---- -title: Data Encryption and Storage -summary: Data Encryption and Storage -author: Guo Huan -date: 2022-05-07 ---- - -# Data Encryption and Storage - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Imported data is encrypted before stored. - -## Benefits - -You can use encrypted import interfaces to encrypt sensitive information and store it in a table. - -## Description - -MogDB provides the encryption functions **gs_encrypt_aes128()** and **gs_encrypt()**, and decryption functions **gs_decrypt_aes128()** and **gs_decrypt()**. Before you import data to a certain column in a table, you can use this function to encrypt the data. The function can be called using a statement in the following format: - -```shell -gs_encrypt_aes128(column, key), gs_encrypt (decryptstr, keystr, decrypttype) -``` - -In the preceding command, **key** indicates the initial password specified by the user, which is used to derive the encryption key. To encrypt an entire table, you need to write an encryption function for each column. - -If a user with the required permission wants to view specific data, the user can decrypt required columns using the decryption function interface **gs_decrypt_aes128(***column***, ***key\***)**. To invoke the interface, run the following command: - -```shell -gs_decrypt_aes128(column, key), gs_decrypt(decryptstr, keystr, decrypttype) -``` - -## Enhancements - -None. - -## Constraints - -None. - -## Dependencies - -None. - -## Related Pages - +--- +title: Data Encryption and Storage +summary: Data Encryption and Storage +author: Guo Huan +date: 2022-05-07 +--- + +# Data Encryption and Storage + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +Imported data is encrypted before it is stored. + +## Benefits + +You can use encrypted import interfaces to encrypt sensitive information and store it in a table. + +## Description + +MogDB provides the encryption functions **gs_encrypt_aes128()** and **gs_encrypt()**, and decryption functions **gs_decrypt_aes128()** and **gs_decrypt()**. Before you import data to a certain column in a table, you can use one of these functions to encrypt the data. The functions can be called using a statement in the following format: + +```shell +gs_encrypt_aes128(column, key), gs_encrypt(decryptstr, keystr, decrypttype) +``` + +In the preceding command, **key** indicates the initial password specified by the user, which is used to derive the encryption key.
To encrypt an entire table, you need to write an encryption function for each column. + +If a user with the required permission wants to view specific data, the user can decrypt required columns using the decryption function interface **gs_decrypt_aes128(column, key)**. To invoke the interface, run the following command: + +```shell +gs_decrypt_aes128(column, key), gs_decrypt(decryptstr, keystr, decrypttype) +``` + +## Enhancements + +None. + +## Constraints + +None. + +## Dependencies + +None. + +## Related Pages + [Security Functions](../../reference-guide/functions-and-operators/security-functions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/5-database-audit.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/5-database-audit.md index cf2210be..21297b85 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/5-database-audit.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/5-database-audit.md @@ -1,40 +1,40 @@ ---- -title: Database Audit -summary: Database Audit -author: Guo Huan -date: 2022-05-07 ---- - -# Database Audit - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -Audit logs record user operations performed on database startup and stopping, as well as connection, DDL, DML, and DCL operations. - -## Benefits - -The audit log mechanism enhances the database capability of tracing unauthorized operations and collecting evidence. - -## Description - -Database security is essential for a database system. MogDB writes all user operations in the database into audit logs. Database security administrators can use the audit logs to reproduce a series of events that cause faults in the database and identify unauthorized users, unauthorized operations, and the time when these operations are performed. - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - +--- +title: Database Audit +summary: Database Audit +author: Guo Huan +date: 2022-05-07 +--- + +# Database Audit + +## Availability + +This feature is available as of MogDB 1.1.0. + +## Introduction + +Audit logs record database startup and stopping operations, as well as user connection, DDL, DML, and DCL operations. + +## Benefits + +The audit log mechanism enhances the database capability of tracing unauthorized operations and collecting evidence. + +## Description + +Database security is essential for a database system. MogDB writes all user operations in the database into audit logs. Database security administrators can use the audit logs to reproduce a series of events that cause faults in the database and identify unauthorized users, unauthorized operations, and the time when these operations are performed.
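+ +For reference, audit records can be queried with the **pg_query_audit** function; the sketch below is illustrative (the time window and column list are examples, and availability of the function should be verified against your version): + +```sql +-- Review audit records written in a given time window. +SELECT time, type, result, username, detail_info FROM pg_query_audit('2024-01-01 00:00:00', '2024-01-02 00:00:00'); +```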
+ +## Enhancements + +None + +## Constraints + +None + +## Dependencies + +None + +## Related Pages + [Configuring Database Audit](../../security-guide/security/3-configuring-database-audit.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/6-network-communication-security.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/6-network-communication-security.md index 1947ac62..8daa5c01 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/6-network-communication-security.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/6-network-communication-security.md @@ -1,48 +1,48 @@ ---- -title: Network Communication Security -summary: Network Communication Security -author: Guo Huan -date: 2022-05-07 ---- - -# Network Communication Security - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -To secure the transmission of sensitive data on the Internet, MogDB encrypts communication between the server and the client using the Secure Socket Layer (SSL) protocol. - -## Benefits - -The communication between your client and the server can be secured. - -## Description - -MogDB supports the SSL protocol. The SSL protocol is an application-layer communication protocol with high security, which is mainly used for secure web transmission. SSL contains a record layer and a transport layer. The record-layer protocol determines the encapsulation format of the transport-layer data. The transport-layer security protocol uses X.509 for authentication. The SSL protocol uses asymmetric encryption algorithms to authenticate the identities of communicating parties, and then the two parties exchange symmetric keys as communication keys. The SSL protocol effectively ensures the confidentiality and reliability of the communication between two applications and prevents the communication between a client and a server from being eavesdropped by attackers. - -MogDB also supports the TLS 1.2 protocol. TLS 1.2 is a transport-layer communication protocol with high security. It consists of the TLS Record and TLS Handshake protocols. Each protocol suit has information in multiple formats. The TLS protocol is independent of application-layer protocols. Upper-layer protocols can be transparently distributed on the TLS protocol. The TLS protocol ensures the data confidentiality and integrity for both communication parties. - -## Enhancements - -Checking the strength of certificate signature algorithms: For low-strength signature algorithms, alarms are reported, reminding you to replace the certificate with another certificate containing a high-strength signature algorithm. - -Checking the certificate validity period: If a certificate is about to expire in less than seven days, an alarm is reported, reminding you to replace the certificate on the client. - -Checking certificate permissions: The certificate permissions are verified at the connection setup stage. - -## Constraints - -The formal certificates and keys for servers and clients shall be obtained from the Certificate Authority (CA). Assume the private key and certificate for a server are **server.key** and **server.crt**, the private key and certificate for the client are **client.key** and **client.crt**, and the CA root certificate is **cacert.pem**. - -You need to enable the SSL protocol and configure the certificate and connection mode. 
- -## Dependencies - -OpenSSL - -## Related Pages - +--- +title: Network Communication Security +summary: Network Communication Security +author: Guo Huan +date: 2022-05-07 +--- + +# Network Communication Security + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +To secure the transmission of sensitive data on the Internet, MogDB encrypts communication between the server and the client using the Secure Socket Layer (SSL) protocol. + +## Benefits + +The communication between your client and the server can be secured. + +## Description + +MogDB supports the SSL protocol. The SSL protocol is an application-layer communication protocol with high security, which is mainly used for secure web transmission. SSL contains a record layer and a transport layer. The record-layer protocol determines the encapsulation format of the transport-layer data. The transport-layer security protocol uses X.509 for authentication. The SSL protocol uses asymmetric encryption algorithms to authenticate the identities of communicating parties, and then the two parties exchange symmetric keys as communication keys. The SSL protocol effectively ensures the confidentiality and reliability of the communication between two applications and prevents the communication between a client and a server from being eavesdropped by attackers. + +MogDB also supports the TLS 1.2 protocol. TLS 1.2 is a transport-layer communication protocol with high security. It consists of the TLS Record and TLS Handshake protocols. Each protocol suite has information in multiple formats. The TLS protocol is independent of application-layer protocols. Upper-layer protocols can be transparently layered on top of the TLS protocol. The TLS protocol ensures data confidentiality and integrity for both communication parties. + +## Enhancements + +Checking the strength of certificate signature algorithms: For low-strength signature algorithms, alarms are reported, reminding you to replace the certificate with another certificate containing a high-strength signature algorithm. + +Checking the certificate validity period: If a certificate is about to expire in less than seven days, an alarm is reported, reminding you to replace the certificate on the client. + +Checking certificate permissions: The certificate permissions are verified at the connection setup stage. + +## Constraints + +The formal certificates and keys for servers and clients shall be obtained from the Certificate Authority (CA). Assume the private key and certificate for a server are **server.key** and **server.crt**, the private key and certificate for the client are **client.key** and **client.crt**, and the CA root certificate is **cacert.pem**. + +You need to enable the SSL protocol and configure the certificate and connection mode.
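+ +A minimal sketch of the server and client settings implied above, assuming the PostgreSQL-style parameters (**ssl**, **ssl_cert_file**, **ssl_key_file**, **ssl_ca_file**) that MogDB inherits; the host, port, and file paths are illustrative: + +```shell +# postgresql.conf: enable SSL and point to the server certificate, key, and CA file. +ssl = on +ssl_cert_file = 'server.crt' +ssl_key_file = 'server.key' +ssl_ca_file = 'cacert.pem' + +# gsql: connect with certificate verification enabled. +gsql "host=db.example.com port=26000 dbname=postgres user=jack sslmode=verify-ca sslcert=client.crt sslkey=client.key sslrootcert=cacert.pem" +```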
+ +## Dependencies + +OpenSSL + +## Related Pages + [Managing SSL Certificates](../../security-guide/security/1-client-access-authentication.md#managing-ssl-certificates) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/7-resource-label.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/7-resource-label.md index 93ac72d1..c17280d3 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/7-resource-label.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/7-resource-label.md @@ -1,53 +1,53 @@ ---- -title: Resource Label -summary: Resource Label -author: Guo Huan -date: 2022-05-07 ---- - -# Resource Label - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -Database resources refer to database objects, such as databases, schemas, tables, columns, views, and triggers. The more the database objects are, the more complex the classification management of database resources is. The resource label mechanism is a technology that classifies and labels database resources with certain features to implement resource classification management. After adding labels to some resources in a database, administrators can perform operations such as data audit or anonymization using the labels to implement security management on labeled database resources. - -## Benefits - -Proper resource labels can be used to effectively classify data objects, improve management efficiency, and simplify security policy configuration. To perform unified audit or data anonymization on a group of database resources, the administrator can allocate a resource label to these resources first. The label indicates that the database resources have a certain feature or require unified configuration of a certain policy. The administrator can directly perform operations on the resource label, which greatly reduces the complexity of policy configuration and information redundancy as well as improves management efficiency. - -## Description - -The resource label mechanism selectively classifies resources in the current database. Administrators can use the following SQL syntax to create a resource label and add the label to a group of database resources: - -```sql -CREATE RESOURCE LABEL schm_lb ADD SCHEMA(schema_for_label); -CREATE RESOURCE LABEL tb_lb ADD TABLE(schema_for_label.table_for_label); -CREATE RESOURCE LABEL col_lb ADD COLUMN(schema_for_label.table_for_label.column_for_label); -CREATE RESOURCE LABEL multi_lb ADD SCHEMA(schema_for_label), TABLE(table_for_label); -``` - -**schema_for_label**, **table_for_label**, and **column_for_label** indicate the schema, table, and column to be labeled, respectively. The **schm_lb** label is added to schema **schm_for_label**, **tb_lb** is added to table **table_for_label**, **col_lb** is added to column **column_for_label**, and **multi_lb** is added to schema **schm_for_label** and table **table_for_label**. You can perform unified audit or dynamic data anonymization using the configured resource labels, that is, manage all labeled database resources. - -Currently, resource labels support the following database resource types: schema, table, column, view, and function. - -## Enhancements - -None - -## Constraints - -- Resource labels can be created only by a user with the **POLADMIN** and **SYSADMIN** attributes or an initial user. -- Resource labels cannot be created for temporary tables. 
- -Columns in the same basic table can belong to only one resource tag. - -## Dependencies - -None - -## Related Pages - +--- +title: Resource Label +summary: Resource Label +author: Guo Huan +date: 2022-05-07 +--- + +# Resource Label + +## Availability + +This feature is available as of MogDB 1.1.0. + +## Introduction + +Database resources refer to database objects, such as databases, schemas, tables, columns, views, and triggers. The more database objects there are, the more complex their classification management becomes. The resource label mechanism is a technology that classifies and labels database resources with certain features to implement resource classification management. After adding labels to some resources in a database, administrators can perform operations such as data audit or anonymization using the labels to implement security management on labeled database resources. + +## Benefits + +Proper resource labels can be used to effectively classify data objects, improve management efficiency, and simplify security policy configuration. To perform unified audit or data anonymization on a group of database resources, the administrator can allocate a resource label to these resources first. The label indicates that the database resources have a certain feature or require unified configuration of a certain policy. The administrator can directly perform operations on the resource label, which greatly reduces the complexity of policy configuration and information redundancy as well as improves management efficiency. + +## Description + +The resource label mechanism selectively classifies resources in the current database. Administrators can use the following SQL syntax to create a resource label and add the label to a group of database resources: + +```sql +CREATE RESOURCE LABEL schm_lb ADD SCHEMA(schema_for_label); +CREATE RESOURCE LABEL tb_lb ADD TABLE(schema_for_label.table_for_label); +CREATE RESOURCE LABEL col_lb ADD COLUMN(schema_for_label.table_for_label.column_for_label); +CREATE RESOURCE LABEL multi_lb ADD SCHEMA(schema_for_label), TABLE(table_for_label); +``` + +**schema_for_label**, **table_for_label**, and **column_for_label** indicate the schema, table, and column to be labeled, respectively. The **schm_lb** label is added to schema **schm_for_label**, **tb_lb** is added to table **table_for_label**, **col_lb** is added to column **column_for_label**, and **multi_lb** is added to schema **schm_for_label** and table **table_for_label**. You can perform unified audit or dynamic data anonymization using the configured resource labels, that is, manage all labeled database resources. + +Currently, resource labels support the following database resource types: schema, table, column, view, and function. + +## Enhancements + +None + +## Constraints + +- Resource labels can be created only by a user with the **POLADMIN** or **SYSADMIN** attribute or by an initial user. +- Resource labels cannot be created for temporary tables. +- Columns in the same basic table can belong to only one resource label.
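+ +Within these constraints, existing labels can later be adjusted or removed with the ALTER RESOURCE LABEL and DROP RESOURCE LABEL statements referenced below, for example (a sketch reusing the illustrative objects above): + +```sql +-- Extend an existing label to cover one more table. +ALTER RESOURCE LABEL schm_lb ADD TABLE(schema_for_label.table_for_label); +-- Remove a label that is no longer needed. +DROP RESOURCE LABEL col_lb; +```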
+ +## Dependencies + +None + +## Related Pages + [CREATE RESOURCE LABEL](../../reference-guide/sql-syntax/CREATE-RESOURCE-LABEL.md), [ALTER RESOURCE LABEL](../../reference-guide/sql-syntax/ALTER-RESOURCE-LABEL.md), [DROP RESOURCE LABEL](../../reference-guide/sql-syntax/DROP-RESOURCE-LABEL.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/8-unified-audit.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/8-unified-audit.md index 018e9726..bbbfebcc 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/8-unified-audit.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/8-unified-audit.md @@ -1,78 +1,78 @@ ---- -title: Unified Audit -summary: Unified Audit -author: Guo Huan -date: 2022-05-07 ---- - -# Unified Audit - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The audit mechanism is a security management solution that can effectively deal with the attackers' repudiation. The larger the audit scope is, the more operations can be monitored and the more audit logs are generated, affecting the actual audit efficiency. The unified audit mechanism is a technology that implements efficient security audit management by customizing audit policies. After the administrator defines the audit object and audit behaviors, if the task executed by a user is associated with an audit policy, the corresponding audit behavior is generated and the audit log is recorded. Customized audit policies can cover common user management activities, as well as DDL and DML operations, meeting routine audit requirements. - -## Benefits - -Audit is indispensable for routine security management. When a traditional audit mechanism is used to audit an operation, such as **SELECT**, a large number of audit logs are generated, increasing the I/O of the entire system and affecting the system performance and audit efficiency of administrators. The unified audit mechanism allows you to customize policies for generating audit logs. For example, only the operation that database account **A** queries table **a** is audited. Customized audit greatly reduces the number of generated audit logs, ensuring audit behaviors and reducing the impact on system performance. In addition, customized audit policies can improve the audit efficiency of administrators. - -## Description - -The unified audit mechanism customizes audit behaviors based on resource labels and classifies the supported audit behaviors into the **ACCESS** and **PRIVILEGES** classes. The SQL syntax for creating a complete audit policy is as follows: - -```sql -CREATE RESOURCE LABEL auditlabel add table(table_for_audit1, table_for_audit2); -CREATE AUDIT POLICY audit_select_policy ACCESS SELECT ON LABEL(auditlabel) FILTER ON ROLES(usera); -CREATE AUDIT POLICY audit_admin_policy PRIVILEGES ALTER, DROP ON LABEL(auditlabel) FILTER ON IP(local); -``` - -**auditlabel** indicates the resource label in the current audit, which contains two table objects. **audit_select_policy** defines the audit policy for user **usera** to audit the **SELECT** operation on the objects with the **auditlabel** label, regardless of the access source. **audit_admin_policy** defines a local audit policy for **ALTER** and **DROP** operations on the objects with the **auditlabel** label, regardless of the user. 
If **ACCESS** and **PRIVILEGES** are not specified, all DDL and DML operations on objects with a resource label are audited. If no audit objects are specified, operations on all objects are audited. The addition, deletion, and modification of unified audit policies are also recorded in unified audit logs. - -Currently, unified audit supports the following audit behaviors: - -| **SQL Type** | Supported operations and object types | -| ------------ | ------------------------------------------------------------ | -| DDL | Operations: ALL, ALTER, ANALYZE, COMMENT, CREATE, DROP, GRANT, and REVOKE
SET and SHOW
Objects: DATABASE, SCHEMA, FUNCTION, TRIGGER, TABLE, SEQUENCE, FOREIGN_SERVER, FOREIGN_TABLE, TABLESPACE, ROLE/USER, INDEX, VIEW, and DATA_SOURCE | -| DML | Operations: ALL, COPY, DEALLOCATE, DELETE, EXECUTE, REINDEX, INSERT, PREPARE, SELECT, TRUNCATE, and UPDATE | - -## Enhancements - -None. - -## Constraints - -- The unified audit policy must be created by a user with the **POLADMIN** or **SYSADMIN** attribute, or by the initial user. Common users do not have the permission to access the security policy system catalog and system view. - -- The syntax of a unified audit policy applies to either DDL or DML operations. DDL operations and DML operations are mutually exclusive in an audit policy. A maximum of 98 unified audit policies can be configured. - -- Unified audit monitors the SQL statements executed by users on the clients, but does not record the internal SQL statements of databases. - -- In the same audit policy, the same resource tag can be bound to different audit behaviors, and the same behavior can be bound to different resource tags. The ALL operation type includes all operations supported by DDL or DML. - -- A resource label can be associated with different unified audit policies. Unified audit outputs audit information in sequence based on the policies matched by SQL statements. - -- Audit logs of unified audit policies are recorded separately. Currently, no visualized query interfaces are provided. Audit logs depend on the OS service Rsyslog and are archived through the service configuration. - -- In cloud service scenarios, logs need to be stored in the OBS. In hybrid cloud scenarios, you can deploy Elasticsearch to collect CN logs and perform visualized processing. - -- It is recommended that **APP** in **FILTER** be set to applications in the same trusted domain. Since a client may be forged, a security mechanism must be formed on the client when **APP** is used to reduce misuse risks. Generally, you are not advised to set **APP**. If it is set, pay attention to the risk of client spoofing. - -- Taking an IPv4 address as an example, the following formats are supported: - - | IP Address Format | Example | - | -------------------- | ------------------------ | - | Single IP address | 127.0.0.1 | - | IP address with mask | 127.0.0.1\|255.255.255.0 | - | CIDR IP address | 127.0.0.1⁄24 | - | IP address segment | 127.0.0.1-127.0.0.5 | - -## Dependencies - -None. - -## Related Pages - +--- +title: Unified Audit +summary: Unified Audit +author: Guo Huan +date: 2022-05-07 +--- + +# Unified Audit + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +The audit mechanism is a security management solution that can effectively deal with the attackers' repudiation. The larger the audit scope is, the more operations can be monitored and the more audit logs are generated, affecting the actual audit efficiency. The unified audit mechanism is a technology that implements efficient security audit management by customizing audit policies. After the administrator defines the audit object and audit behaviors, if the task executed by a user is associated with an audit policy, the corresponding audit behavior is generated and the audit log is recorded. Customized audit policies can cover common user management activities, as well as DDL and DML operations, meeting routine audit requirements. + +## Benefits + +Audit is indispensable for routine security management. 
When a traditional audit mechanism is used to audit an operation, such as **SELECT**, a large number of audit logs are generated, increasing the I/O of the entire system and affecting the system performance and audit efficiency of administrators. The unified audit mechanism allows you to customize policies for generating audit logs. For example, only the operation that database account **A** queries table **a** is audited. Customized audit greatly reduces the number of generated audit logs, ensuring audit behaviors and reducing the impact on system performance. In addition, customized audit policies can improve the audit efficiency of administrators. + +## Description + +The unified audit mechanism customizes audit behaviors based on resource labels and classifies the supported audit behaviors into the **ACCESS** and **PRIVILEGES** classes. The SQL syntax for creating a complete audit policy is as follows: + +```sql +CREATE RESOURCE LABEL auditlabel add table(table_for_audit1, table_for_audit2); +CREATE AUDIT POLICY audit_select_policy ACCESS SELECT ON LABEL(auditlabel) FILTER ON ROLES(usera); +CREATE AUDIT POLICY audit_admin_policy PRIVILEGES ALTER, DROP ON LABEL(auditlabel) FILTER ON IP(local); +``` + +**auditlabel** indicates the resource label in the current audit, which contains two table objects. **audit_select_policy** defines the audit policy for user **usera** to audit the **SELECT** operation on the objects with the **auditlabel** label, regardless of the access source. **audit_admin_policy** defines a local audit policy for **ALTER** and **DROP** operations on the objects with the **auditlabel** label, regardless of the user. If **ACCESS** and **PRIVILEGES** are not specified, all DDL and DML operations on objects with a resource label are audited. If no audit objects are specified, operations on all objects are audited. The addition, deletion, and modification of unified audit policies are also recorded in unified audit logs. + +Currently, unified audit supports the following audit behaviors: + +| **SQL Type** | Supported operations and object types | +| ------------ | ------------------------------------------------------------ | +| DDL | Operations: ALL, ALTER, ANALYZE, COMMENT, CREATE, DROP, GRANT, and REVOKE
SET and SHOW
Objects: DATABASE, SCHEMA, FUNCTION, TRIGGER, TABLE, SEQUENCE, FOREIGN_SERVER, FOREIGN_TABLE, TABLESPACE, ROLE/USER, INDEX, VIEW, and DATA_SOURCE | +| DML | Operations: ALL, COPY, DEALLOCATE, DELETE, EXECUTE, REINDEX, INSERT, PREPARE, SELECT, TRUNCATE, and UPDATE | + +## Enhancements + +None. + +## Constraints + +- The unified audit policy must be created by a user with the **POLADMIN** or **SYSADMIN** attribute, or by the initial user. Common users do not have the permission to access the security policy system catalog and system view. + +- The syntax of a unified audit policy applies to either DDL or DML operations. DDL operations and DML operations are mutually exclusive in an audit policy. A maximum of 98 unified audit policies can be configured. + +- Unified audit monitors the SQL statements executed by users on the clients, but does not record the internal SQL statements of databases. + +- In the same audit policy, the same resource label can be bound to different audit behaviors, and the same behavior can be bound to different resource labels. The ALL operation type includes all operations supported by DDL or DML. + +- A resource label can be associated with different unified audit policies. Unified audit outputs audit information in sequence based on the policies matched by SQL statements. + +- Audit logs of unified audit policies are recorded separately. Currently, no visualized query interfaces are provided. Audit logs depend on the OS service Rsyslog and are archived through the service configuration. + +- In cloud service scenarios, logs need to be stored in the OBS. In hybrid cloud scenarios, you can deploy Elasticsearch to collect CN logs and perform visualized processing. + +- It is recommended that **APP** in **FILTER** be set to applications in the same trusted domain. Since a client may be forged, a security mechanism must be formed on the client when **APP** is used to reduce misuse risks. Generally, you are not advised to set **APP**. If it is set, pay attention to the risk of client spoofing. + +- Taking an IPv4 address as an example, the following formats are supported: + + | IP Address Format | Example | + | -------------------- | ------------------------ | + | Single IP address | 127.0.0.1 | + | IP address with mask | 127.0.0.1\|255.255.255.0 | + | CIDR IP address | 127.0.0.1/24 | + | IP address segment | 127.0.0.1-127.0.0.5 | + +## Dependencies + +None. + +## Related Pages + [CREATE RESOURCE LABEL](../../reference-guide/sql-syntax/CREATE-RESOURCE-LABEL.md), [CREATE AUDIT POLICY](../../reference-guide/sql-syntax/CREATE-AUDIT-POLICY.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/9-dynamic-data-anonymization.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/9-dynamic-data-anonymization.md index 793e36ed..20a64302 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/9-dynamic-data-anonymization.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/9-dynamic-data-anonymization.md @@ -1,111 +1,111 @@ ---- -title: Dynamic Data Masking -summary: Dynamic Data Masking -author: Guo Huan -date: 2022-05-07 ---- - -# Dynamic Data Masking - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Data masking is an effective database privacy protection solution, which can prevent attackers from snooping on private data.
The dynamic data masking mechanism is a technology that protects privacy data by customizing masking policies. It can effectively prevent unauthorized users from accessing sensitive information while retaining original data. After the administrator specifies the object to be anonymized and customizes a data masking policy, if the database resources queried by a user are associated with a masking policy, data is anonymized based on the user identity and masking policy to restrict attackers' access to privacy data. - -## Benefits - -Data privacy protection is one of the required database security capabilities. It can restrict attackers' access to privacy data, ensuring privacy data security. The dynamic data masking mechanism can protect the privacy of specified database resources by configuring masking policies. In addition, the masking policy configuration is flexible and can implement targeted privacy protection in specific user scenarios. - -## Description - -The dynamic data masking mechanism customizes masking policies based on resource labels. It can select masking modes based on the site requirements or customize masking policies for specific users. The SQL syntax for creating a complete masking policy is as follows: - -```sql -CREATE RESOURCE LABEL label_for_creditcard ADD COLUMN(user1.table1.creditcard); -CREATE RESOURCE LABEL label_for_name ADD COLUMN(user1.table1.name); -CREATE MASKING POLICY msk_creditcard creditcardmasking ON LABEL(label_for_creditcard); -CREATE MASKING POLICY msk_name randommasking ON LABEL(label_for_name) FILTER ON IP(local), ROLES(dev); -``` - -**label_for_creditcard** and **msk_name** are the resource labels for masking, and each label is allocated to two column objects. **creditcardmasking** and **randommasking** are preset masking functions. **msk_creditcard** specifies that the masking policy **creditcardmasking** will be applied when any user accesses resources with **label_for_creditcard**, regardless of the access source. **msk_name** specifies that the masking policy **randommasking** will be applied when local user **dev** accesses resources with **label_for_name**. If **FILTER** is not specified, the setting takes effect for all users. Otherwise, the setting takes effect only for specified users. - -The following table shows the preset masking functions: - -| **Masking Function** | **Example** | -| -------------------- | ------------------------------------------------------------ | -| creditcardmasking | '4880-9898-4545-2525' will be anonymized as 'xxxx-xxxx-xxxx-2525'. This function anonymizes digits except the last four digits. | -| basicemailmasking | 'abcd@gmail.com' will be anonymized as 'xxxx@gmail.com'. This function anonymizes text before the first @. | -| fullemailmasking | 'abcd@gmail.com' will be anonymized as 'xxxx@xxxxx.com'. This function anonymizes text before the first dot (.) (except @). | -| alldigitsmasking | 'alex123alex' will be anonymized as 'alex000alex'. This function anonymizes only digits in the text. | -| shufflemasking | 'hello word' will be anonymized as 'hlwoeor dl'. This weak masking function is implemented through character dislocation. You are not advised to use this function to anonymize strings with strong semantics. | -| randommasking | 'hello word' will be anonymized as 'ad5f5ghdf5'. This function randomly anonymizes text by character. | -| regexpmasking | You need to enter four parameters in sequence. 
**reg** indicates the character string to be replaced, **replace_text** indicates the character string after replacement, **pos** indicates the position where the target character string starts to be replaced, and **reg_len** indicates the replacement length. Both **pos** and **reg_len** are of the integer type. **reg** and **replace_text** can be expressed by regular expressions. If **pos** is not specified, the default value is **0**. If **reg_len** is not specified, the default value is **–1**, indicating that all character strings after **pos** will be replaced. If the type of the input parameter is inconsistent with the expected parameter type, the maskall function is used for anonymization.
`CREATE MASKING POLICY msk_creditcard regexpmasking('[\d+]', 'x', 5, 9 ) ON LABEL(label_for_creditcard);` | -| maskall | '4880-9898-4545-2525' will be anonymized as 'xxxxxxxxxxxxxxxxxxx'. | - -The data types supported by each masking function are as follows: - -| **Masking Function** | **Supported Data Types** | -| -------------------- | ------------------------------------------------------------ | -| creditcardmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data in credit card format only) | -| basicemailmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data in email format only) | -| fullemailmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data in email format only) | -| alldigitsmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data containing digits only) | -| shufflemasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (text data only) | -| randommasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (text data only) | -| maskall | BOOL, RELTIME, TIME, TIMETZ, INTERVAL, TIMESTAMP, TIMESTAMPTZ, SMALLDATETIME, ABSTIME, TEXT, BPCHAR, VARCHAR, NVARCHAR2, NAME, INT8, INT4, INT2, INT1, NUMRIC, FLOAT4, FLOAT8, CASH | - -For unsupported data types, the **maskall** function is used for data masking by default. The data of the BOOL type is masked as **'0'**. The RELTIME type is masked as **'1970'**. The TIME, TIMETZ, and INTERVAL types are masked as **'00:00:00.0000+00'**. The TIMESTAMP, TIMESTAMPTZ, SMALLDATETIME, and ABSTIME types are masked as **'1970-01-01 00:00:00.0000'**. The TEXT, CHAR, BPCHAR, VARCHAR, NVARCHAR2, and NAME type are masked as **'x'**. The INT8, INT4, INT2, INT1, NUMERIC, FLOAT4, FLOAT8 types are masked as **'0'**. If the data type is not supported by **maskall**, the masking policy cannot be created. If implicit conversion is involved in the masking column, the data type after implicit conversion is used for masking. In addition, if the masking policy is applied to a data column and takes effect, operations on the data in the column are performed based on the masking result. - -Dynamic data masking applies to scenarios closely related to actual services. It provides users with proper masking query APIs and error handling logic based on service requirements to prevent raw data from being obtained through credential stuffing. - -## Enhancements - -None. - -## Constraints - -- The dynamic data masking policy must be created by a user with the **POLADMIN** or **SYSADMIN** attribute, or by the initial user. Common users do not have the permission to access the security policy system catalog and system view. - -- Dynamic data masking takes effect only on data tables for which masking policies are configured. Audit logs are not within the effective scope of the masking policies. - -- In a masking policy, only one masking mode can be specified for a resource label. - -- Multiple masking policies cannot be used to anonymize the same resource label, except when **FILTER** is used to specify user scenarios where the policies take effect and there is no intersection between user scenarios of different masking policies that contain the same resource label. In this case, you can identify the policy that a resource label is anonymized by based on the user scenario. - -- It is recommended that **APP** in **FILTER** be set to applications in the same trusted domain. Since a client may be forged, a security mechanism must be formed on the client when **APP** is used to reduce misuse risks. Generally, you are not advised to set **APP**. If it is set, pay attention to the risk of client spoofing. 
- -- For INSERT or MERGE INTO operations with the query clause, if the source table contains anonymized columns, the inserted or updated result in the preceding two operations is the anonymized value and cannot be restored. - -- When the built-in security policy is enabled, the ALTER TABLE EXCHANGE PARTITION statement fails to be executed if the source table is in the anonymized column. - -- If a dynamic data masking policy is configured for a table, grant the trigger permission of the table to other users with caution to prevent other users from using the trigger to bypass the masking policy. - -- A maximum of 98 dynamic data masking policies can be created. - -- Only the preceding seven preset masking policies can be used. - -- Only data with the resource labels containing the **COLUMN** attribute can be anonymized. - -- Only columns in base tables can be anonymized. - -- Only the data queried using **SELECT** can be anonymized. - -- Taking an IPv4 address as an example, the following formats are supported: - - | IP Address Format | Example | - | -------------------- | ------------------------ | - | Single IP address | 127.0.0.1 | - | IP address with mask | 127.0.0.1\|255.255.255.0 | - | CIDR IP address | 127.0.0.1⁄24 | - | IP address segment | 127.0.0.1-127.0.0.5 | - -## Dependencies - -None. - -## Related Pages - -[CREATE RESOURCE LABEL](../../reference-guide/sql-syntax/CREATE-RESOURCE-LABEL.md), [CREATE MASKING POLICY](../../reference-guide/sql-syntax/CREATE-MASKING-POLICY.md) +--- +title: Dynamic Data Masking +summary: Dynamic Data Masking +author: Guo Huan +date: 2022-05-07 +--- + +# Dynamic Data Masking + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +Data masking is an effective database privacy protection solution, which can prevent attackers from snooping on private data. The dynamic data masking mechanism is a technology that protects privacy data by customizing masking policies. It can effectively prevent unauthorized users from accessing sensitive information while retaining original data. After the administrator specifies the object to be anonymized and customizes a data masking policy, if the database resources queried by a user are associated with a masking policy, data is anonymized based on the user identity and masking policy to restrict attackers' access to privacy data. + +## Benefits + +Data privacy protection is one of the required database security capabilities. It can restrict attackers' access to privacy data, ensuring privacy data security. The dynamic data masking mechanism can protect the privacy of specified database resources by configuring masking policies. In addition, the masking policy configuration is flexible and can implement targeted privacy protection in specific user scenarios. + +## Description + +The dynamic data masking mechanism customizes masking policies based on resource labels. It can select masking modes based on the site requirements or customize masking policies for specific users. 
The SQL syntax for creating a complete masking policy is as follows: + +```sql +CREATE RESOURCE LABEL label_for_creditcard ADD COLUMN(user1.table1.creditcard); +CREATE RESOURCE LABEL label_for_name ADD COLUMN(user1.table1.name); +CREATE MASKING POLICY msk_creditcard creditcardmasking ON LABEL(label_for_creditcard); +CREATE MASKING POLICY msk_name randommasking ON LABEL(label_for_name) FILTER ON IP(local), ROLES(dev); +``` + +**label_for_creditcard** and **msk_name** are the resource labels for masking, and each label is allocated to two column objects. **creditcardmasking** and **randommasking** are preset masking functions. **msk_creditcard** specifies that the masking policy **creditcardmasking** will be applied when any user accesses resources with **label_for_creditcard**, regardless of the access source. **msk_name** specifies that the masking policy **randommasking** will be applied when local user **dev** accesses resources with **label_for_name**. If **FILTER** is not specified, the setting takes effect for all users. Otherwise, the setting takes effect only for specified users. + +The following table shows the preset masking functions: + +| **Masking Function** | **Example** | +| -------------------- | ------------------------------------------------------------ | +| creditcardmasking | '4880-9898-4545-2525' will be anonymized as 'xxxx-xxxx-xxxx-2525'. This function anonymizes digits except the last four digits. | +| basicemailmasking | 'abcd@gmail.com' will be anonymized as 'xxxx@gmail.com'. This function anonymizes text before the first @. | +| fullemailmasking | 'abcd@gmail.com' will be anonymized as 'xxxx@xxxxx.com'. This function anonymizes text before the first dot (.) (except @). | +| alldigitsmasking | 'alex123alex' will be anonymized as 'alex000alex'. This function anonymizes only digits in the text. | +| shufflemasking | 'hello word' will be anonymized as 'hlwoeor dl'. This weak masking function is implemented through character dislocation. You are not advised to use this function to anonymize strings with strong semantics. | +| randommasking | 'hello word' will be anonymized as 'ad5f5ghdf5'. This function randomly anonymizes text by character. | +| regexpmasking | You need to enter four parameters in sequence. **reg** indicates the character string to be replaced, **replace_text** indicates the character string after replacement, **pos** indicates the position where the target character string starts to be replaced, and **reg_len** indicates the replacement length. Both **pos** and **reg_len** are of the integer type. **reg** and **replace_text** can be expressed by regular expressions. If **pos** is not specified, the default value is **0**. If **reg_len** is not specified, the default value is **–1**, indicating that all character strings after **pos** will be replaced. If the type of the input parameter is inconsistent with the expected parameter type, the maskall function is used for anonymization.
`CREATE MASKING POLICY msk_creditcard regexpmasking('[\d+]', 'x', 5, 9 ) ON LABEL(label_for_creditcard);` | +| maskall | '4880-9898-4545-2525' will be anonymized as 'xxxxxxxxxxxxxxxxxxx'. | + +The data types supported by each masking function are as follows: + +| **Masking Function** | **Supported Data Types** | +| -------------------- | ------------------------------------------------------------ | +| creditcardmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data in credit card format only) | +| basicemailmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data in email format only) | +| fullemailmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data in email format only) | +| alldigitsmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data containing digits only) | +| shufflemasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (text data only) | +| randommasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (text data only) | +| maskall | BOOL, RELTIME, TIME, TIMETZ, INTERVAL, TIMESTAMP, TIMESTAMPTZ, SMALLDATETIME, ABSTIME, TEXT, BPCHAR, VARCHAR, NVARCHAR2, NAME, INT8, INT4, INT2, INT1, NUMERIC, FLOAT4, FLOAT8, CASH | + +For unsupported data types, the **maskall** function is used for data masking by default. The data of the BOOL type is masked as **'0'**. The RELTIME type is masked as **'1970'**. The TIME, TIMETZ, and INTERVAL types are masked as **'00:00:00.0000+00'**. The TIMESTAMP, TIMESTAMPTZ, SMALLDATETIME, and ABSTIME types are masked as **'1970-01-01 00:00:00.0000'**. The TEXT, CHAR, BPCHAR, VARCHAR, NVARCHAR2, and NAME types are masked as **'x'**. The INT8, INT4, INT2, INT1, NUMERIC, FLOAT4, and FLOAT8 types are masked as **'0'**. If the data type is not supported by **maskall**, the masking policy cannot be created. If implicit conversion is involved in the masking column, the data type after implicit conversion is used for masking. In addition, if the masking policy is applied to a data column and takes effect, operations on the data in the column are performed based on the masking result. + +Dynamic data masking applies to scenarios closely related to actual services. It provides users with proper masking query APIs and error handling logic based on service requirements to prevent raw data from being obtained through credential stuffing. + +## Enhancements + +None. + +## Constraints + +- The dynamic data masking policy must be created by a user with the **POLADMIN** or **SYSADMIN** attribute, or by the initial user. Common users do not have the permission to access the security policy system catalog and system view. + +- Dynamic data masking takes effect only on data tables for which masking policies are configured. Audit logs are not within the effective scope of the masking policies. + +- In a masking policy, only one masking mode can be specified for a resource label. + +- Multiple masking policies cannot be used to anonymize the same resource label, except when **FILTER** is used to specify user scenarios where the policies take effect and there is no intersection between user scenarios of different masking policies that contain the same resource label. In this case, you can determine, based on the user scenario, which policy anonymizes a given resource label. + +- It is recommended that **APP** in **FILTER** be set to applications in the same trusted domain. Since a client may be forged, a security mechanism must be formed on the client when **APP** is used to reduce misuse risks. Generally, you are not advised to set **APP**. If it is set, pay attention to the risk of client spoofing.
+ +- For INSERT or MERGE INTO operations with the query clause, if the source table contains anonymized columns, the inserted or updated result in the preceding two operations is the anonymized value and cannot be restored. + +- When the built-in security policy is enabled, the ALTER TABLE EXCHANGE PARTITION statement fails to be executed if the source table contains anonymized columns. + +- If a dynamic data masking policy is configured for a table, grant the trigger permission of the table to other users with caution to prevent other users from using the trigger to bypass the masking policy. + +- A maximum of 98 dynamic data masking policies can be created. + +- Only the preceding seven preset masking functions can be used. + +- Only data with the resource labels containing the **COLUMN** attribute can be anonymized. + +- Only columns in base tables can be anonymized. + +- Only the data queried using **SELECT** can be anonymized. + +- Taking an IPv4 address as an example, the following formats are supported: + + | IP Address Format | Example | + | -------------------- | ------------------------ | + | Single IP address | 127.0.0.1 | + | IP address with mask | 127.0.0.1\|255.255.255.0 | + | CIDR IP address | 127.0.0.1/24 | + | IP address segment | 127.0.0.1-127.0.0.5 | + +## Dependencies + +None. + +## Related Pages + +[CREATE RESOURCE LABEL](../../reference-guide/sql-syntax/CREATE-RESOURCE-LABEL.md), [CREATE MASKING POLICY](../../reference-guide/sql-syntax/CREATE-MASKING-POLICY.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/database-security.md b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/database-security.md index cfae33c9..8eaf2905 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/database-security/database-security.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/database-security/database-security.md @@ -1,23 +1,23 @@ ---- -title: Database Security -summary: Database Security -author: Guo Huan -date: 2023-05-22 ---- - -# Database Security - -+ **[Access Control Model](1-access-control-model.md)** -+ **[Separation of Control and Access Permissions](2-separation-of-control-and-access-permissions.md)** -+ **[Database Encryption Authentication](3-database-encryption-authentication.md)** -+ **[Data Encryption and Storage](4-data-encryption-and-storage.md)** -+ **[Database Audit](5-database-audit.md)** -+ **[Network Communication Security](6-network-communication-security.md)** -+ **[Resource Label](7-resource-label.md)** -+ **[Unified Audit](8-unified-audit.md)** -+ **[Dynamic Data Masking](9-dynamic-data-anonymization.md)** -+ **[Row-Level Access Control](10-row-level-access-control.md)** -+ **[Password Strength Verification](11-password-strength-verification.md)** -+ **[Equality Query in a Fully-encrypted Database](12-equality-query-in-a-fully-encrypted-database.md)** -+ **[Ledger Database Mechanism](13-ledger-database-mechanism.md)** +--- +title: Database Security +summary: Database Security +author: Guo Huan +date: 2023-05-22 +--- + +# Database Security + ++ **[Access Control Model](1-access-control-model.md)** ++ **[Separation of Control and Access Permissions](2-separation-of-control-and-access-permissions.md)** ++ **[Database Encryption Authentication](3-database-encryption-authentication.md)** ++ **[Data Encryption and Storage](4-data-encryption-and-storage.md)** ++ **[Database Audit](5-database-audit.md)** ++ **[Network Communication Security](6-network-communication-security.md)**
++ **[Resource Label](7-resource-label.md)** ++ **[Unified Audit](8-unified-audit.md)** ++ **[Dynamic Data Masking](9-dynamic-data-anonymization.md)** ++ **[Row-Level Access Control](10-row-level-access-control.md)** ++ **[Password Strength Verification](11-password-strength-verification.md)** ++ **[Equality Query in a Fully-encrypted Database](12-equality-query-in-a-fully-encrypted-database.md)** ++ **[Ledger Database Mechanism](13-ledger-database-mechanism.md)** + **[Transparent Data Encryption](14-transparent-data-encryption.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/1-support-for-functions-and-stored-procedures.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/1-support-for-functions-and-stored-procedures.md index be2e8037..cab0122d 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/1-support-for-functions-and-stored-procedures.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/1-support-for-functions-and-stored-procedures.md @@ -1,42 +1,42 @@ ---- -title: Support for Functions and Stored Procedures -summary: Support for Functions and Stored Procedures -author: Guo Huan -date: 2022-05-07 ---- - -# Support for Functions and Stored Procedures - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Functions and stored procedures are important database objects. They encapsulate SQL statement sets used for certain functions so that the statements can be easily invoked. - -## Benefits - -1. Allows customers to modularize program design and encapsulate SQL statement sets, easy to invoke. -2. Caches the compilation results of stored procedures to accelerate SQL statement set execution. -3. Allows system administrators to restrict the permission for executing a specific stored procedure and controls access to the corresponding type of data. This prevents access from unauthorized users and ensures data security. - -## Description - -MogDB supports functions and stored procedures compliant with the SQL standard. The stored procedures are compatible with certain mainstream stored procedure syntax, improving their usability. - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - +--- +title: Support for Functions and Stored Procedures +summary: Support for Functions and Stored Procedures +author: Guo Huan +date: 2022-05-07 +--- + +# Support for Functions and Stored Procedures + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +Functions and stored procedures are important database objects. They encapsulate SQL statement sets used for certain functions so that the statements can be easily invoked. + +## Benefits + +1. Allows customers to modularize program design and encapsulate SQL statement sets, making them easy to invoke. +2. Caches the compilation results of stored procedures to accelerate SQL statement set execution. +3. Allows system administrators to restrict the permission for executing a specific stored procedure and control access to the corresponding type of data. This prevents access from unauthorized users and ensures data security. + +## Description + +MogDB supports functions and stored procedures compliant with the SQL standard. The stored procedures are compatible with certain mainstream stored procedure syntax, improving their usability.
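+ +The following is a minimal, hypothetical sketch of the capability described above (the table **t_job** and procedure **insert_job** are illustrative, not from this document): + +```sql +CREATE TABLE t_job (id int, name varchar(64)); + +CREATE OR REPLACE PROCEDURE insert_job(p_id IN int, p_name IN varchar) +AS +BEGIN +    -- the INSERT is encapsulated so that callers invoke a single object +    INSERT INTO t_job VALUES (p_id, p_name); +END; +/ + +CALL insert_job(1, 'analyst'); +```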
+ +## Enhancements + +None + +## Constraints + +None + +## Dependencies + +None + +## Related Pages + [Functions and Operators](../../reference-guide/functions-and-operators/functions-and-operators.md), [Stored Procedure](../../developer-guide/1-1-stored-procedure.md), [Overview of PL/pgSQL Functions](../../developer-guide/plpgsql/1-1-plpgsql-overview.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/10-autonomous-transaction.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/10-autonomous-transaction.md index 85e3d950..06e7e628 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/10-autonomous-transaction.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/10-autonomous-transaction.md @@ -1,48 +1,48 @@ ---- -title: Autonomous Transaction -summary: Autonomous Transaction -author: Guo Huan -date: 2022-05-07 ---- - -# Autonomous Transaction - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -An autonomous transaction is a type of transaction in which the commit of a sub-transaction is not affected by the commit or rollback of the main transaction. - -## Benefits - -This feature meets diversified application scenarios. - -## Description - -In an autonomous transaction, a specified type of SQL statements are executed in an independent transaction context during the execution of the main transaction. The commit and rollback operations of an autonomous transaction are not affected by the commit and rollback operations of the main transaction. - -User-defined functions and stored procedures support autonomous transactions. - -A typical application scenario is as follows: A table is used to record the operation information during the main transaction execution. When the main transaction fails to be rolled back, the operation information recorded in the table cannot be rolled back. - -## Enhancements - -None - -## Constraints - -- A trigger function does not support autonomous transactions. -- In the autonomous transaction block of a function or stored procedure, static SQL statements do not support variable transfer. -- Autonomous transactions do not support nesting. -- A function containing an autonomous transaction does not support the return value of parameter transfer. -- A stored procedure or function that contains an autonomous transaction does not support exception handling. - -## Dependencies - -None - -## Related Pages - +--- +title: Autonomous Transaction +summary: Autonomous Transaction +author: Guo Huan +date: 2022-05-07 +--- + +# Autonomous Transaction + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +An autonomous transaction is a type of transaction in which the commit of a sub-transaction is not affected by the commit or rollback of the main transaction. + +## Benefits + +This feature supports diversified application scenarios. + +## Description + +In an autonomous transaction, a specified type of SQL statements are executed in an independent transaction context during the execution of the main transaction. The commit and rollback operations of an autonomous transaction are not affected by the commit and rollback operations of the main transaction. + +User-defined functions and stored procedures support autonomous transactions.
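+ +As a minimal sketch (the log table **op_log** and procedure **record_op** are hypothetical), an autonomous transaction is declared with **PRAGMA AUTONOMOUS_TRANSACTION** in a stored procedure: + +```sql +CREATE TABLE op_log (msg varchar(128)); + +CREATE OR REPLACE PROCEDURE record_op(p_msg IN varchar) +AS +DECLARE +    PRAGMA AUTONOMOUS_TRANSACTION; +BEGIN +    -- this INSERT commits in an independent transaction context, +    -- so it survives a rollback of the main transaction +    INSERT INTO op_log VALUES (p_msg); +END; +/ +```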
+ +A typical application scenario is as follows: A table is used to record the operation information during the main transaction execution. When the main transaction fails and is rolled back, the operation information recorded in the table is not rolled back along with it. + +## Enhancements + +None + +## Constraints + +- A trigger function does not support autonomous transactions. +- In the autonomous transaction block of a function or stored procedure, static SQL statements do not support variable transfer. +- Autonomous transactions do not support nesting. +- A function containing an autonomous transaction does not support the return value of parameter transfer. +- A stored procedure or function that contains an autonomous transaction does not support exception handling. + +## Dependencies + +None + +## Related Pages + [Autonomous Transaction](../../developer-guide/autonomous-transaction/1-introduction-to-autonomous-transaction.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/11-global-temporary-table.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/11-global-temporary-table.md index 6e47db1b..5ad5931b 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/11-global-temporary-table.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/11-global-temporary-table.md @@ -1,49 +1,49 @@ ---- -title: Global Temporary Table -summary: Global Temporary Table -author: Guo Huan -date: 2022-05-07 ---- - -# Global Temporary Table - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -A temporary table does not guarantee persistency. Its life cycle is usually bound to a session or transaction, which can be used to store temporary data during processing and accelerate query. - -## Benefits - -This feature improves the expression capability and usability of temporary tables. - -## Description - -The metadata of the global temporary table is visible to all sessions. After the sessions end, the metadata still exists. The user data, indexes, and statistics of a session are isolated from those of another session. Each session can only view and modify the data submitted by itself. - -Global temporary tables have two schemas: ON COMMIT PRESERVE ROWS and ON COMMIT PRESERVE ROWS. In session-based ON COMMIT PRESERVE ROWS schema, user data is automatically cleared when a session ends. In transaction-based ON COMMIT DELETE ROWS schema, user data is automatically cleared when the commit or rollback operation is performed. If the **ON COMMIT** option is not specified during table creation, the session level is used by default. Different from local temporary tables, you can specify a schema that does not start with **pg_temp_** when creating a global temporary table. - -## Enhancements - -The processing of the global temporary table is added based on the local temporary table. - -## Constraints - -- Parallel scanning is not supported. -- Temp tablespace is not supported. -- Partitions are not supported. -- GIST indexes are not supported. -- The user-defined statistics **pg_statistic_ext** is not supported. -- ON COMMIT DROP is not supported. -- Hash bucket cluster storage is not supported. -- Row store is not supported.
- -## Dependencies - -None - -## Related Pages - +--- +title: Global Temporary Table +summary: Global Temporary Table +author: Guo Huan +date: 2022-05-07 +--- + +# Global Temporary Table + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +A temporary table does not guarantee persistency. Its life cycle is usually bound to a session or transaction, and it can be used to store temporary data during processing and to accelerate queries. + +## Benefits + +This feature improves the expression capability and usability of temporary tables. + +## Description + +The metadata of the global temporary table is visible to all sessions. After the sessions end, the metadata still exists. The user data, indexes, and statistics of a session are isolated from those of another session. Each session can only view and modify the data submitted by itself. + +Global temporary tables have two schemas: ON COMMIT PRESERVE ROWS and ON COMMIT DELETE ROWS. In the session-based ON COMMIT PRESERVE ROWS schema, user data is automatically cleared when a session ends. In the transaction-based ON COMMIT DELETE ROWS schema, user data is automatically cleared when the commit or rollback operation is performed. If the **ON COMMIT** option is not specified during table creation, the session level is used by default. Different from local temporary tables, you can specify a schema that does not start with **pg_temp_** when creating a global temporary table. + +## Enhancements + +The processing of the global temporary table is added based on the local temporary table. + +## Constraints + +- Parallel scanning is not supported. +- Temp tablespace is not supported. +- Partitions are not supported. +- GIST indexes are not supported. +- The user-defined statistics **pg_statistic_ext** is not supported. +- ON COMMIT DROP is not supported. +- Hash bucket cluster storage is not supported. +- Row store is not supported. + +## Dependencies + +None + +## Related Pages + [Global Temporary Table](../../reference-guide/guc-parameters/global-temporary-table.md), [Global Temporary Table Functions](../../reference-guide/functions-and-operators/global-temporary-table-functions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/12-pseudocolumn-rownum.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/12-pseudocolumn-rownum.md index 202de848..54c11da6 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/12-pseudocolumn-rownum.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/12-pseudocolumn-rownum.md @@ -1,47 +1,47 @@ ---- -title: Pseudocolumn ROWNUM -summary: Pseudocolumn ROWNUM -author: Guo Huan -date: 2022-05-07 ---- - -# Pseudocolumn ROWNUM - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -ROWNUM is a sequence number generated for each record in the query result. The sequence number starts from 1 and is unique. - -## Benefits - -- This feature is compatible with Oracle features, facilitating database migration. -- Similar to the LIMIT feature, this feature can filter out the first *n* records in the result set. - -## Description - -ROWNUM (pseudocolumn), which is used to label the records that meet conditions in the SQL query in sequence. In the query result, the value of **ROWNUM** in the first line is **1**, the value of **ROWNUM** in the second line is **2**, and so on.
The value of **ROWNUM** in the _n_th line is *n*. This feature is used to filter the first *n* rows of data in the query result set, which is similar to the LIMIT function in MogDB. - -## Enhancements - -During internal execution, the optimizer rewrites ROWNUM into LIMIT to accelerate the execution speed. - -## Constraints - -- Do not use the pseudocolumn ROWNUM as an alias to avoid ambiguity in SQL statements. -- Do not use ROWNUM when creating an index. Bad example: **create index index_name on table(rownum);** -- Do not use ROWNUM as the default value when creating a table. Bad example: **create table table_name(id int default rownum);** -- Do not use ROWNUM as an alias in the WHERE clause. Bad example: **select rownum rn from table where rn < 5;** -- Do not use ROWNUM when inserting data. Bad example: **insert into table values (rownum,'blue')** -- Do not use ROWNUM in a table-less query. Bad example: **select \* from (values(rownum,1)), x(a,b);** -- If the HAVING clause contains ROWNUM (and is not in the aggregate function), the GROUP BY clause must contain ROWNUM (and is not in the aggregate function). - -## Dependencies - -None - -## Related Pages - +--- +title: Pseudocolumn ROWNUM +summary: Pseudocolumn ROWNUM +author: Guo Huan +date: 2022-05-07 +--- + +# Pseudocolumn ROWNUM + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +ROWNUM is a sequence number generated for each record in the query result. The sequence number starts from 1 and is unique. + +## Benefits + +- This feature is compatible with Oracle features, facilitating database migration. +- Similar to the LIMIT feature, this feature can filter out the first *n* records in the result set. + +## Description + +ROWNUM is a pseudocolumn that sequentially labels the records that meet the conditions of an SQL query. In the query result, the value of **ROWNUM** in the first line is **1**, the value of **ROWNUM** in the second line is **2**, and so on. The value of **ROWNUM** in the _n_th line is *n*. This feature is used to filter the first *n* rows of data in the query result set, which is similar to the LIMIT function in MogDB. + +## Enhancements + +During internal execution, the optimizer rewrites ROWNUM into LIMIT to accelerate the execution speed. + +## Constraints + +- Do not use the pseudocolumn ROWNUM as an alias to avoid ambiguity in SQL statements. +- Do not use ROWNUM when creating an index. Bad example: **create index index_name on table(rownum);** +- Do not use ROWNUM as the default value when creating a table. Bad example: **create table table_name(id int default rownum);** +- Do not use ROWNUM as an alias in the WHERE clause. Bad example: **select rownum rn from table where rn < 5;** +- Do not use ROWNUM when inserting data. Bad example: **insert into table values (rownum,'blue')** +- Do not use ROWNUM in a table-less query. Bad example: **select \* from (values(rownum,1)), x(a,b);** +- If the HAVING clause contains ROWNUM (and it is not in an aggregate function), the GROUP BY clause must also contain ROWNUM (and it must not be in an aggregate function).
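+ +Complementing the bad examples above, a minimal positive illustration of the filtering behavior (the table **staff** is hypothetical): + +```sql +-- returns only the first three qualifying rows, much like LIMIT 3 +SELECT rownum, name FROM staff WHERE rownum <= 3; +```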
+ +## Dependencies + +None + +## Related Pages + [Simple Expressions](../../reference-guide/sql-reference/expressions/simple-expressions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/13-stored-procedure-debugging.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/13-stored-procedure-debugging.md index 2528c160..7ee1c260 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/13-stored-procedure-debugging.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/13-stored-procedure-debugging.md @@ -1,42 +1,42 @@ ---- -title: Stored Procedure Debugging -summary: Stored Procedure Debugging -author: Guo Huan -date: 2022-05-07 ---- - -# Stored Procedure Debugging - -## Availability - -This feature was introduced in MogDB 1.1.0. After the third-party library code directory structure was adjusted, this feature was temporarily deleted and is now available since MogDB 1.1.0. - -## Introduction - -This feature provides a group of APIs for debugging stored procedures, such as breakpoint debugging and variable printing. - -## Benefits - -This feature improves user experience in developing stored procedures based on MogDB. - -## Description - -Stored procedures are important database objects. They encapsulate SQL statement sets used for certain functions so that the statements can be easily invoked. A stored procedure usually contains many SQL statements and procedural execution structures, depending on the service scale. However, writing a large stored procedure is usually accompanied by logic bugs. It is difficult or even impossible to find the bugs by only executing the stored procedure. Therefore, a debugging tool is required. - -The stored procedure debugging tool provides a group of debugging APIs to enable the stored procedure to be executed step by step. During the execution, you can set breakpoints and print variables so that SQL developers can detect and correct errors in time and develop functions more efficiently and with high quality. - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - +--- +title: Stored Procedure Debugging +summary: Stored Procedure Debugging +author: Guo Huan +date: 2022-05-07 +--- + +# Stored Procedure Debugging + +## Availability + +This feature was introduced in MogDB 1.1.0. After the third-party library code directory structure was adjusted, it was temporarily removed; it is available again since MogDB 1.1.0. + +## Introduction + +This feature provides a group of APIs for debugging stored procedures, such as breakpoint debugging and variable printing. + +## Benefits + +This feature improves user experience in developing stored procedures based on MogDB. + +## Description + +Stored procedures are important database objects. They encapsulate SQL statement sets used for certain functions so that the statements can be easily invoked. A stored procedure usually contains many SQL statements and procedural execution structures, depending on the service scale. However, writing a large stored procedure is usually accompanied by logic bugs. It is difficult or even impossible to find the bugs by only executing the stored procedure. Therefore, a debugging tool is required. + +The stored procedure debugging tool provides a group of debugging APIs to enable the stored procedure to be executed step by step.
During the execution, you can set breakpoints and print variables so that SQL developers can detect and correct errors in time and develop functions more efficiently and with higher quality. + +## Enhancements + +None + +## Constraints + +None + +## Dependencies + +None + +## Related Pages + [Debugging](../../developer-guide/plpgsql/1-13-debugging.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/14-jdbc-client-load-balancing-and-readwrite-isolation.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/14-jdbc-client-load-balancing-and-readwrite-isolation.md index f03912b7..e586adb6 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/14-jdbc-client-load-balancing-and-readwrite-isolation.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/14-jdbc-client-load-balancing-and-readwrite-isolation.md @@ -1,40 +1,40 @@ ---- -title: JDBC Client Load Balancing and Read/Write Isolation -summary: JDBC Client Load Balancing and Read/Write Isolation -author: Guo Huan -date: 2022-05-07 ---- - -# JDBC Client Load Balancing and Read/Write Isolation - -## Availability - -This feature is available since MogDB 2.1.0. - -## Introduction - -The JDBC client provides load balancing and read/write isolation capabilities. - -## Benefits - -Load balancing and read/write isolation can be configured on the JDBC client. - -## Description - -The IP addresses and port numbers of multiple nodes on the client are configured to adapt to HA switchover between multiple AZs and remote DR switchover. The connection-level read/write isolation configuration is supported. Preferentially connecting to read-only nodes is supported. Multiple read-only nodes are evenly distributed. - -## Enhancements - -None. - -## Constraints - -None. - -## Dependencies - -None. - -## Related Pages - +--- +title: JDBC Client Load Balancing and Read/Write Isolation +summary: JDBC Client Load Balancing and Read/Write Isolation +author: Guo Huan +date: 2022-05-07 +--- + +# JDBC Client Load Balancing and Read/Write Isolation + +## Availability + +This feature is available since MogDB 2.1.0. + +## Introduction + +The JDBC client provides load balancing and read/write isolation capabilities. + +## Benefits + +Load balancing and read/write isolation can be configured on the JDBC client. + +## Description + +The client can be configured with the IP addresses and port numbers of multiple nodes to adapt to HA switchover across multiple AZs and remote DR switchover. Connection-level read/write isolation can be configured, read-only nodes can be connected preferentially, and load is evenly distributed across multiple read-only nodes. + +## Enhancements + +None. + +## Constraints + +None. + +## Dependencies + +None.
+ +## Related Pages + [Example: JDBC Primary And Backup Cluster Load Balancing](../../developer-guide/dev/2-development-based-on-jdbc/example-jdbc-primary-and-backup-cluster-load-balancing.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/15-in-place-update-storage-engine.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/15-in-place-update-storage-engine.md index bedd094e..7733ab7a 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/15-in-place-update-storage-engine.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/15-in-place-update-storage-engine.md @@ -1,40 +1,40 @@ ---- -title: In-place Update Storage Engine -summary: In-place Update Storage Engine -author: Guo Huan -date: 2022-05-07 ---- - -# In-place Update Storage Engine - -## Availability - -This feature is available since MogDB 2.1.0. - -## Introduction - -The in-place update storage engine is a new storage mode added to MogDB. The row storage engine used by the earlier versions of MogDB is in append update mode. The append update has good performance in addition, deletion, and HOT (Heap Only Tuple) update (that is, update on the same page) in the service. However, in a non-HOT UPDATE scenario across data pages, garbage collection is not efficient. The Ustore storage engine can solve this problem. - -## Benefits - -The in-place update storage engine can effectively reduce storage space occupation after tuples are updated for multiple times. - -## Description - -The in-place update storage engine solves the problems of space expansion and large tuples of the Append update storage engine. The design of efficient rollback segments is the basis of the in-place update storage engine. - -## Enhancements - -None. - -## Constraints - -None. - -## Dependencies - -None. - -## Related Pages - +--- +title: In-place Update Storage Engine +summary: In-place Update Storage Engine +author: Guo Huan +date: 2022-05-07 +--- + +# In-place Update Storage Engine + +## Availability + +This feature is available since MogDB 2.1.0. + +## Introduction + +The in-place update storage engine is a new storage mode added to MogDB. The row storage engine used by the earlier versions of MogDB is in append update mode. Append update performs well for insertion, deletion, and HOT (Heap Only Tuple) update (that is, update on the same page). However, in a non-HOT UPDATE scenario across data pages, garbage collection is not efficient. The Ustore storage engine can solve this problem. + +## Benefits + +The in-place update storage engine can effectively reduce storage space occupation after tuples are updated multiple times. + +## Description + +The in-place update storage engine solves the problems of space expansion and large tuples of the append update storage engine. The design of efficient rollback segments is the basis of the in-place update storage engine. + +## Enhancements + +None. + +## Constraints + +None. + +## Dependencies + +None.
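+ +As a minimal sketch (assuming the **storage_type** storage parameter covered in Configuring Ustore; the table name is hypothetical), a table can be placed on the in-place update engine at creation time: + +```sql +-- create a row-store table on the Ustore (in-place update) engine +CREATE TABLE orders_ustore (id int, note text) WITH (storage_type = ustore); +```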
+ +## Related Pages + [Configuring Ustore](../../performance-tuning/system-tuning/configuring-ustore.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/16-publication-subscription.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/16-publication-subscription.md index 882ff8cb..3c36b327 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/16-publication-subscription.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/16-publication-subscription.md @@ -1,56 +1,56 @@ ---- -title: Publication-Subscription -summary: Publication-Subscription -author: Guo Huan -date: 2022-05-10 ---- - -# Publication-Subscription - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -Publication-subscription is implemented based on logical replication, with one or more subscribers subscribing to one or more publications on a publisher node. The subscriber pulls data from the publications they subscribe to. Data across database clusters can be synchronized in real time. - -## Benefits - -The typical application scenarios of publication-subscription are as follows: - -- Sending incremental changes in a database or a subset of a database to subscribers as they occur -- Firing triggers when changes reach subscribers -- Consolidating multiple databases into a single one (for example, for analysis purposes) - -## Description - -Changes on the publisher are sent to the subscriber as they occur in real time. The subscriber applies the published data in the same order as the publisher, so that transactional consistency is guaranteed for publications within a single subscription. This method of data replication is sometimes called transactional replication. - -The subscriber database behaves in the same way as any other MogDB instance and can be used as a publisher for other databases by defining its own publications. When the subscriber is treated as read-only by an application, there will be no conflicts in a single subscription. On the other side, conflicts may occur if other write operations are performed by the application or by other subscribers in the same set of tables. - -## Enhancements - -In version 3.1.0, this feature is enhanced as follows: - -- gs_probackup can be used to back up the logical replication slot of the publisher. In this way, the replication slot is not lost after the publisher uses gs_probackup to back up and restore data, ensuring that the publication-subscription connections are normal. - -- Publication-subscription can synchronize basic data. Before a publication-subscription relationship is created, data already exists in the table of the publisher. The basic data is synchronized to the subscriber after the subscription is created. - -## Constraints - -Publication-subscription is implemented based on logical replication and inherits all restrictions of logical replication. In addition, publication-subscription has the following additional restrictions or missing functions. - -- Database schemas and DDL commands are not replicated. Initial schemas can be manually copied by using **gs_dump --schema-only**. Subsequent schema changes need to be manually synchronized. -- Sequence data is not replicated. 
The data in serial or identifier columns backed by the sequence in the background will be replicated as part of the table, but the sequence itself will still display the start value on the subscriber. If the subscriber is used as a read-only database, this is usually not a problem. However, if some kind of switchover or failover to the subscriber database is intended, the sequence needs to be updated to the latest value, either by copying the current data from the publisher (perhaps using **gs_dump**) or by determining a sufficiently large value from the tables themselves. -- Only tables, including partitioned tables, can be replicated. Attempts to replicate other types of relations, such as views, materialized views, or foreign tables, will result in errors. -- Multiple subscriptions in the same database cannot subscribe to the same publication (that is, the same published table). Otherwise, duplicate data or primary key conflicts may occur. -- If a published table contains data types that do not support B-tree or hash indexes (such as the geography types), the table must have a primary key so that UPDATE and DELETE operations can be successfully replicated to the subscription side. Otherwise, the replication will fail, and the message “FATAL: could not identify an equality operator for type xx” will be displayed on the subscription side. - -## Dependencies - -Publication-subscription depends on the logical replication function. - -## Related Pages - +--- +title: Publication-Subscription +summary: Publication-Subscription +author: Guo Huan +date: 2022-05-10 +--- + +# Publication-Subscription + +## Availability + +This feature is available since MogDB 3.0.0. + +## Introduction + +Publication-subscription is implemented based on logical replication, with one or more subscribers subscribing to one or more publications on a publisher node. Subscribers pull data from the publications they subscribe to. Data across database clusters can be synchronized in real time. + +## Benefits + +The typical application scenarios of publication-subscription are as follows: + +- Sending incremental changes in a database or a subset of a database to subscribers as they occur +- Firing triggers when changes reach subscribers +- Consolidating multiple databases into a single one (for example, for analysis purposes) + +## Description + +Changes on the publisher are sent to the subscriber as they occur in real time. The subscriber applies the published data in the same order as the publisher, so that transactional consistency is guaranteed for publications within a single subscription. This method of data replication is sometimes called transactional replication. + +The subscriber database behaves in the same way as any other MogDB instance and can be used as a publisher for other databases by defining its own publications. When the subscriber is treated as read-only by an application, there will be no conflicts in a single subscription. On the other hand, conflicts may occur if other write operations are performed by the application or by other subscribers in the same set of tables. + +## Enhancements + +In version 3.1.0, this feature is enhanced as follows: + +- gs_probackup can be used to back up the logical replication slot of the publisher. In this way, the replication slot is not lost after the publisher uses gs_probackup to back up and restore data, ensuring that the publication-subscription connections are normal. + +- Publication-subscription can synchronize basic data.
If data already exists in the publisher's table before the publication-subscription relationship is created, the basic data is synchronized to the subscriber after the subscription is created. + +## Constraints + +Publication-subscription is implemented based on logical replication and inherits all restrictions of logical replication. In addition, publication-subscription has the following additional restrictions or missing functions. + +- Database schemas and DDL commands are not replicated. Initial schemas can be manually copied by using **gs_dump --schema-only**. Subsequent schema changes need to be manually synchronized. +- Sequence data is not replicated. The data in serial or identifier columns backed by the sequence in the background will be replicated as part of the table, but the sequence itself will still display the start value on the subscriber. If the subscriber is used as a read-only database, this is usually not a problem. However, if some kind of switchover or failover to the subscriber database is intended, the sequence needs to be updated to the latest value, either by copying the current data from the publisher (perhaps using **gs_dump**) or by determining a sufficiently large value from the tables themselves. +- Only tables, including partitioned tables, can be replicated. Attempts to replicate other types of relations, such as views, materialized views, or foreign tables, will result in errors. +- Multiple subscriptions in the same database cannot subscribe to the same publication (that is, the same published table). Otherwise, duplicate data or primary key conflicts may occur. +- If a published table contains data types that do not support B-tree or hash indexes (such as the geography types), the table must have a primary key so that UPDATE and DELETE operations can be successfully replicated to the subscription side. Otherwise, the replication will fail, and the message “FATAL: could not identify an equality operator for type xx” will be displayed on the subscription side. + +## Dependencies + +Publication-subscription depends on the logical replication function. + +## Related Pages + [CREATE PUBLICATION](../../reference-guide/sql-syntax/CREATE-PUBLICATION.md), [CREATE SUBSCRIPTION](../../reference-guide/sql-syntax/CREATE-SUBSCRIPTION.md), [Publication-Subscription](../../developer-guide/logical-replication/publication-subscription/publication-subscription.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/18-data-compression-in-oltp-scenarios.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/18-data-compression-in-oltp-scenarios.md index 8acd62e0..83cfa88a 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/18-data-compression-in-oltp-scenarios.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/18-data-compression-in-oltp-scenarios.md @@ -1,41 +1,41 @@ ---- -title: Data Compression in OLTP Scenarios -summary: Data Compression in OLTP Scenarios -author: Guo Huan -date: 2022-05-10 ---- - -# Data Compression in OLTP Scenarios - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -The feature supports row-store data compression in OLTP scenarios, provides a general compression algorithm, and implements transparent compression of data pages and maintenance of page storage locations to achieve high compression and high performance.
Disk persistence is implemented using two types of files: compressed address file (with the file name extension .pca) and compressed data file (with the file name extension .pcd). - -## Benefits - -Typically, it is applicable where the database disk space needs to be reduced. - -## Description - -Data compression in OLTP scenarios can reduce the disk storage space of row tables and index data and improve performance in I/O-intensive database systems. - -## Constraints - -- Only heap-organized data table compression is supported, i.e., normal row-storage table, Btree index compression. -- The operating system must support punch hole operations. -- The data backup media must support punch hole operation. -- Do not support the modification of compression-related parameters. Do not support the conversion of uncompressed tables into compressed tables. -- Compression and decompression operations will have a certain impact on CPU and performance. The advantage is that it increases the storage capacity of the disk, improves disk utilization, and saves disk IO and reduces disk IO pressure. - -## Dependencies - -- Requires the database to support double write operations. -- Compression using open source compression algorithms PGLZ, ZSTD. - -## Related Pages - +--- +title: Data Compression in OLTP Scenarios +summary: Data Compression in OLTP Scenarios +author: Guo Huan +date: 2022-05-10 +--- + +# Data Compression in OLTP Scenarios + +## Availability + +This feature is available since MogDB 3.0.0. + +## Introduction + +The feature supports row-store data compression in OLTP scenarios, provides a general compression algorithm, and implements transparent compression of data pages and maintenance of page storage locations to achieve high compression and high performance. Disk persistence is implemented using two types of files: the compressed address file (with the file name extension .pca) and the compressed data file (with the file name extension .pcd). + +## Benefits + +Typically, this feature is applicable where the database disk space needs to be reduced. + +## Description + +Data compression in OLTP scenarios can reduce the disk storage space of row-store tables and index data and improve performance in I/O-intensive database systems. + +## Constraints + +- Only compression of heap-organized data tables is supported, that is, normal row-store tables and B-tree indexes. +- The operating system must support punch-hole operations. +- The data backup media must support punch-hole operations. +- Modifying compression-related parameters is not supported. Converting uncompressed tables into compressed tables is not supported. +- Compression and decompression have a certain impact on CPU usage and performance. In exchange, they increase the effective storage capacity of the disk, improve disk utilization, and reduce disk I/O and I/O pressure. + +## Dependencies + +- The database must support double write operations. +- Compression uses the open-source compression algorithms PGLZ and ZSTD.
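+ +A minimal sketch of enabling row-store compression at table creation (the table name and parameter values are illustrative; **compresstype** and **compress_chunk_size** are assumed from the related GUC page): + +```sql +-- compresstype = 2 selects ZSTD; compress_chunk_size sets the chunk granularity +CREATE TABLE sales_compressed (id int, payload text) +WITH (compresstype = 2, compress_chunk_size = 1024); +```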
+ +## Related Pages + [Parameters Related to Efficient Data Compression Algorithms](../../reference-guide/guc-parameters/parameters-related-to-efficient-data-compression-algorithms.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/2-sql-hints.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/2-sql-hints.md index 603b081e..a5036048 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/2-sql-hints.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/2-sql-hints.md @@ -1,44 +1,44 @@ ---- -title: SQL Hints -summary: SQL Hints -author: Guo Huan -date: 2022-05-07 ---- - -# SQL Hints - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -SQL hints can be used to override execution plans. - -## Benefits - -Improves SQL query performance. - -## Description - -In plan hints, you can specify a join order; join, stream, and scan operations, the number of rows in a result, and redistribution skew information to tune an execution plan, improving query performance. - -## Enhancements - -Support planhint to set session-level optimizer parameters. - -Support specifying subqueries not to be expanded. - -Support disabling gpc for single query. - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - +--- +title: SQL Hints +summary: SQL Hints +author: Guo Huan +date: 2022-05-07 +--- + +# SQL Hints + +## Availability + +This feature is available as of MogDB 1.1.0. + +## Introduction + +SQL hints can be used to override execution plans. + +## Benefits + +Improves SQL query performance. + +## Description + +In plan hints, you can specify a join order; join, stream, and scan operations; the number of rows in a result; and redistribution skew information to tune an execution plan, improving query performance. + +## Enhancements + +Plan hints can be used to set session-level optimizer parameters. + +Subqueries can be specified not to be expanded. + +The global plan cache (GPC) can be disabled for a single query. + +## Constraints + +None + +## Dependencies + +None + +## Related Pages + [Hint Based Tuning](../../performance-tuning/sql-tuning/hint-based-tuning.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/3-full-text-indexing.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/3-full-text-indexing.md index a47dfd9a..4168e4f7 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/3-full-text-indexing.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/3-full-text-indexing.md @@ -1,57 +1,57 @@ ---- -title: Full-Text Indexing -summary: Full-Text Indexing -author: Guo Huan -date: 2022-05-07 ---- - -# Full-Text Indexing - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -MogDB full-text indexing allows documents to be preprocessed and facilitates subsequent search. - -## Benefits - -MogDB full-text indexing provides the capability to identify natural-language documents that satisfy a query and sort them by relevance.
- -## Description - -The preprocessing process of creating a full-text index includes: - -- Parsing documents into tokens - - It is useful to identify various classes of tokens, for example, numbers, words, compound words, and email addresses, so that they can be processed differently. In principle, token classes depend on the specific application, but for most purposes it is adequate to use a predefined set of classes. - -- Converting tokens into lexemes - - A lexeme is a string, just like a token, but it has been normalized so that different forms of the same word are made alike. For example, normalization almost always includes folding upper-case letters to lower-case, and often involves removal of suffixes (such as **s** or **es** in English). This allows searches to find variant forms of the same word, without entering all the possible variants. Also, this step typically eliminates stop words, which are so common and usually useless for searching. (In short, tokens are raw fragments of the document text, while lexemes are words that are believed useful for indexing and searching.) MogDB uses dictionaries to perform this step and provides various standard dictionaries. - -- Storing preprocessed documents optimized for searching - - For example, each document can be represented as a sorted array of normalized lexemes. Along with the lexemes, it is often desirable to store positional information for proximity ranking. Therefore, a document that contains a more “dense” area of query words is assigned with a higher rank than the one with scattered query words. Dictionaries allow fine-grained control over how tokens are normalized. With appropriate dictionaries, you can define stop words that should not be indexed. - -## Enhancements - -None - -## Constraints - -The current limitations of MogDB's text search features are: - -- The length of each lexeme must be less than 2 KB. -- The length of a **tsvector** (lexemes + positions) must be less than 1 MB. -- Position values in **tsvector** must be greater than 0 and less than or equal to 16383. -- No more than 256 positions per lexeme. Excessive positions, if any, will be discarded. - -## Dependencies - -None - -## Related Pages - +--- +title: Full-Text Indexing +summary: Full-Text Indexing +author: Guo Huan +date: 2022-05-07 +--- + +# Full-Text Indexing + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +MogDB full-text indexing allows documents to be preprocessed and facilitates subsequent search. + +## Benefits + +MogDB full-text indexing provides the capability to identify natural-language documents that satisfy a query and sort them by relevance. + +## Description + +The preprocessing process of creating a full-text index includes: + +- Parsing documents into tokens + + It is useful to identify various classes of tokens, for example, numbers, words, compound words, and email addresses, so that they can be processed differently. In principle, token classes depend on the specific application, but for most purposes it is adequate to use a predefined set of classes. + +- Converting tokens into lexemes + + A lexeme is a string, just like a token, but it has been normalized so that different forms of the same word are made alike. For example, normalization almost always includes folding upper-case letters to lower-case, and often involves removal of suffixes (such as **s** or **es** in English). This allows searches to find variant forms of the same word, without entering all the possible variants. 
Also, this step typically eliminates stop words, which are so common and usually useless for searching. (In short, tokens are raw fragments of the document text, while lexemes are words that are believed useful for indexing and searching.) MogDB uses dictionaries to perform this step and provides various standard dictionaries. + +- Storing preprocessed documents optimized for searching + + For example, each document can be represented as a sorted array of normalized lexemes. Along with the lexemes, it is often desirable to store positional information for proximity ranking. Therefore, a document that contains a more “dense” area of query words is assigned with a higher rank than the one with scattered query words. Dictionaries allow fine-grained control over how tokens are normalized. With appropriate dictionaries, you can define stop words that should not be indexed. + +## Enhancements + +None + +## Constraints + +The current limitations of MogDB's text search features are: + +- The length of each lexeme must be less than 2 KB. +- The length of a **tsvector** (lexemes + positions) must be less than 1 MB. +- Position values in **tsvector** must be greater than 0 and less than or equal to 16383. +- No more than 256 positions per lexeme. Excessive positions, if any, will be discarded. + +## Dependencies + +None + +## Related Pages + [Full Text Search](../../reference-guide/sql-reference/full-text-search/full-text-search.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/4-copy-interface-for-error-tolerance.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/4-copy-interface-for-error-tolerance.md index c2aa43cd..0dc37de4 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/4-copy-interface-for-error-tolerance.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/4-copy-interface-for-error-tolerance.md @@ -1,40 +1,40 @@ ---- -title: Copy Interface for Error Tolerance -summary: Copy Interface for Error Tolerance -author: Guo Huan -date: 2022-05-07 ---- - -# Copy Interface for Error Tolerance - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Certain errors that occur during the copy process are imported to a specified error table without interrupting the process. - -## Benefits - -Refine the copy function and improve the tolerance and robustness to common errors such as invalid formats. - -## Description - -MogDB provides the encapsulated copy error tables for creating functions and allows users to specify error tolerance options when using the **Copy From** statement. In this way, errors related to parsing, data format, and character set during the execution of the **Copy From** statement are recorded in the error table instead of being reported and interrupted. Even if a small amount of data in the target file of **Copy From** is incorrect, the data can be imported to the database. You can locate and rectify the fault in the error table later. - -## Enhancements - -None - -## Constraints - -For details, see “Importing Data > Running the COPY FROM STDIN Statement to Import Data > [Handling Import Errors](../../administrator-guide/importing-and-exporting-data/importing-data/3-running-the-COPY-FROM-STDIN-statement-to-import-data.md#handling-import-errors)” in the *Administrator Guide*. 
- -## Dependencies - -None - -## Related Pages - +--- +title: Copy Interface for Error Tolerance +summary: Copy Interface for Error Tolerance +author: Guo Huan +date: 2022-05-07 +--- + +# Copy Interface for Error Tolerance + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +Certain errors that occur during the copy process are recorded in a specified error table without interrupting the process. + +## Benefits + +Refines the copy function and improves its tolerance of and robustness against common errors such as invalid formats. + +## Description + +MogDB provides the encapsulated copy error tables for creating functions and allows users to specify error tolerance options when using the **Copy From** statement. In this way, errors related to parsing, data format, and character set during the execution of the **Copy From** statement are recorded in the error table instead of being reported and interrupting the statement. Even if a small amount of data in the target file of **Copy From** is incorrect, the data can be imported to the database. You can locate and rectify the fault in the error table later. + +## Enhancements + +None + +## Constraints + +For details, see “Importing Data > Running the COPY FROM STDIN Statement to Import Data > [Handling Import Errors](../../administrator-guide/importing-and-exporting-data/importing-data/3-running-the-COPY-FROM-STDIN-statement-to-import-data.md#handling-import-errors)” in the *Administrator Guide*. + +## Dependencies + +None + +## Related Pages + [Handling Import Errors](../../administrator-guide/importing-and-exporting-data/importing-data/3-running-the-COPY-FROM-STDIN-statement-to-import-data.md#handling-import-errors) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/6-support-for-advanced-analysis-functions.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/6-support-for-advanced-analysis-functions.md index 7cba3baf..8a56fee7 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/6-support-for-advanced-analysis-functions.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/6-support-for-advanced-analysis-functions.md @@ -1,61 +1,61 @@ ---- -title: Support for Advanced Analysis Functions -summary: Support for Advanced Analysis Functions -author: Guo Huan -date: 2022-05-07 ---- - -# Support for Advanced Analysis Functions - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -None - -## Benefits - -Window functions are provided for advanced data analysis and processing. The window function groups the data in a table in advance. Each row belongs to a specific group. Then, a series of association analysis calculations are performed on the group. In this way, some attributes of each tuple in the set and association information with other tuples can be mined. - -## Description - -The following uses an example to describe the window analysis function: Compare the salary of each person in a department with the average salary of the department.
- -```sql -SELECT depname, empno, salary, avg(salary) OVER (PARTITION BY depname) FROM empsalary; -depname | empno | salary | avg ------------+-------+--------+----------------------- -develop | 11 | 5200 | 5020.0000000000000000 -develop | 7 | 4200 | 5020.0000000000000000 -develop | 9 | 4500 | 5020.0000000000000000 -develop | 8 | 6000 | 5020.0000000000000000 -develop | 10 | 5200 | 5020.0000000000000000 -personnel | 5 | 3500 | 3700.0000000000000000 -personnel | 2 | 3900 | 3700.0000000000000000 -sales | 3 | 4800 | 4866.6666666666666667 -sales | 1 | 5000 | 4866.6666666666666667 -sales | 4 | 4800 | 4866.6666666666666667 -(10 rows) -``` - -The analysis function **avg(salary) OVER (PARTITION BY depname)** easily calculates each employee's salary and the average salary of the department. - -Currently, the system supports the following analysis functions: **row_number()**, **rank()**, **dense_rank()**, **percent_rank()**, **cume_dist()**, **ntile()**, **lag()**, **lead()**, **first_value()**, **last_value()**, and **nth_value()**. For details about functions and statements, see “Functions and Operators > [Window Functions](../../reference-guide/functions-and-operators/window-functions.md)” in the *Reference Guide*. - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - +--- +title: Support for Advanced Analysis Functions +summary: Support for Advanced Analysis Functions +author: Guo Huan +date: 2022-05-07 +--- + +# Support for Advanced Analysis Functions + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +None + +## Benefits + +Window functions are provided for advanced data analysis and processing. The window function groups the data in a table in advance. Each row belongs to a specific group. Then, a series of association analysis calculations are performed on the group. In this way, some attributes of each tuple in the set and association information with other tuples can be mined. + +## Description + +The following uses an example to describe the window analysis function: Compare the salary of each person in a department with the average salary of the department. + +```sql +SELECT depname, empno, salary, avg(salary) OVER (PARTITION BY depname) FROM empsalary; +depname | empno | salary | avg +-----------+-------+--------+----------------------- +develop | 11 | 5200 | 5020.0000000000000000 +develop | 7 | 4200 | 5020.0000000000000000 +develop | 9 | 4500 | 5020.0000000000000000 +develop | 8 | 6000 | 5020.0000000000000000 +develop | 10 | 5200 | 5020.0000000000000000 +personnel | 5 | 3500 | 3700.0000000000000000 +personnel | 2 | 3900 | 3700.0000000000000000 +sales | 3 | 4800 | 4866.6666666666666667 +sales | 1 | 5000 | 4866.6666666666666667 +sales | 4 | 4800 | 4866.6666666666666667 +(10 rows) +``` + +The analysis function **avg(salary) OVER (PARTITION BY depname)** easily calculates each employee's salary and the average salary of the department. + +Currently, the system supports the following analysis functions: **row_number()**, **rank()**, **dense_rank()**, **percent_rank()**, **cume_dist()**, **ntile()**, **lag()**, **lead()**, **first_value()**, **last_value()**, and **nth_value()**. For details about functions and statements, see “Functions and Operators > [Window Functions](../../reference-guide/functions-and-operators/window-functions.md)” in the *Reference Guide*. 
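+
+As a further illustration of the functions listed above, a ranking query over the same empsalary table might look like the following (a minimal sketch: rank() numbers each employee within the department by salary, and the other listed functions follow the same OVER clause pattern):
+
+```sql
+SELECT depname, empno, salary,
+       rank() OVER (PARTITION BY depname ORDER BY salary DESC) AS salary_rank
+FROM empsalary;
+```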
+ +## Enhancements + +None + +## Constraints + +None + +## Dependencies + +None + +## Related Pages + [Window Functions(Analysis Functions)](../../reference-guide/functions-and-operators/window-functions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/7-materialized-view.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/7-materialized-view.md index ea7a56d5..e6b36d25 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/7-materialized-view.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/7-materialized-view.md @@ -1,40 +1,40 @@ ---- -title: Materialized View -summary: Materialized View -author: Guo Huan -date: 2022-05-07 ---- - -# Materialized View - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -A materialized view is a special physical table, which is relative to a common view. A common view is a virtual table and has many application limitations. Any query on a view is actually converted into a query on an SQL statement, and performance is not actually improved. The materialized view actually stores the results of the statements executed by the SQL statement, and is used to cache the results. - -## Benefits - -The materialized view function is used to improve query efficiency. - -## Description - -Full materialized views and incremental materialized views are supported. Full materialized views can only be updated in full mode. Incremental materialized views can be updated asynchronously. You can run statements to update new data to materialized views. - -## Enhancements - -None - -## Constraints - -Only simple filter queries and UNION ALL statements are supported for base tables. - -## Dependencies - -None - -## Related Pages - +--- +title: Materialized View +summary: Materialized View +author: Guo Huan +date: 2022-05-07 +--- + +# Materialized View + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +A materialized view is a special physical table, which is relative to a common view. A common view is a virtual table and has many application limitations. Any query on a view is actually converted into a query on an SQL statement, and performance is not actually improved. The materialized view actually stores the results of the statements executed by the SQL statement, and is used to cache the results. + +## Benefits + +The materialized view function is used to improve query efficiency. + +## Description + +Full materialized views and incremental materialized views are supported. Full materialized views can only be updated in full mode. Incremental materialized views can be updated asynchronously. You can run statements to update new data to materialized views. + +## Enhancements + +None + +## Constraints + +Only simple filter queries and UNION ALL statements are supported for base tables. 
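+
+For illustration, a minimal sketch of both kinds of materialized views over a hypothetical base table t1 (the names here are illustrative, and the queries are kept to simple filters to satisfy the constraint above):
+
+```sql
+CREATE TABLE t1(a int, b int);
+-- Full materialized view: can only be refreshed in full.
+CREATE MATERIALIZED VIEW mv_full AS SELECT a, b FROM t1 WHERE a > 0;
+REFRESH MATERIALIZED VIEW mv_full;
+-- Incremental materialized view: new data can be merged in without a full rebuild.
+CREATE INCREMENTAL MATERIALIZED VIEW mv_inc AS SELECT a, b FROM t1 WHERE a > 0;
+REFRESH INCREMENTAL MATERIALIZED VIEW mv_inc;
+```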
+
+## Dependencies
+
+None
+
+## Related Pages
+
 [Materialized View](../../developer-guide/materialized-view/1-materialized-view-overview.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/9-creating-an-index-online.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/9-creating-an-index-online.md
index f5882fd6..b070763c 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/9-creating-an-index-online.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/9-creating-an-index-online.md
@@ -1,45 +1,45 @@
----
-title: Creating an Index Online
-summary: Creating an Index Online
-author: Guo Huan
-date: 2022-05-07
----
-
-# Creating an Index Online
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-Uses the CREATE INDEX CONCURRENTLY syntax to create indexes online without blocking DML.
-
-## Benefits
-
-When creating an index, you can specify the CONCURRENTLY keyword to ensure that the DML and online services are not blocked during the index creation.
-
-## Description
-
-A normal CREATE INDEX acquires exclusive lock on the table on which the index depends, blocking other accesses until the index drop can be completed. If the CONCURRENTLY keyword is specified, the ShareUpdateExclusiveLock lock is added to the table so that DML is not blocked during the creation.
-
-This keyword is specified when an index is created online. The entire table needs to be scanned twice and built. When the table is scanned for the first time, an index is created and the read and write operations are not blocked. During the second scan, changes that have occurred since the first scan are merged and updated. The table needs to be scanned and built twice, and all existing transactions that may modify the table must be completed. This means that the creation of the index takes a longer time than normal. In addition, the CPU and I/O consumption also affects other services.
-
-## Enhancements
-
-None
-
-## Constraints
-
-- Only one index name can be specified when an index is created online.
-- The CREATE INDEX statement can be run within a transaction, but CREATE INDEX CONCURRENTLY cannot.
-- Column-store tables and temporary tables do not support **CREATE INDEX CONCURRENTLY**.
-- Partitioned tables support **CREATE GLOBAL INDEX CONCURRENTLY**, but do not support **CREATE LOCAL INDEX CONCURRENTLY**.
-
-## Dependencies
-
-None
-
-## Related Pages
-
+---
+title: Creating an Index Online
+summary: Creating an Index Online
+author: Guo Huan
+date: 2022-05-07
+---
+
+# Creating an Index Online
+
+## Availability
+
+This feature is available since MogDB 1.1.0.
+
+## Introduction
+
+Uses the CREATE INDEX CONCURRENTLY syntax to create indexes online without blocking DML.
+
+## Benefits
+
+When creating an index, you can specify the CONCURRENTLY keyword to ensure that DML and online services are not blocked during the index creation.
+
+## Description
+
+A normal CREATE INDEX acquires an exclusive lock on the table on which the index depends, blocking other accesses until the index creation is completed. If the CONCURRENTLY keyword is specified, the ShareUpdateExclusiveLock lock is added to the table so that DML is not blocked during the creation.
+
+This keyword is specified when an index is created online. The entire table needs to be scanned and built twice: during the first scan, the index is created and read and write operations are not blocked; during the second scan, changes that have occurred since the first scan are merged and updated. Because the table is scanned and built twice and all existing transactions that may modify the table must complete first, creating the index takes longer than normal. In addition, the extra CPU and I/O consumption may affect other services.
+
+## Enhancements
+
+None
+
+## Constraints
+
+- Only one index name can be specified when an index is created online.
+- The CREATE INDEX statement can be run within a transaction, but CREATE INDEX CONCURRENTLY cannot.
+- Column-store tables and temporary tables do not support **CREATE INDEX CONCURRENTLY**.
+- Partitioned tables support **CREATE GLOBAL INDEX CONCURRENTLY**, but do not support **CREATE LOCAL INDEX CONCURRENTLY**.
+
+## Dependencies
+
+None
+
+## Related Pages
+
 [CREATE INDEX](../../reference-guide/sql-syntax/CREATE-INDEX.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/enterprise-level-features.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/enterprise-level-features.md
index 3fa30a1f..f245cdc8 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/enterprise-level-features.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/enterprise-level-features.md
@@ -1,35 +1,40 @@
----
-title: Enterprise-Level Features
-summary: Enterprise-Level Features
-author: Guo Huan
-date: 2023-05-22
----
-
-# Enterprise-Level Features
-
-+ **[Support for Functions and Stored Procedures](1-support-for-functions-and-stored-procedures.md)**
-+ **[SQL Hints](2-sql-hints.md)**
-+ **[Full-Text Indexing](3-full-text-indexing.md)**
-+ **[Copy Interface for Error Tolerance](4-copy-interface-for-error-tolerance.md)**
-+ **[Partitioning](5-partitioning.md)**
-+ **[Support for Advanced Analysis Functions](6-support-for-advanced-analysis-functions.md)**
-+ **[Materialized View](7-materialized-view.md)**
-+ **[HyperLogLog](8-hyperloglog.md)**
-+ **[Creating an Index Online](9-creating-an-index-online.md)**
-+ **[Autonomous Transaction](10-autonomous-transaction.md)**
-+ **[Global Temporary Table](11-global-temporary-table.md)**
-+ **[Pseudocolumn ROWNUM](12-pseudocolumn-rownum.md)**
-+ **[Stored Procedure Debugging](13-stored-procedure-debugging.md)**
-+ **[JDBC Client Load Balancing and Read/Write Isolation](14-jdbc-client-load-balancing-and-readwrite-isolation.md)**
-+ **[In-place Update Storage Engine](15-in-place-update-storage-engine.md)**
-+ **[Publication-Subscription](16-publication-subscription.md)**
-+ **[Foreign Key Lock Enhancement](17-foreign-key-lock-enhancement.md)**
-+ **[Data Compression in OLTP Scenarios](18-data-compression-in-oltp-scenarios.md)**
-+ **[Transaction Asynchronous Submit](19-transaction-async-submit.md)**
-+ **[Index Creation Parallel Control](23-index-creation-parallel-control.md)**
-+ **[Dynamic Partition Pruning](21-dynamic-partition-pruning.md)**
-+ **[COPY Import Optimization](20-copy-import-optimization.md)**
-+ **[SQL Running Status Observation](22-sql-running-status-observation.md)**
-+ **[BRIN Index](24-brin-index.md)**
-+ **[BLOOM Index](25-bloom-index.md)**
-+ **[Event Trigger](event-trigger.md)**
\ No newline at end of file
+---
+title: Enterprise-Level Features
+summary: Enterprise-Level Features
+author: Guo Huan
+date: 2023-05-22
+---
+
+# Enterprise-Level Features
+
++ **[Support for Functions and Stored Procedures](1-support-for-functions-and-stored-procedures.md)**
++ **[SQL Hints](2-sql-hints.md)**
++ **[Full-Text Indexing](3-full-text-indexing.md)**
++ **[Copy Interface for Error Tolerance](4-copy-interface-for-error-tolerance.md)**
++ **[Partitioning](5-partitioning.md)**
++ **[Support for Advanced Analysis Functions](6-support-for-advanced-analysis-functions.md)**
++ **[Materialized View](7-materialized-view.md)**
++ **[HyperLogLog](8-hyperloglog.md)**
++ **[Creating an Index Online](9-creating-an-index-online.md)**
++ **[Autonomous Transaction](10-autonomous-transaction.md)**
++ **[Global Temporary Table](11-global-temporary-table.md)**
++ **[Pseudocolumn ROWNUM](12-pseudocolumn-rownum.md)**
++ **[Stored Procedure Debugging](13-stored-procedure-debugging.md)**
++ **[JDBC Client Load Balancing and Read/Write Isolation](14-jdbc-client-load-balancing-and-readwrite-isolation.md)**
++ **[In-place Update Storage Engine](15-in-place-update-storage-engine.md)**
++ **[Publication-Subscription](16-publication-subscription.md)**
++ **[Foreign Key Lock Enhancement](17-foreign-key-lock-enhancement.md)**
++ **[Data Compression in OLTP Scenarios](18-data-compression-in-oltp-scenarios.md)**
++ **[Transaction Asynchronous Submit](19-transaction-async-submit.md)**
++ **[Index Creation Parallel Control](23-index-creation-parallel-control.md)**
++ **[Dynamic Partition Pruning](21-dynamic-partition-pruning.md)**
++ **[COPY Import Optimization](20-copy-import-optimization.md)**
++ **[SQL Running Status Observation](22-sql-running-status-observation.md)**
++ **[BRIN Index](24-brin-index.md)**
++ **[BLOOM Index](25-bloom-index.md)**
++ **[Event Trigger](event-trigger.md)**
++ **[Scrollable Cursor Support for Reverse Retrieval](scroll-cursor.md)**
++ **[Support for Pruning Subquery Projection Columns](support-for-pruning-subquery-projection-columns.md)**
++ **[Pruning ORDER BY in Subqueries](pruning-order-by-in-subqueries.md)**
++ **[Automatic Creation of Indexes Supporting Fuzzy Matching](index-support-fuzzy-matching.md)**
++ **[Support for Importing and Exporting Specific Objects](import-export-specific-objects.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/import-export-specific-objects.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/import-export-specific-objects.md
new file mode 100644
index 00000000..6b8bcdd5
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/import-export-specific-objects.md
@@ -0,0 +1,167 @@
+---
+title: Support for Importing and Exporting Specific Objects
+summary: Support for Importing and Exporting Specific Objects
+author: Guo Huan 周帅康
+date: 2024-07-03
+---
+
+# Support for Importing and Exporting Specific Objects
+
+## Availability
+
+This feature is available as of MogDB 5.0.8.
+
+## Introduction
+
+This feature enables the logical backup tool (gs_dump) and the restore tool (gs_restore) to import and export specified packages, functions, procedures, triggers, and types.
+
+## Benefits
+
+Enhances the capabilities of the logical backup and restore tools, improving the usability of MogDB.
+
+## Description
+
+### gs_dump supports the export of specified basic objects
+
+New command-line parameters have been added to specify the backup and export of specific objects, enabling the export functionality for packages, functions, procedures, triggers, and types.
+
+- You can specify one or more of these five basic objects.
+- Multiple parameter names can be specified for the same basic object (e.g., --trigger name1 --trigger name2).
+- Only the specified objects are exported, regardless of the objects' dependencies.
+- For triggers, both the trigger definition and the associated trigger function are exported.
+- The exported backup files can be imported using the gs_restore tool.
+- It does not affect the settings of existing parameters.
+
+**Usage Instructions**
+
+```shell
+# Specify exporting a trigger
+gs_dump -f backup_dir/filename -F p --trigger trigger_name
+
+# Specify exporting a function
+gs_dump -f backup_dir/filename -F p --function function_name(args)
+
+# Specify exporting a type
+gs_dump -f backup_dir/filename -F p --type type_name
+
+# Specify exporting a package
+gs_dump -f backup_dir/filename -F p --package package_name
+
+# Specify exporting a procedure
+gs_dump -f backup_dir/filename -F p --procedure procedure_name(args)
+```
+
+- --trigger trigger_name
+
+  Specify the trigger for export
+
+- --function function_name(args)
+
+  Specify the function for export
+
+- --type type_name
+
+  Specify the type for export
+
+- --package package_name
+
+  Specify the package for export
+
+- --procedure procedure_name(args)
+
+  Specify the procedure for export
+
+### gs_restore supports the import of specified basic objects
+
+New command-line parameters have been added to specify the backup and import of specific objects, enabling the import functionality for packages, functions, procedures, triggers, and types.
+
+- It supports importing custom archive formats, directory archive formats, and tar archive formats.
+- Only the specified objects are imported, regardless of the objects' dependencies.
+- It supports importing specified types of objects from a full backup.
+- It supports importing specified objects from an archive file exported with gs_dump.
+- You can specify one or more of these five basic objects.
+- Multiple parameter names can be specified for the same basic object (e.g., --trigger name1 --trigger name2).
+- It does not affect the settings of existing parameters.
+
+**Usage Instructions**
+
+```shell
+# Specify importing a trigger
+gs_restore backup/MPPDB_backup.tar -p 8000 -d backupdb -e -T trigger_name
+# or
+gs_restore backup/MPPDB_backup.tar -p 8000 -d backupdb -e --trigger trigger_name
+
+# Specify importing a function
+gs_restore backup/MPPDB_backup.tar -p 8000 -d backupdb -e -P function_name(args)
+# or
+gs_restore backup/MPPDB_backup.tar -p 8000 -d backupdb -e --function function_name(args)
+
+# Specify importing a type
+gs_restore backup/MPPDB_backup.tar -p 8000 -d backupdb -e --type type_name
+
+# Specify importing a package
+gs_restore backup/MPPDB_backup.tar -p 8000 -d backupdb -e --package package_name
+
+# Specify importing a procedure
+gs_restore backup/MPPDB_backup.tar -p 8000 -d backupdb -e --procedure procedure_name(args)
+```
+
+- -T, --trigger trigger_name
+
+  Specify the trigger for import
+
+- -P, --function function_name(args)
+
+  Specify the function for import
+
+- --type type_name
+
+  Specify the type for import
+
+- --package package_name
+
+  Specify the package for import
+
+- --procedure procedure_name(args)
+
+  Specify the procedure for import
+
+## Notes
+
+For functions and stored procedures with parameters, the parameter types must be specified.
+
+For example, for a function defined as `func(a INTEGER, b INTEGER)`, the function name should be given as "func(integer, integer)".
+
+To ensure compatibility with other SQL syntax, the database may convert certain parameter types to other types. For example, VARCHAR2 is converted to character varying, so `func(a INTEGER, table_name IN VARCHAR2)` becomes "func(integer, character varying)". To ensure that parameter types are entered correctly, you can use the following SQL statement to query the function's parameter types in the database:
+
+ ```sql
+ SELECT p.proname AS function_name,
+        p.proargtypes AS parameter_types,
+        pg_catalog.pg_get_function_identity_arguments(p.oid) AS funcargs
+ FROM PG_PROC AS p
+ WHERE p.proname = 'func_gs_dump_0001';
+ ```
+
+Query results:
+
+ ```sql
+ function_name     | parameter_types | funcargs
+ ------------------+-----------------+------------------------------
+ func_gs_dump_0001 | 1043            | table_name character varying
+ ```
+
+Through this SQL statement, you can see that the parameter type of `func_gs_dump_0001` is character varying, so the correct object name is "func_gs_dump_0001(character varying)".
+
+## Example
+
+```shell
+# Export a trigger named update_time
+gs_dump -f backup_dir/db.sql -F p --trigger update_time
+
+# Import a trigger named update_time
+gs_restore backup/MPPDB_backup.tar -p 8000 -d backupdb -e --trigger update_time
+```
+
+## Related Pages
+
+[gs_dump](../../reference-guide/tool-reference/server-tools/gs_dump.md), [gs_restore](../../reference-guide/tool-reference/server-tools/gs_restore.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/index-support-fuzzy-matching.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/index-support-fuzzy-matching.md
new file mode 100644
index 00000000..99625e72
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/index-support-fuzzy-matching.md
@@ -0,0 +1,51 @@
+---
+title: Automatic Creation of Indexes Supporting Fuzzy Matching
+summary: Automatic Creation of Indexes Supporting Fuzzy Matching
+author: 郭欢 刘宗昊
+date: 2024-04-16
+---
+
+# Automatic Creation of Indexes Supporting Fuzzy Matching
+
+## Availability
+
+This feature is available since MogDB 5.0.4.
+
+## Introduction
+
+In A compatibility mode, indexes that support fuzzy matching can be created automatically by enabling a GUC parameter.
+
+## Description
+
+Usually, to create an index in MogDB that supports fuzzy matching, you need to specify a fuzzy matching operator class (such as text_pattern_ops, varchar_pattern_ops, or bpchar_pattern_ops) when creating the index. Since MogDB 5.0.4, the use of these operator classes in indexes can be controlled using the allow_like_indexable option in the behavior_compat_options parameter.
+
+## Constraints
+
+The allow_like_indexable option only controls the usage behavior of operator classes. Note the following in actual use:
+
+1. Set this parameter option before creating an index.
+2. Ensure that fuzzy matching operations meet the usage conditions of the operator class.
+
+## Example
+
+```sql
+MogDB=# create table t1(a int, b text);
+CREATE TABLE
+MogDB=# insert into t1 values (1, 'foo'), (2, 'bar');
+INSERT 0 2
+MogDB=# set behavior_compat_options='allow_like_indexable';
+SET
+MogDB=# create index on t1(b);
+CREATE INDEX
+MogDB=# explain select /*+ indexscan(t1) */ * from t1 where b like 'fo%';
+                             QUERY PLAN
+--------------------------------------------------------------------
+ Index Scan using t1_b_idx on t1  (cost=0.00..8.27 rows=1 width=36)
+   Index Cond: ((b >= 'fo'::text) AND (b < 'fp'::text))
+   Filter: (b ~~ 'fo%'::text)
+(3 rows)
+```
+
+## Related Pages
+
+[behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options)
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/pruning-order-by-in-subqueries.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/pruning-order-by-in-subqueries.md
new file mode 100644
index 00000000..941e59df
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/pruning-order-by-in-subqueries.md
@@ -0,0 +1,211 @@
+---
+title: Pruning ORDER BY in Subqueries
+summary: Pruning ORDER BY in Subqueries
+author: 郭欢 刘宗昊
+date: 2024-03-11
+---
+
+# Pruning ORDER BY in Subqueries
+
+## Availability
+
+This feature is available since MogDB 5.0.6.
+
+## Introduction
+
+This feature allows subqueries to prune unnecessary ORDER BY columns as needed when the parent query involves aggregation, grouping, or sorting operations, avoiding unnecessary sorting work.
+
+## Benefits
+
+Eliminates useless ORDER BY clauses within SQL subqueries to improve query performance, simplify query logic, and reduce unnecessary overhead, thereby enhancing the overall efficiency of the database system.
+
+## Description
+
+When the parent query has no need for ordering, useless ORDER BY clauses in the SQL subquery can be pruned to enhance query performance and execution efficiency, generating a better execution plan.
+
+For example:
+
+```sql
+SELECT a, b FROM (SELECT * FROM t1 ORDER BY a, b, c) s1 GROUP BY a, b;
+```
+
+In the above SQL statement, subquery s1 is sorted by a, b, and c, but the upper query only performs grouping operations on a and b. Column c will not be output, and the sorting of column c in the subquery is not used in the upper query, so it can be pruned.
+
+Pruning the sort keys in the subquery must meet the following conditions:
+
+- The parent query is valid SQL and can generate an execution plan normally.
+- If the parent query does not change the output order of the subquery, the subquery is not pruned.
+- The subquery cannot be a non-inline CTE or expression subquery.
+- The subquery must not contain order-sensitive operations (e.g., FOR UPDATE or LIMIT).
+
+Note that when the natural order of the parent query comes entirely from the data source subquery, the sorting in the subquery will not be pruned.
+
+The feature is controlled by the GUC parameter [sort_key_pruning_level](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#sort_key_pruning_level), which takes effect when set to balanced or aggressive; the default is balanced.
+
+## Parameter Description
+
+A new GUC parameter `sort_key_pruning_level` is added to control the optimizer's pruning rules for useless sort keys in subqueries.
+
+This parameter is of type USERSET; its valid values are off, balanced, and aggressive, corresponding to different optimization levels. The default is balanced.
+
+- off: Turns off the pruning feature for useless sort keys in subqueries.
+
+- balanced: A conservative pruning strategy. The database optimizer only attempts to prune completely useless sort keys and will not prune subqueries in set operations.
+
+- aggressive: A more aggressive pruning strategy. The database optimizer will attempt to prune all potentially unused sort keys, including those in set operations.
+
+## Constraints
+
+- When the [rewrite_rule](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#rewrite_rule) parameter has `reduce_orderby` enabled, that rule takes precedence.
+- For the legacy beta feature, you need to turn off the `canonical_pathkey` option in [sql_beta_feature](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#sql_beta_feature); this feature only supports pruning after pathkeys have been canonicalized.
+- For set operations, in balanced mode only UNION ALL is left unpruned, as other set operations require sorting or hashing for de-duplication while UNION ALL does not; in aggressive mode, all set operations are pruned.
+ +## Example + +```sql +-- Create a test table +MogDB=# CREATE TABLE t1(a int, b int, c int, d int); +CREATE TABLE + +-- Check for the presence of sorting in the plan +MogDB=# EXPLAIN (COSTS OFF) SELECT COUNT(*) FROM (SELECT * FROM t1 ORDER BY 1, 2) AS s1; + QUERY PLAN +---------------------- + Aggregate + -> Seq Scan on t1 +(2 rows) + +-- Turn off the reduce_orderby option in rewrite_rule +MogDB=# set rewrite_rule = 'magicset'; +SET + +-- Turn off the canonical_pathkey option in sql_beta_feature +MogDB=# set sql_beta_feature = 'none'; +SET + +-- Conservative pruning of sort keys +MogDB=# SET sort_key_pruning_level TO balanced; +SET + +-- Partial pruning +MogDB=# EXPLAIN (COSTS OFF) SELECT a FROM (SELECT * FROM t1 ORDER BY a, b) s1 GROUP BY a; + QUERY PLAN +---------------------------- + Group + Group By Key: t1.a + -> Sort + Sort Key: t1.a + -> Seq Scan on t1 +(5 rows) + +-- Partial pruning, incremental sorting +MogDB=# EXPLAIN (COSTS OFF) SELECT * FROM (SELECT * FROM t1 ORDER BY a, b) ORDER BY a, c; + QUERY PLAN +---------------------------- + Incremental Sort + Sort Key: t1.a, t1.c + Presorted Key: t1.a + -> Sort + Sort Key: t1.a + -> Seq Scan on t1 +(6 rows) + +-- Complete pruning +MogDB=# EXPLAIN (COSTS OFF) SELECT * FROM (SELECT * FROM t1 ORDER BY a, b) ORDER BY b, c; + QUERY PLAN +------------------------ + Sort + Sort Key: t1.b, t1.c + -> Seq Scan on t1 +(3 rows) + +-- Retain merge keys +MogDB=# EXPLAIN (COSTS OFF) SELECT * FROM (SELECT * FROM t1 ORDER BY a, b) s1 LEFT JOIN t1 ON s1.a = t1.a; + QUERY PLAN +------------------------------------------ + Hash Right Join + Hash Cond: (public.t1.a = public.t1.a) + -> Seq Scan on t1 + -> Hash + -> Sort + Sort Key: public.t1.a + -> Seq Scan on t1 +(7 rows) + +-- Do not prune sorting in append +MogDB=# EXPLAIN (COSTS OFF) SELECT v.b FROM (SELECT * FROM (SELECT a, b FROM t1 ORDER BY b) UNION ALL (SELECT a, b FROM t1 ORDER BY a)) v GROUP BY 1; + QUERY PLAN +--------------------------------------------------- + HashAggregate + Group By Key: __unnamed_subquery__.b + -> Append + -> Subquery Scan on __unnamed_subquery__ + -> Sort + Sort Key: public.t1.b + -> Seq Scan on t1 + -> Subquery Scan on "*SELECT* 2" + -> Sort + Sort Key: public.t1.a + -> Seq Scan on t1 +(11 rows) + +-- Aggressive pruning of sort keys +MogDB=# SET sort_key_pruning_level TO aggressive; +SET + +-- Complete pruning +MogDB=# EXPLAIN (COSTS OFF) SELECT a FROM (SELECT * FROM t1 ORDER BY a, b) s1 GROUP BY a; + QUERY PLAN +---------------------------- + HashAggregate + Group By Key: s1.a + -> Subquery Scan on s1 + -> Seq Scan on t1 +(4 rows) + +-- Complete pruning +MogDB=# EXPLAIN (COSTS OFF) SELECT * FROM (SELECT * FROM t1 ORDER BY a, b) ORDER BY a, c; + QUERY PLAN +------------------------ + Sort + Sort Key: t1.a, t1.c + -> Seq Scan on t1 +(3 rows) + +-- Complete pruning +MogDB=# EXPLAIN (COSTS OFF) SELECT * FROM (SELECT * FROM t1 ORDER BY a, b) ORDER BY b, c; + QUERY PLAN +------------------------ + Sort + Sort Key: t1.b, t1.c + -> Seq Scan on t1 +(3 rows) + +-- Do not retain merge keys +MogDB=# EXPLAIN (COSTS OFF) SELECT * FROM (SELECT * FROM t1 ORDER BY a, b) s1 LEFT JOIN t1 ON s1.a = t1.a; + QUERY PLAN +------------------------------------------ + Hash Left Join + Hash Cond: (public.t1.a = public.t1.a) + -> Seq Scan on t1 + -> Hash + -> Seq Scan on t1 +(5 rows) + +-- Pruning sorting in append +MogDB=# EXPLAIN (COSTS OFF) SELECT v.b FROM (SELECT * FROM (SELECT a, b FROM t1 ORDER BY b) UNION ALL (SELECT a, b FROM t1 ORDER BY a)) v GROUP BY 1; + QUERY PLAN 
+--------------------------------------------------- + HashAggregate + Group By Key: __unnamed_subquery__.b + -> Append + -> Subquery Scan on __unnamed_subquery__ + -> Seq Scan on t1 + -> Subquery Scan on "*SELECT* 2" + -> Seq Scan on t1 +(7 rows) +``` + +## Related Pages + +[sort_key_pruning_level](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#sort_key_pruning_level), [sql_beta_feature](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#sql_beta_feature), [rewrite_rule](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#rewrite_rule) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/scroll-cursor.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/scroll-cursor.md new file mode 100644 index 00000000..1ad4e828 --- /dev/null +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/scroll-cursor.md @@ -0,0 +1,49 @@ +--- +title: Scrollable Cursor Support for Reverse Retrieval +summary: Scrollable Cursor Support for Reverse Retrieval +author: 郭欢 张沫 +date: 2024-01-29 +--- + +# Scrollable Cursor Support for Reverse Retrieval + +## Availability + +This feature is available since MogDB 5.0.6. + +## Introduction + +A cursor refers to a handle or pointer to a context area. With the help of a cursor, stored procedures can control the changes in the context area. This feature supports specifying SCROLL when declaring a cursor, making it possible to retrieve data rows in reverse order (i.e., backward retrieval). Depending on the complexity of the execution plan of the query statement, specifying SCROLL may result in performance loss in query execution time. The relevant syntax is as follows: + +```sql +DECLARE cursor_name [ BINARY ] [ [ NO ] SCROLL ] CURSOR [ { WITH | WITHOUT } HOLD ] FOR query; +``` + +## Constraints + +- This feature is applicable to both A and PG compatibility modes. +- It does not support `for update/share`. +- It does not support the `refcursor` of the cursor. 
+ +## Example + +```sql +-- Create table and insert data +drop table if exists t_scroll_cursor_0016 cascade; + +create table t_scroll_cursor_0016(c1 int constraint i_scroll_cursor_0016 primary key,c2 text); + +insert into t_scroll_cursor_0016 values(generate_series(1,100000),'t_scroll_cursor_0016' || generate_series(1,100000)); + +-- Declare cursor and fetch data +BEGIN; +DECLARE c_scroll_cursor_0016 SCROLL CURSOR FOR SELECT * FROM t_scroll_cursor_0016 where c1 between 1000 and 1200 ORDER BY c1; +FETCH 2 FROM c_scroll_cursor_0016; +FETCH BACKWARD 1 FROM c_scroll_cursor_0016; +FETCH 100 FROM c_scroll_cursor_0016; +END; +``` + +## Related Pages + +[DECLARE](../../reference-guide/sql-syntax/DECLARE.md), [CURSOR](../../reference-guide/sql-syntax/CURSOR.md), [Cursors](../../reference-guide/sql-reference/sql-reference-cursor.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/support-for-pruning-subquery-projection-columns.md b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/support-for-pruning-subquery-projection-columns.md new file mode 100644 index 00000000..c507c827 --- /dev/null +++ b/product/en/docs-mogdb/v5.0/characteristic-description/enterprise-level-features/support-for-pruning-subquery-projection-columns.md @@ -0,0 +1,88 @@ +--- +title: Support for Pruning Subquery Projection Columns +summary: Support for Pruning Subquery Projection Columns +author: 郭欢 孙久武 +date: 2024-01-29 +--- + +# Support for Pruning Subquery Projection Columns + +## Availability + +This feature is available since MogDB 5.0.6. + +## Introduction + +Supports column pruning to eliminate redundant projection columns in subqueries and improve SQL performance. + +## Benefits + +Identifies and removes unnecessary columns in the subquery results to improve query efficiency, reduce the overhead of query execution, and enhance performance. + +## Description + +Column Pruning is a database optimization technique that refers to the process where the database query execution engine identifies and removes unnecessary columns during query execution. These unnecessary columns are typically redundant in the query result set and are identified and eliminated in the query plan to reduce the overhead of query execution and improve performance. + +Eliminating redundant projection columns in subqueries means that the database system will identify and remove columns that are not needed in the subquery results to improve query efficiency. This helps to reduce the computational resources and memory consumption required for the query, thereby accelerating the execution of the query. + +When queries involve complex query plans and large amounts of data, the database system can execute queries more efficiently by identifying and eliminating unnecessary columns, reducing resource consumption, and improving the system's response speed. + +## Parameter Description + +The GUC parameter [rewrite_rule](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#rewrite_rule) has a new option `column_pruner` to control whether to enable the function of eliminating redundant projection columns in subqueries, which is turned off by default. Set `rewrite_rule` to `column_pruner` to enable this feature. + +```sql +SET rewrite_rule='column_pruner'; +``` + +## Constraints + +1. SQL statements that do not meet the rewrite rules will not throw errors and will execute normally without being processed by the rewrite rules. +2. 
Columns referenced by external queries will still be retained. +3. Columns involved in order/group/distinct/filter/SW clauses in the query will not be eliminated. +4. Only supports rewriting optimization for CTEs that are INLINE subqueries. +5. Supports rewriting rules for views and subqueries. +6. Columns in the query that include returning-set or volatile functions will not be eliminated. +7. This feature supports A/PG compatibility modes and does not support B mode temporarily. + +## Example + +```sql +CREATE TABLE t1(a INT, b INT); +CREATE TABLE t2(a INT PRIMARY KEY, b INT); +SET rewrite_rule='column_pruner'; + +explain verbose SELECT a +FROM ( + SELECT t1.a + ,count(t2.a) + ,DENSE_RANK() OVER ( + ORDER BY t1.a + ) AS RANK + FROM t1 + LEFT JOIN t2 ON t1.a = t2.a + GROUP BY t1.a + ); + + QUERY PLAN +------------------------------------------------------------------- + HashAggregate (cost=36.86..38.86 rows=200 width=4) + Output: t1.a + Group By Key: t1.a + -> Seq Scan on public.t1 (cost=0.00..31.49 rows=2149 width=4) + Output: t1.a +(5 rows) +``` + +## Performance Test Results + +1. Column pruning performance shows a significant improvement in scenarios with join elimination or multiple window functions, with the highest improvement of up to 90% in the multi-agg&windowfunc scenario on columnar tables, and up to 69% on row-oriented tables. +2. The performance improvement effect in the Sort scenario is not obvious and may not improve within the margin of error. +3. The performance improvement effect of the column pruning feature on columnar tables is generally better than that on row-oriented tables. +4. Parallel query execution can further optimize the performance improvement effect, but the performance improvement in the multi-agg&windowfunc scenario of columnar tables remains 90% before and after enabling. +5. In the test of the row-oriented table join elimination scenario, it is concluded that the work_mem parameter has no significant impact on the query time of the column pruning feature. +6. The customer scenario is a typical scenario for the column pruning feature, with a significant performance improvement effect, reaching 94%, and further improving to 98% when query_dop=8. + +## Related Pages + +[rewrite_rule](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#rewrite_rule) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/1-primary-standby.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/1-primary-standby.md index fc9e665b..4ed84ead 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/1-primary-standby.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/1-primary-standby.md @@ -1,46 +1,46 @@ ---- -title: Primary/Standby -summary: Primary/Standby -author: Guo Huan -date: 2022-05-07 ---- - -# Primary/Standby - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -To ensure that a fault can be rectified, data needs to be written into multiple copies. Multiple copies are configured for the primary and standby nodes, and logs are used for data synchronization. In this way, MogDB has no data lost when a node is faulty or the system restarts after a stop, meeting the ACID feature requirements. - -## Benefits - -Services can be switched to the standby node when the primary node is faulty. Therefore, data is not lost and services can be quickly restored. 
- -## Description - -You can deploy the one-primary-multiple-standby mode. In the one-primary-multiple-standby mode, all standby nodes need to redo logs and can be promoted to the primary. The one-primary-multiple-standby mode provides higher DR capabilities and is more suitable for the OLTP system that processes a large number of transactions. - -The **switchover** command can be used to trigger a switchover between the primary and standby nodes. If the primary node is faulty, the **failover** command can be used to promote the standby node to the primary. - -In scenarios such as initial installation or backup and restoration, data on the standby node needs to be rebuilt based on the primary node. In this case, the build function is required to send the data and WALs of the primary node to the standby node. When the primary node is faulty and joins again as a standby node, the build function needs to be used to synchronize data and WALs with those of the new primary node. In addition, in online capacity expansion scenarios, you need to use build to synchronize metadata to instances on new nodes. Build includes full build and incremental build. Full build depends on primary node data for rebuild. The amount of data to be copied is large and the time required is long. Incremental build copies only differential files. The amount of data to be copied is small and the time required is short. Generally, the incremental build is preferred for fault recovery. If the incremental build fails, the full build continues until the fault is rectified. - -To implement HA DR for all instances, in addition to the preceding primary/standby multi-copy replication configured for DNs, MogDB also provides other primary/standby DR capabilities, such as CM server (one primary and multiple standbys) and ETCD (one primary and multiple standbys). In this way, instances can be recovered as soon as possible without interrupting services, minimizing the impact of hardware, software, and human errors on services and ensuring service continuity. - -## Enhancements - -None. - -## Constraints - -None. - -## Dependencies - -None. - -## Related Pages - +--- +title: Primary/Standby +summary: Primary/Standby +author: Guo Huan +date: 2022-05-07 +--- + +# Primary/Standby + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +To ensure that a fault can be rectified, data needs to be written into multiple copies. Multiple copies are configured for the primary and standby nodes, and logs are used for data synchronization. In this way, MogDB has no data lost when a node is faulty or the system restarts after a stop, meeting the ACID feature requirements. + +## Benefits + +Services can be switched to the standby node when the primary node is faulty. Therefore, data is not lost and services can be quickly restored. + +## Description + +You can deploy the one-primary-multiple-standby mode. In the one-primary-multiple-standby mode, all standby nodes need to redo logs and can be promoted to the primary. The one-primary-multiple-standby mode provides higher DR capabilities and is more suitable for the OLTP system that processes a large number of transactions. + +The **switchover** command can be used to trigger a switchover between the primary and standby nodes. If the primary node is faulty, the **failover** command can be used to promote the standby node to the primary. 
+ +In scenarios such as initial installation or backup and restoration, data on the standby node needs to be rebuilt based on the primary node. In this case, the build function is required to send the data and WALs of the primary node to the standby node. When the primary node is faulty and joins again as a standby node, the build function needs to be used to synchronize data and WALs with those of the new primary node. In addition, in online capacity expansion scenarios, you need to use build to synchronize metadata to instances on new nodes. Build includes full build and incremental build. Full build depends on primary node data for rebuild. The amount of data to be copied is large and the time required is long. Incremental build copies only differential files. The amount of data to be copied is small and the time required is short. Generally, the incremental build is preferred for fault recovery. If the incremental build fails, the full build continues until the fault is rectified. + +To implement HA DR for all instances, in addition to the preceding primary/standby multi-copy replication configured for DNs, MogDB also provides other primary/standby DR capabilities, such as CM server (one primary and multiple standbys) and ETCD (one primary and multiple standbys). In this way, instances can be recovered as soon as possible without interrupting services, minimizing the impact of hardware, software, and human errors on services and ensuring service continuity. + +## Enhancements + +None. + +## Constraints + +None. + +## Dependencies + +None. + +## Related Pages + [Primary and Standby Management](../../administrator-guide/primary-and-standby-management.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/10-adding-or-deleting-a-standby-server.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/10-adding-or-deleting-a-standby-server.md index af7212b4..aecc1c95 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/10-adding-or-deleting-a-standby-server.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/10-adding-or-deleting-a-standby-server.md @@ -1,62 +1,62 @@ ---- -title: Adding or Deleting a Standby Node -summary: Adding or Deleting a Standby Node -author: Guo Huan -date: 2022-05-07 ---- - -# Adding or Deleting a Standby Node - -## Availability - -This feature is available since MogDB 2.0.0. - -## Introduction - -Standby nodes can be added and deleted. - -## Benefits - -If the read pressure of the primary node is high or you want to improve the disaster recovery capability of the database, you need to add a standby node. If some standby nodes in a cluster are faulty and cannot be recovered within a short period of time, you can delete the faulty nodes to ensure that the cluster is running properly. - -## Description - -MogDB can be scaled out from a single node or one primary and multiple standbys to one primary and eight standbys. Cascaded standby nodes can be added. Standby nodes can be added when a faulty standby node exists in the cluster. One primary and multiple standbys can be scaled in to a single node. A faulty standby node can be deleted. - -Standby nodes can be added or deleted online without affecting the primary node. - -## Enhancements - -None. - -## Constraints - -For adding a standby node: - -- Ensure that the MogDB image package exists on the primary node. 
-- Ensure that the same users and user groups as those on the primary node have been created on the new standby node. -- Ensure that the mutual trust of user **root** and the database management user has been established between the existing database nodes and the new nodes. -- Ensure that the XML file has been properly configured and information about the standby node to be scaled has been added to the installed database configuration file. -- Ensure that only user **root** is authorized to run the scale-out command. -- Do not run the **gs_dropnode** command on the primary node to delete other standby nodes at the same time. -- Ensure that the environment variables of the primary node have been imported before the scale-out command is run. -- Ensure that the operating system of the new standby node is the same as that of the primary node. -- Do not perform an primary/standby switchover or failover on other standby nodes at the same time. - -For deleting a standby node: - -- Delete the standby node only on the primary node. -- Do not perform an primary/standby switchover or failover on other standby nodes at the same time. -- Do not run the **gs_expansion** command on the primary node for scale-out at the same time. -- Do not run the **gs_dropnode** command twice at the same time. -- Before deletion, ensure that the database management user trust relationship has been established between the primary and standby nodes. -- Run this command as a database administrator. -- Before running commands, run the **source** command to import environment variables of the primary node. - -## Dependencies - -None. - -## Related Pages - +--- +title: Adding or Deleting a Standby Node +summary: Adding or Deleting a Standby Node +author: Guo Huan +date: 2022-05-07 +--- + +# Adding or Deleting a Standby Node + +## Availability + +This feature is available since MogDB 2.0.0. + +## Introduction + +Standby nodes can be added and deleted. + +## Benefits + +If the read pressure of the primary node is high or you want to improve the disaster recovery capability of the database, you need to add a standby node. If some standby nodes in a cluster are faulty and cannot be recovered within a short period of time, you can delete the faulty nodes to ensure that the cluster is running properly. + +## Description + +MogDB can be scaled out from a single node or one primary and multiple standbys to one primary and eight standbys. Cascaded standby nodes can be added. Standby nodes can be added when a faulty standby node exists in the cluster. One primary and multiple standbys can be scaled in to a single node. A faulty standby node can be deleted. + +Standby nodes can be added or deleted online without affecting the primary node. + +## Enhancements + +None. + +## Constraints + +For adding a standby node: + +- Ensure that the MogDB image package exists on the primary node. +- Ensure that the same users and user groups as those on the primary node have been created on the new standby node. +- Ensure that the mutual trust of user **root** and the database management user has been established between the existing database nodes and the new nodes. +- Ensure that the XML file has been properly configured and information about the standby node to be scaled has been added to the installed database configuration file. +- Ensure that only user **root** is authorized to run the scale-out command. +- Do not run the **gs_dropnode** command on the primary node to delete other standby nodes at the same time. 
+- Ensure that the environment variables of the primary node have been imported before the scale-out command is run.
+- Ensure that the operating system of the new standby node is the same as that of the primary node.
+- Do not perform a primary/standby switchover or failover on other standby nodes at the same time.
+
+For deleting a standby node:
+
+- Delete the standby node only on the primary node.
+- Do not perform a primary/standby switchover or failover on other standby nodes at the same time.
+- Do not run the **gs_expansion** command on the primary node for scale-out at the same time.
+- Do not run the **gs_dropnode** command twice at the same time.
+- Before deletion, ensure that the database management user trust relationship has been established between the primary and standby nodes.
+- Run this command as a database administrator.
+- Before running commands, run the **source** command to import environment variables of the primary node.
+
+## Dependencies
+
+None.
+
+## Related Pages
+
 [gs_dropnode](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_dropnode.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/11-delaying-entering-the-maximum-availability-mode.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/11-delaying-entering-the-maximum-availability-mode.md
index 0a3bebe4..7f3ad021 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/11-delaying-entering-the-maximum-availability-mode.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/11-delaying-entering-the-maximum-availability-mode.md
@@ -1,46 +1,46 @@
----
-title: Delaying Entering the Maximum Availability Mode
-summary: Delaying Entering the Maximum Availability Mode
-author: Guo Huan
-date: 2022-05-10
----
-
-# Delaying Entering the Maximum Availability Mode
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-The primary node can be delayed to enter the maximum availability mode.
-
-## Benefits
-
-When the primary node detects that the standby node exits due to network instability or other reasons and the maximum availability mode is enabled on the primary node, the primary node remains in the maximum protection mode within a specified time window. After the time window expires, the primary node enters the maximum availability mode.
-
-This prevents the primary node from frequently switching between the maximum protection mode and maximum availability mode due to factors such as network jitter and intermittent process disconnection.
-
-## Description
-
-If **most_available_sync** is set to **on**, when synchronous standby nodes are faulty in primary/standby scenarios and the number of configured synchronous standby nodes is insufficient (for details, see the meaning of **synchonous_standby_name**), setting **keep_sync_window** will retain the maximum protection mode within the time window specified by **keep_sync_window**. That is, committing transactions on the primary node is blocked, delaying the primary node to enter the maximum availability mode.
-
-If synchronous standby nodes recover from faults and the number of synchronous standby nodes meets the configuration requirements, transactions are not blocked.
-
-## Enhancements
-
-None.
-
-## Constraints
-
-- This feature takes effect only when the maximum availability mode is enabled.
-- Enabling this feature may affect the RPO. If the primary node is faulty within the configured timeout window, its transactions are committed locally but not synchronized to the faulty synchronous standby nodes.
-- This feature does not apply to cascaded standby nodes.
-
-## Dependencies
-
-This feature depends on the maximum availability mode.
-
-## Related Pages
-
+---
+title: Delaying Entering the Maximum Availability Mode
+summary: Delaying Entering the Maximum Availability Mode
+author: Guo Huan
+date: 2022-05-10
+---
+
+# Delaying Entering the Maximum Availability Mode
+
+## Availability
+
+This feature is available since MogDB 3.0.0.
+
+## Introduction
+
+The primary node can be delayed to enter the maximum availability mode.
+
+## Benefits
+
+When the primary node detects that the standby node exits due to network instability or other reasons and the maximum availability mode is enabled on the primary node, the primary node remains in the maximum protection mode within a specified time window. After the time window expires, the primary node enters the maximum availability mode.
+
+This prevents the primary node from frequently switching between the maximum protection mode and maximum availability mode due to factors such as network jitter and intermittent process disconnection.
+
+## Description
+
+If **most_available_sync** is set to **on**, when synchronous standby nodes are faulty in primary/standby scenarios and the number of configured synchronous standby nodes is insufficient (for details, see the meaning of **synchronous_standby_names**), setting **keep_sync_window** will retain the maximum protection mode within the time window specified by **keep_sync_window**. That is, committing transactions on the primary node is blocked, delaying the primary node's entry into the maximum availability mode.
+
+If synchronous standby nodes recover from faults and the number of synchronous standby nodes meets the configuration requirements, transactions are not blocked.
+
+## Enhancements
+
+None.
+
+## Constraints
+
+- This feature takes effect only when the maximum availability mode is enabled.
+- Enabling this feature may affect the RPO. If the primary node is faulty within the configured timeout window, its transactions are committed locally but not synchronized to the faulty synchronous standby nodes.
+- This feature does not apply to cascaded standby nodes.
+
+## Dependencies
+
+This feature depends on the maximum availability mode.
+
+## Related Pages
+
 [Primary Server](../../reference-guide/guc-parameters/ha-replication/primary-server.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/16-using-a-standby-node-to-build-a-standby-node.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/16-using-a-standby-node-to-build-a-standby-node.md
index 468a0c7c..b53c2c59 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/16-using-a-standby-node-to-build-a-standby-node.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/16-using-a-standby-node-to-build-a-standby-node.md
@@ -1,40 +1,40 @@
----
-title: Using a Standby Node to Build a Standby Node
-summary: Using a Standby Node to Build a Standby Node
-author: Guo Huan
-date: 2022-05-10
----
-
-# Using a Standby Node to Build a Standby Node
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-A standby node can be built by another standby node to accelerate standby node recovery from faults.
The I/O and bandwidth pressure of the primary node can be reduced. - -## Benefits - -When the service load is heavy, building a standby node by using the primary node increases the resource burden on the primary node. As a result, the performance of the primary node deteriorates and the build becomes slow. Building a standby node by using a standby node does not affect services on the primary node. - -## Description - -You can run the **gs_ctl** command to specify a standby node to build another standby node to be repaired. For details, see “Tools Used in the Internal System > [gs_ctl](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_ctl.md)” in *Tool Reference*. - -## Enhancements - -None. - -## Constraints - -A standby node can only be used to build another standby node. You can only use a specified IP address and port number to build the standby node. Before building the standby node, ensure that the logs of the standby node to be repaired are later than those of the standby node that sends data. - -## Dependencies - -None. - -## Related Pages - +--- +title: Using a Standby Node to Build a Standby Node +summary: Using a Standby Node to Build a Standby Node +author: Guo Huan +date: 2022-05-10 +--- + +# Using a Standby Node to Build a Standby Node + +## Availability + +This feature is available since MogDB 3.0.0. + +## Introduction + +A standby node can be built by another standby node to accelerate standby node recovery from faults. The I/O and bandwidth pressure of the primary node can be reduced. + +## Benefits + +When the service load is heavy, building a standby node by using the primary node increases the resource burden on the primary node. As a result, the performance of the primary node deteriorates and the build becomes slow. Building a standby node by using a standby node does not affect services on the primary node. + +## Description + +You can run the **gs_ctl** command to specify a standby node to build another standby node to be repaired. For details, see “Tools Used in the Internal System > [gs_ctl](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_ctl.md)” in *Tool Reference*. + +## Enhancements + +None. + +## Constraints + +A standby node can only be used to build another standby node. You can only use a specified IP address and port number to build the standby node. Before building the standby node, ensure that the logs of the standby node to be repaired are later than those of the standby node that sends data. + +## Dependencies + +None. + +## Related Pages + [gs_ctl](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_ctl.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/17-two-city-three-dc-dr.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/17-two-city-three-dc-dr.md index cade8713..eeb0cba0 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/17-two-city-three-dc-dr.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/17-two-city-three-dc-dr.md @@ -1,47 +1,47 @@ ---- -title: Two-City Three-DC DR -summary: Two-City Three-DC DR -author: zhang cuiping -date: 2022-10-13 ---- - -# Two-City Three-DC DR - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -MogDB 3.1.0 supports two-city three-DC DR. 
-
-## Benefits
-
-The services require the underlying database to provide cross-region DR capabilities to ensure data security and availability in case of extreme disasters.
-
-## Description
-
-Finance and banking industries require high data security. In the case of extreme disasters such as fires, earthquakes, and wars, data security must be ensured. Therefore, a cross-region DR solution is required. Cross-region DR refers to the scenario where the distance between the primary and standby data centers is greater than 200 km. When the primary data center encounters an extreme disaster, the standby data center can take over services. This feature provides a cross-region MogDB DR solution.
-
-This feature provides the following solutions:
-
-Streaming replication-based remote DR solution (This solution is provided in MogDB 3.1.0 and later versions.)
-
-## Enhancements
-
-In MogDB 3.1.0, the remote DR solution based on streaming replication is added for the two-city 3DC cross-region DR feature.
-
-- DR database failover
-- Planned switchover between the primary and standby database instances
-
-**Streaming replication-based remote DR solution**:
-
-For details about the constraints, see section [Two-City Three-DC DR](../../administrator-guide/database-deployment-scenario/two-city-three-dc-dr.md) in *Administrator Guide*.
-
-## Dependencies
-
-None
-
-## Related Pages
-
+---
+title: Two-City Three-DC DR
+summary: Two-City Three-DC DR
+author: zhang cuiping
+date: 2022-10-13
+---
+
+# Two-City Three-DC DR
+
+## Availability
+
+This feature is available since MogDB 3.1.0.
+
+## Introduction
+
+MogDB 3.1.0 supports two-city three-DC DR.
+
+## Benefits
+
+The services require the underlying database to provide cross-region DR capabilities to ensure data security and availability in case of extreme disasters.
+
+## Description
+
+Finance and banking industries require high data security. In the case of extreme disasters such as fires, earthquakes, and wars, data security must be ensured. Therefore, a cross-region DR solution is required. Cross-region DR refers to the scenario where the distance between the primary and standby data centers is greater than 200 km. When the primary data center encounters an extreme disaster, the standby data center can take over services. This feature provides a cross-region MogDB DR solution.
+
+This feature provides the following solution:
+
+Streaming replication-based remote DR solution (This solution is provided in MogDB 3.1.0 and later versions.)
+
+## Enhancements
+
+In MogDB 3.1.0, the remote DR solution based on streaming replication is added for the two-city three-DC cross-region DR feature.
+
+- DR database failover
+- Planned switchover between the primary and standby database instances
+
+**Streaming replication-based remote DR solution**:
+
+For details about the constraints, see section [Two-City Three-DC DR](../../administrator-guide/database-deployment-scenario/two-city-three-dc-dr.md) in *Administrator Guide*.
+ +## Dependencies + +None + +## Related Pages + [Two-City Three-DC DR](../../administrator-guide/database-deployment-scenario/two-city-three-dc-dr.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/2-logical-replication.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/2-logical-replication.md index 6c7f931c..d7724b5f 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/2-logical-replication.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/2-logical-replication.md @@ -1,41 +1,41 @@ ---- -title: Logical Replication -summary: Logical Replication -author: Guo Huan -date: 2022-05-07 ---- - -# Logical Replication - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -MogDB provides the logical decoding function to reversely parse physical logs to logical logs. Logical replication tools such as DRS convert logical logs to SQL statements and replay the SQL statements in the peer database. In this way, data can be synchronized between heterogeneous databases. Currently, unidirectional and bidirectional logical replication between the MogDB database and the MySQL or Oracle database is supported. - -## Benefits - -Logical replication is applicable to real-time database data migration, dual-database active-active system, and rolling upgrades. - -## Description - -DNs reversely parse physical logs to logical logs. Logical replication tools such as DRS extract logical logs from DNs, convert the logs to SQL statements, and replay the SQL statements in MySQL. Logical replication tools also extract logical logs from a MySQL database, reversely parse the logs to SQL statements, and replay the SQL statements in MogDB. In this way, data can be synchronized between heterogeneous databases. - -## Enhancements - -- MogDB 1.1.0 logic decoding supports the extraction of logs from full and incremental logs. -- MogDB 1.1.0 supports logical decoding on a standby node. - -## Constraints - -Column-store replication and DDL replication are not supported. - -## Dependencies - -It depends on logical replication tools that decode logical logs. - -## Related Pages - +--- +title: Logical Replication +summary: Logical Replication +author: Guo Huan +date: 2022-05-07 +--- + +# Logical Replication + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +MogDB provides the logical decoding function to reversely parse physical logs to logical logs. Logical replication tools such as DRS convert logical logs to SQL statements and replay the SQL statements in the peer database. In this way, data can be synchronized between heterogeneous databases. Currently, unidirectional and bidirectional logical replication between the MogDB database and the MySQL or Oracle database is supported. + +## Benefits + +Logical replication is applicable to real-time database data migration, dual-database active-active system, and rolling upgrades. + +## Description + +DNs reversely parse physical logs to logical logs. Logical replication tools such as DRS extract logical logs from DNs, convert the logs to SQL statements, and replay the SQL statements in MySQL. Logical replication tools also extract logical logs from a MySQL database, reversely parse the logs to SQL statements, and replay the SQL statements in MogDB. In this way, data can be synchronized between heterogeneous databases. 
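+
+A minimal sketch of observing logical decoding output through a replication slot, assuming the built-in **mppdb_decoding** plugin and using placeholder database name and port:
+
+```shell
+# Create a logical replication slot that decodes WAL with mppdb_decoding (illustrative names).
+gsql -d postgres -p 26000 -c "SELECT * FROM pg_create_logical_replication_slot('demo_slot', 'mppdb_decoding');"
+# Peek at up to 10 decoded changes without consuming them from the slot.
+gsql -d postgres -p 26000 -c "SELECT data FROM pg_logical_slot_peek_changes('demo_slot', NULL, 10);"
+# Drop the slot when finished so that WAL is not retained indefinitely.
+gsql -d postgres -p 26000 -c "SELECT pg_drop_replication_slot('demo_slot');"
+```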
+
+## Enhancements
+
+- MogDB 1.1.0 logical decoding supports the extraction of logs from full and incremental logs.
+- MogDB 1.1.0 supports logical decoding on a standby node.
+
+## Constraints
+
+Column-store replication and DDL replication are not supported.
+
+## Dependencies
+
+It depends on logical replication tools that decode logical logs.
+
+## Related Pages
+
[Logical Replication](../../developer-guide/logical-replication/logical-replication.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/4-logical-backup.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/4-logical-backup.md
index 8d5fa291..e4bc0779 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/4-logical-backup.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/4-logical-backup.md
@@ -1,44 +1,44 @@
----
-title: Logical Backup
-summary: Logical Backup
-author: Guo Huan
-date: 2022-05-07
----
-
-# Logical Backup
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-Data in user tables in the database is backed up to a specified storage medium in a general format.
-
-## Benefits
-
-Through logical backup, you can achieve the following purposes:
-
-- Back up user data to a reliable storage medium to secure data.
-- Support cross-version recovery and heterogeneous recovery using a general data format.
-- Archive cold data.
-
-## Description
-
-MogDB provides the logical backup capability to back up data in user tables to local disk files in text or CSV format and restore the data in homogeneous or heterogeneous databases.
-
-## Enhancements
-
-None
-
-## Constraints
-
-For details about the restrictions on logical backup, see [gs_dump](../../reference-guide/tool-reference/server-tools/gs_dump.md) in the *Tool Reference*.
-
-## Dependencies
-
-None
-
-## Related Pages
-
+---
+title: Logical Backup
+summary: Logical Backup
+author: Guo Huan
+date: 2022-05-07
+---
+
+# Logical Backup
+
+## Availability
+
+This feature is available since MogDB 1.1.0.
+
+## Introduction
+
+Data in user tables in the database is backed up to a specified storage medium in a general format.
+
+## Benefits
+
+Through logical backup, you can achieve the following purposes:
+
+- Back up user data to a reliable storage medium to secure data.
+- Support cross-version recovery and heterogeneous recovery using a general data format.
+- Archive cold data.
+
+## Description
+
+MogDB provides the logical backup capability to back up data in user tables to local disk files in text or CSV format and restore the data in homogeneous or heterogeneous databases.
+
+## Enhancements
+
+None
+
+## Constraints
+
+For details about the restrictions on logical backup, see [gs_dump](../../reference-guide/tool-reference/server-tools/gs_dump.md) in the *Tool Reference*.
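+
+As a hedged illustration of the capability described above (database names, port, table, and paths are placeholders):
+
+```shell
+# Export a single user table as plain-text SQL with gs_dump.
+gs_dump -p 26000 postgres -t public.orders -F p -f /backup/orders.sql
+# Replay the exported file into another database with gsql.
+gsql -d restoredb -p 26000 -f /backup/orders.sql
+```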
+ +## Dependencies + +None + +## Related Pages + [Logical Backup And Restoration](../../administrator-guide/backup-and-restoration/logical-backup-and-restoration.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/5-physical-backup.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/5-physical-backup.md index ab0e361c..04c86846 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/5-physical-backup.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/5-physical-backup.md @@ -1,56 +1,56 @@ ---- -title: Physical Backup -summary: Physical Backup -author: Guo Huan -date: 2022-05-07 ---- - -# Physical Backup - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Data in the entire database is backed up to a specified storage medium in an internal format. - -## Benefits - -Through physical backup, you can achieve the following purposes: - -- Back up data of the entire database to a reliable storage medium, improving system reliability. -- Improve backup and restoration performance using an internal data format. -- Archive cold data. - -The typical physical backup policy and application scenario are as follows: - -- On Monday, perform a full backup of the database. -- On Tuesday, perform an incremental backup based on the full backup on Monday. -- On Wednesday, perform an incremental backup based on the incremental backup on Tuesday. -- … -- On Sunday, perform an incremental backup based on the incremental backup on Saturday. - -The preceding backup operations are executed every week. - -## Description - -MogDB provides the physical backup capability to back up data of the entire database to local disk files, OBS objects, NBU objects, or EISOO objects in the internal database format, and restore data of the entire database in a homogeneous database. In addition to the preceding functions, it also provides advanced functions such as compression, flow control, and resumable backup. - -Physical backup is classified into full backup and incremental backup. The difference is as follows: Full backup includes the full data of the database at the backup time point. The time required for full backup is long (in direct proportion to the total data volume of the database), and a complete database can be restored. An incremental backup involves only incremental data modified after a specified time point. It takes a short period of time (in direct proportion to the incremental data volume and irrelevant to the total data volume). However, a complete database can be restored only after the incremental backup and full backup are performed. - -## Enhancements - -Supports full backup and incremental backup simultaneously. - -## Constraints - -For details about the constraints of physical backup, see Backup and Restoration > [Physical Backup and Restoration](../../administrator-guide/backup-and-restoration/physical-backup-and-restoration.md) in *Administrator Guide*. - -## Dependencies - -None. - -## Related Pages - +--- +title: Physical Backup +summary: Physical Backup +author: Guo Huan +date: 2022-05-07 +--- + +# Physical Backup + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +Data in the entire database is backed up to a specified storage medium in an internal format. 
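+
+For example, a full physical backup of a running instance can be streamed with **gs_basebackup** (a minimal sketch; host, port, user, and directory are placeholders):
+
+```shell
+# Stream a full physical backup of the instance into a local directory.
+gs_basebackup -D /backup/full_20240601 -h 127.0.0.1 -p 26000 -U backupuser
+```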
+
+## Benefits
+
+Through physical backup, you can achieve the following purposes:
+
+- Back up data of the entire database to a reliable storage medium, improving system reliability.
+- Improve backup and restoration performance using an internal data format.
+- Archive cold data.
+
+The typical physical backup policy and application scenario are as follows:
+
+- On Monday, perform a full backup of the database.
+- On Tuesday, perform an incremental backup based on the full backup on Monday.
+- On Wednesday, perform an incremental backup based on the incremental backup on Tuesday.
+- …
+- On Sunday, perform an incremental backup based on the incremental backup on Saturday.
+
+The preceding backup operations are executed every week.
+
+## Description
+
+MogDB provides the physical backup capability to back up data of the entire database to local disk files, OBS objects, NBU objects, or EISOO objects in the internal database format, and restore data of the entire database in a homogeneous database. In addition to the preceding functions, it also provides advanced functions such as compression, flow control, and resumable backup.
+
+Physical backup is classified into full backup and incremental backup. The difference is as follows: Full backup includes the full data of the database at the backup time point. The time required for full backup is long (in direct proportion to the total data volume of the database), and a complete database can be restored. An incremental backup involves only incremental data modified after a specified time point. It takes a short period of time (in direct proportion to the incremental data volume and irrelevant to the total data volume). However, a complete database can be restored only after the incremental backup and full backup are performed.
+
+## Enhancements
+
+Both full backup and incremental backup are supported.
+
+## Constraints
+
+For details about the constraints of physical backup, see Backup and Restoration > [Physical Backup and Restoration](../../administrator-guide/backup-and-restoration/physical-backup-and-restoration.md) in *Administrator Guide*.
+
+## Dependencies
+
+None.
+
+## Related Pages
+
[Physical Backup and Restoration](../../administrator-guide/backup-and-restoration/physical-backup-and-restoration.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/6-automatic-job-retry-upon-failure.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/6-automatic-job-retry-upon-failure.md
index 7688bec4..f703ecbe 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/6-automatic-job-retry-upon-failure.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/6-automatic-job-retry-upon-failure.md
@@ -1,91 +1,91 @@
----
-title: Automatic Job Retry upon Failure
-summary: Automatic Job Retry upon Failure
-author: Guo Huan
-date: 2022-05-07
----
-
-# Automatic Job Retry upon Failure
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-If an error occurs in batch processing jobs due to network exceptions or deadlocks, failed jobs are automatically retried.
-
-## Benefits
-
-In common fault scenarios, such as network exception and deadlock, queries retry automatically in case of failure to improve database usability.
-
-## Description
-
-MogDB provides the job retry mechanism: gsql Retry.
- -- The gsql retry mechanism uses a unique error code (SQL STATE) to identify an error that requires a retry. The function of the client tool gsql is enhanced. The error code configuration file **retry_errcodes.conf** is used to configure the list of errors that require a retry. The file is stored in the installation directory at the same level as gsql. **gsql** provides the **\set RETRY** [*number*] command to enable or disable the retry function. The number of retry times ranges from 5 to 10, and the default value is **5**. When this function is enabled, **gsql** reads the preceding configuration file. The error retry controller records the error code list through the container. If an error occurs in the configuration file after the function is enabled, the controller sends the cached query statement to the server for retry until the query is successful or an error is reported when the number of retry times exceeds the maximum. - -## Enhancements - -None - -## Constraints - -- Functionality constraints: - - - Retrying increases execution success rate but does not guarantee success. - -- Error type constraints: - - Only the error types in Table 1 are supported. - - **Table 1** Supported error types - - | Error Type | Error Code | Remarks | - | :-------------------------------- | :--------- | :----------------------------------------------------------- | - | CONNECTION_RESET_BY_PEER | YY001 | TCP communication error. Print information: “Connection reset by peer” | - | STREAM_CONNECTION_RESET_BY_PEER | YY002 | TCP communication error. Print information: “Stream connection reset by peer” (communication between DNs) | - | LOCK_WAIT_TIMEOUT | YY003 | Lock wait timeout. Print information: “Lock wait timeout” | - | CONNECTION_TIMED_OUT | YY004 | TCP communication error. Print information: “Connection timed out” | - | SET_QUERY_ERROR | YY005 | Failed to deliver the **SET** command. Print information: “Set query error” | - | OUT_OF_LOGICAL_MEMORY | YY006 | Failed to apply for memory. Print information: “Out of logical memory” | - | SCTP_MEMORY_ALLOC | YY007 | SCTP communication error. Print information: “Memory allocate error” | - | SCTP_NO_DATA_IN_BUFFER | YY008 | SCTP communication error. Print information: “SCTP no data in buffer” | - | SCTP_RELEASE_MEMORY_CLOSE | YY009 | SCTP communication error. Print information: “Release memory close” | - | SCTP_TCP_DISCONNECT | YY010 | SCTP and TCP communication error. Print information: “SCTP, TCP disconnect” | - | SCTP_DISCONNECT | YY011 | SCTP communication error. Print information: “SCTP disconnect” | - | SCTP_REMOTE_CLOSE | YY012 | SCTP communication error. Print information: “Stream closed by remote” | - | SCTP_WAIT_POLL_UNKNOW | YY013 | Waiting for an unknown poll. Print information: “SCTP wait poll unknow” | - | SNAPSHOT_INVALID | YY014 | Invalid snapshot. Print information: “Snapshot invalid” | - | ERRCODE_CONNECTION_RECEIVE_WRONG | YY015 | Failed to receive a connection. Print information: “Connection receive wrong” | - | OUT_OF_MEMORY | 53200 | Out of memory. Print information: “Out of memory” | - | CONNECTION_EXCEPTION | 08000 | Failed to communicate with DNs due to connection errors. Print information: “Connection exception” | - | ADMIN_SHUTDOWN | 57P01 | System shutdown by the administrator. Print information: “Admin shutdown” | - | STREAM_REMOTE_CLOSE_SOCKET | XX003 | Remote socket disabled. Print information: “Stream remote close socket” | - | ERRCODE_STREAM_DUPLICATE_QUERY_ID | XX009 | Duplicate query. 
Print information: “Duplicate query id” | - | ERRCODE_STREAM_CONCURRENT_UPDATE | YY016 | Concurrent stream query and update. Print information: “Stream concurrent update” | - -- Statement type constraints: - - Support single-statement stored procedures, functions, and anonymous blocks. Statements in transaction blocks are not supported. - -- Statement constraints of a stored procedure: - - - If an error occurs during the execution of a stored procedure containing EXCEPTION (including statement block execution and statement execution in EXCEPTION), the stored procedure can be retried. If the error is captured by EXCEPTION, the stored procedure cannot be retried. - - Advanced packages that use global variables are not supported. - - DBE_TASK is not supported. - - PKG_UTIL file operation is not supported. - -- Data import constraints: - - - The **COPY FROM STDIN** statement is not supported. - - The **gsql \copy from** metacommand is not supported. - - Data cannot be imported using **JDBC CopyManager copyIn**. - -## Dependencies - -Valid only if the **gsql** tool works normally and the error list is correctly configured. - -## Related Pages - +--- +title: Automatic Job Retry upon Failure +summary: Automatic Job Retry upon Failure +author: Guo Huan +date: 2022-05-07 +--- + +# Automatic Job Retry upon Failure + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +If an error occurs in batch processing jobs due to network exceptions or deadlocks, failed jobs are automatically retried. + +## Benefits + +In common fault scenarios, such as network exception and deadlock, queries retry automatically in case of failure to improve database usability. + +## Description + +MogDB provides the job retry mechanism: gsql Retry. + +- The gsql retry mechanism uses a unique error code (SQL STATE) to identify an error that requires a retry. The function of the client tool gsql is enhanced. The error code configuration file **retry_errcodes.conf** is used to configure the list of errors that require a retry. The file is stored in the installation directory at the same level as gsql. **gsql** provides the **\set RETRY** [*number*] command to enable or disable the retry function. The number of retry times ranges from 5 to 10, and the default value is **5**. When this function is enabled, **gsql** reads the preceding configuration file. The error retry controller records the error code list through the container. If an error occurs in the configuration file after the function is enabled, the controller sends the cached query statement to the server for retry until the query is successful or an error is reported when the number of retry times exceeds the maximum. + +## Enhancements + +None + +## Constraints + +- Functionality constraints: + + - Retrying increases execution success rate but does not guarantee success. + +- Error type constraints: + + Only the error types in Table 1 are supported. + + **Table 1** Supported error types + + | Error Type | Error Code | Remarks | + | :-------------------------------- | :--------- | :----------------------------------------------------------- | + | CONNECTION_RESET_BY_PEER | YY001 | TCP communication error. Print information: “Connection reset by peer” | + | STREAM_CONNECTION_RESET_BY_PEER | YY002 | TCP communication error. Print information: “Stream connection reset by peer” (communication between DNs) | + | LOCK_WAIT_TIMEOUT | YY003 | Lock wait timeout. 
Print information: “Lock wait timeout” |
+    | CONNECTION_TIMED_OUT | YY004 | TCP communication error. Print information: “Connection timed out” |
+    | SET_QUERY_ERROR | YY005 | Failed to deliver the **SET** command. Print information: “Set query error” |
+    | OUT_OF_LOGICAL_MEMORY | YY006 | Failed to apply for memory. Print information: “Out of logical memory” |
+    | SCTP_MEMORY_ALLOC | YY007 | SCTP communication error. Print information: “Memory allocate error” |
+    | SCTP_NO_DATA_IN_BUFFER | YY008 | SCTP communication error. Print information: “SCTP no data in buffer” |
+    | SCTP_RELEASE_MEMORY_CLOSE | YY009 | SCTP communication error. Print information: “Release memory close” |
+    | SCTP_TCP_DISCONNECT | YY010 | SCTP and TCP communication error. Print information: “SCTP, TCP disconnect” |
+    | SCTP_DISCONNECT | YY011 | SCTP communication error. Print information: “SCTP disconnect” |
+    | SCTP_REMOTE_CLOSE | YY012 | SCTP communication error. Print information: “Stream closed by remote” |
+    | SCTP_WAIT_POLL_UNKNOW | YY013 | Waiting for an unknown poll. Print information: “SCTP wait poll unknow” |
+    | SNAPSHOT_INVALID | YY014 | Invalid snapshot. Print information: “Snapshot invalid” |
+    | ERRCODE_CONNECTION_RECEIVE_WRONG | YY015 | Failed to receive a connection. Print information: “Connection receive wrong” |
+    | OUT_OF_MEMORY | 53200 | Out of memory. Print information: “Out of memory” |
+    | CONNECTION_EXCEPTION | 08000 | Failed to communicate with DNs due to connection errors. Print information: “Connection exception” |
+    | ADMIN_SHUTDOWN | 57P01 | System shutdown by the administrator. Print information: “Admin shutdown” |
+    | STREAM_REMOTE_CLOSE_SOCKET | XX003 | Remote socket disabled. Print information: “Stream remote close socket” |
+    | ERRCODE_STREAM_DUPLICATE_QUERY_ID | XX009 | Duplicate query. Print information: “Duplicate query id” |
+    | ERRCODE_STREAM_CONCURRENT_UPDATE | YY016 | Concurrent stream query and update. Print information: “Stream concurrent update” |
+
+- Statement type constraints:
+
+  Single-statement stored procedures, functions, and anonymous blocks are supported. Statements in transaction blocks are not supported.
+
+- Statement constraints of a stored procedure:
+
+  - If an error occurs during the execution of a stored procedure containing EXCEPTION (including statement block execution and statement execution in EXCEPTION), the stored procedure can be retried. If the error is captured by EXCEPTION, the stored procedure cannot be retried.
+  - Advanced packages that use global variables are not supported.
+  - DBE_TASK is not supported.
+  - PKG_UTIL file operation is not supported.
+
+- Data import constraints:
+
+  - The **COPY FROM STDIN** statement is not supported.
+  - The **gsql \copy from** metacommand is not supported.
+  - Data cannot be imported using **JDBC CopyManager copyIn**.
+
+## Dependencies
+
+Valid only if the **gsql** tool works normally and the error list is correctly configured.
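+
+A minimal sketch of enabling the retry function described above (connection options are placeholders):
+
+```shell
+# Connect with gsql; -r enables command-line read/editing so meta-commands can be typed interactively.
+gsql -d postgres -p 26000 -r
+# Inside the session (illustrative):
+#   \set RETRY 5    enables automatic retry, at most 5 attempts per failed statement
+#   \set RETRY      issued again without a number, disables the retry function
+```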
+
+## Related Pages
+
[gsql](../../reference-guide/tool-reference/client-tool/gsql/client-tool-gsql.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/7-ultimate-rto.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/7-ultimate-rto.md
index abc2b859..f5c9305a 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/7-ultimate-rto.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/7-ultimate-rto.md
@@ -1,43 +1,43 @@
----
-title: Ultimate RTO
-summary: Ultimate RTO
-author: Guo Huan
-date: 2022-05-07
----
-
-# Ultimate RTO
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-- The database host can be quickly restored after being restarted.
-- Logs can be synchronized between the primary and standby nodes to accelerate playback on the standby node.
-
-## Benefits
-
-When the service load is heavy, the playback speed of the standby node cannot catch up with that of the primary node. After the system runs for a long time, logs are accumulated on the standby node. If a host is faulty, data restoration takes a long time and the database is unavailable, which severely affects system availability.
-
-The ultimate recovery time object (RTO) is enabled to reduce the data recovery time after a host fault occurs and improve availability.
-
-## Description
-
-After the ultimate RTO function is enabled, multi-level pipelines are established for Xlog log playback to improve the concurrency and log playback speed.
-
-## Enhancements
-
-None.
-
-## Constraints
-
-The ultimate RTO focuses only on whether the RTO of the standby node meets the requirements. The ultimate RTO removes the built-in flow control and uses the **recovery_time_target** parameter for flow control. This feature does not support the read operation on the standby node. If you query the standby node, a core dump may occur on the standby node.
-
-## Dependencies
-
-None.
-
-## Related Pages
-
+---
+title: Ultimate RTO
+summary: Ultimate RTO
+author: Guo Huan
+date: 2022-05-07
+---
+
+# Ultimate RTO
+
+## Availability
+
+This feature is available since MogDB 1.1.0.
+
+## Introduction
+
+- The database host can be quickly restored after being restarted.
+- Logs can be synchronized between the primary and standby nodes to accelerate playback on the standby node.
+
+## Benefits
+
+When the service load is heavy, the playback speed of the standby node cannot catch up with that of the primary node. After the system runs for a long time, logs are accumulated on the standby node. If a host is faulty, data restoration takes a long time and the database is unavailable, which severely affects system availability.
+
+The ultimate recovery time objective (RTO) is enabled to reduce the data recovery time after a host fault occurs and improve availability.
+
+## Description
+
+After the ultimate RTO function is enabled, multi-level pipelines are established for Xlog log playback to improve the concurrency and log playback speed.
+
+## Enhancements
+
+None.
+
+## Constraints
+
+The ultimate RTO focuses only on whether the RTO of the standby node meets the requirements. The ultimate RTO removes the built-in flow control and uses the **recovery_time_target** parameter for flow control. This feature does not support the read operation on the standby node. If you query the standby node, a core dump may occur on the standby node.
+
+## Dependencies
+
+None.
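+
+A minimal configuration sketch, assuming the parallel-replay parameters **recovery_parse_workers** and **recovery_redo_workers** (the data directory and values are placeholders; a restart is required for these settings to take effect):
+
+```shell
+# Enable the multi-level replay pipelines on the standby node.
+gs_guc set -D /gaussdb/data/dn1 -c "recovery_parse_workers=2"
+gs_guc set -D /gaussdb/data/dn1 -c "recovery_redo_workers=4"
+# Flow-control target from the description above, in seconds.
+gs_guc set -D /gaussdb/data/dn1 -c "recovery_time_target=60"
+```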
+ +## Related Pages + [Log Replay](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/8-cascaded-standby-server.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/8-cascaded-standby-server.md index c84748ee..ebdffa5e 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/8-cascaded-standby-server.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/8-cascaded-standby-server.md @@ -1,49 +1,49 @@ ---- -title: Cascaded Standby Server -summary: Cascaded Standby Server -author: Guo Huan -date: 2022-05-07 ---- - -# Cascaded Standby Server - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -A cascaded standby server can be connected to a standby server based on the one-primary-multiple-standby architecture. - -## Benefits - -The one-primary-multiple-standby architecture cannot support a flexible structure in special service scenarios. The multi-equipment room deployment cannot meet requirements of the complete structure in the HA switchover scenario \(three instances in the primary-standby equipment rooms and two or three instances in the standby-standby equipment rooms\). If the number of standby servers increases, the primary server may be overloaded. Queries that have low real-time requirements can be implemented on cascaded standby servers. Therefore, the cascading backup capability is required. - -## Description - -The primary server replicates logs to the standby server in synchronous or asynchronous mode. The standby server replicates logs to the cascaded standby server only in asynchronous mode. - -In the current one-primary-multiple-standby architecture, the primary server uses the WAL sender process (walsender) to replicate logs to the standby server. The standby server uses the WAL receiver process (walreceiver) to receive and then flushes logs to local disks. The standby server reads redo logs to complete data replication between the primary and standby servers. There is a one-to-one mapping between walsender and walreceiver on the primary and standby servers. Logs are sent between the standby and cascaded standby servers in asynchronous mode using walsender and walreceiver, reducing the streaming replication pressure on the primary server. - -## Enhancements - -None - -## Constraints - -- A cascaded standby server can only replicate data from a standby server and cannot directly replicate data from the primary server. -- A cascaded standby server does not support data build from a standby server. Currently, data can be built only from the primary server. If the standby server is fully built, the cascaded standby server needs to be fully built. -- The cascaded standby node is in asynchronous replication mode. -- The cascaded standby server cannot be promoted. -- The cascaded standby server cannot be notified. -- Currently, the overall architecture of the primary-standby-cascaded standby cluster cannot be queried. You need to find the standby server through the primary server and then find the cascaded standby server based on the standby server. -- A cascaded standby server cannot own another cascaded standby server. -- When the ultimate RTO is enabled, no cascaded standby server is supported. 
-
-## Dependencies
-
-None
-
-## Related Pages
-
+---
+title: Cascaded Standby Server
+summary: Cascaded Standby Server
+author: Guo Huan
+date: 2022-05-07
+---
+
+# Cascaded Standby Server
+
+## Availability
+
+This feature is available since MogDB 1.1.0.
+
+## Introduction
+
+A cascaded standby server can be connected to a standby server based on the one-primary-multiple-standby architecture.
+
+## Benefits
+
+The one-primary-multiple-standby architecture cannot support a flexible structure in special service scenarios. The multi-equipment room deployment cannot meet requirements of the complete structure in the HA switchover scenario (three instances in the primary-standby equipment rooms and two or three instances in the standby-standby equipment rooms). If the number of standby servers increases, the primary server may be overloaded. Queries that have low real-time requirements can be implemented on cascaded standby servers. Therefore, the cascading backup capability is required.
+
+## Description
+
+The primary server replicates logs to the standby server in synchronous or asynchronous mode. The standby server replicates logs to the cascaded standby server only in asynchronous mode.
+
+In the current one-primary-multiple-standby architecture, the primary server uses the WAL sender process (walsender) to replicate logs to the standby server. The standby server uses the WAL receiver process (walreceiver) to receive and then flushes logs to local disks. The standby server reads redo logs to complete data replication between the primary and standby servers. There is a one-to-one mapping between walsender and walreceiver on the primary and standby servers. Logs are sent between the standby and cascaded standby servers in asynchronous mode using walsender and walreceiver, reducing the streaming replication pressure on the primary server.
+
+## Enhancements
+
+None
+
+## Constraints
+
+- A cascaded standby server can only replicate data from a standby server and cannot directly replicate data from the primary server.
+- A cascaded standby server does not support data build from a standby server. Currently, data can be built only from the primary server. If the standby server is fully built, the cascaded standby server needs to be fully built.
+- The cascaded standby node is in asynchronous replication mode.
+- The cascaded standby server cannot be promoted.
+- The cascaded standby server cannot be notified.
+- Currently, the overall architecture of the primary-standby-cascaded standby cluster cannot be queried. You need to find the standby server through the primary server and then find the cascaded standby server based on the standby server.
+- A cascaded standby server cannot own another cascaded standby server.
+- When the ultimate RTO is enabled, no cascaded standby server is supported.
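+
+A hedged sketch of bringing up a cascaded standby under the constraints above (data directory paths and the **-M cascade_standby** mode flag are assumptions; see gs_ctl in *Tool Reference*):
+
+```shell
+# Per the constraints, the data build itself still runs against the primary server.
+gs_ctl build -D /gaussdb/data/dn_cascade -b full
+# The instance is then started in cascaded standby mode so that it streams from a standby.
+gs_ctl start -D /gaussdb/data/dn_cascade -M cascade_standby
+```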
+
+## Dependencies
+
+None
+
+## Related Pages
+
[Common Primary Backup Deployment Scenarios](../../administrator-guide/database-deployment-scenario/common-primary-backup-deployment-scenarios.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/9-delayed-replay.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/9-delayed-replay.md
index ad27985c..657bde68 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/9-delayed-replay.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/9-delayed-replay.md
@@ -1,50 +1,50 @@
----
-title: Delayed Replay
-summary: Delayed Replay
-author: Guo Huan
-date: 2022-05-07
----
-
-# Delayed Replay
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-The time for a standby node to replay can be delayed.
-
-## Benefits
-
-By default, the standby server restores the Xlog records from the primary server as soon as possible. This function allows you to delay the time for a standby node to replay Xlog records. In this case, you can query a copy that records data before a period of time, which helps correct errors such as misoperations.
-
-## Description
-
-The GUC parameter **recovery_min_apply_delay** can be used to set the delay time so that a standby server can replay Xlog records from the primary server after a delay time.
-
-Value range: an integer ranging from 0 to INT_MAX. The unit is ms.
-
-Default value: **0** (no delay)
-
-## Enhancements
-
-None
-
-## Constraints
-
-- The **recovery_min_apply_delay** parameter is invalid on the primary node. It must be set on the standby node to be delayed.
-- The delay time is calculated based on the timestamp of transaction commit on the primary server and the current time on the standby server. Therefore, ensure that the clocks of the primary and standby servers are the same.
-- Operations without transactions are not delayed.
-- After the primary/standby switchover, if the original primary server needs to be delayed, you need to manually set this parameter.
-- When **synchronous_commit** is set to **remote_apply**, synchronous replication is affected by the delay. Each commit message is returned only after the replay on the standby server is complete.
-- Using this feature also delays **hot_standby_feedback**, which may cause the primary server to bloat, so be careful when using both.
-- If a DDL operation (such as DROP or TRUNCATE) that holds an AccessExclusive lock is performed on the primary server, the query operation on the operation object on the standby server will be returned only after the lock is released during the delayed replay of the record on the standby server.
-
-## Dependencies
-
-None
-
-## Related Pages
-
+---
+title: Delayed Replay
+summary: Delayed Replay
+author: Guo Huan
+date: 2022-05-07
+---
+
+# Delayed Replay
+
+## Availability
+
+This feature is available since MogDB 1.1.0.
+
+## Introduction
+
+The time for a standby node to replay can be delayed.
+
+## Benefits
+
+By default, the standby server restores the Xlog records from the primary server as soon as possible. This function allows you to delay the time for a standby node to replay Xlog records. In this case, you can query a copy of the data as it was a period of time earlier, which helps correct errors such as misoperations.
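+
+For example, replay on a standby node can be delayed by five minutes with the parameter described in the next section (the data directory is a placeholder):
+
+```shell
+# Run on the standby node whose replay should lag behind the primary.
+gs_guc reload -D /gaussdb/data/dn_standby -c "recovery_min_apply_delay='5min'"
+```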
+ +## Description + +The GUC parameter **recovery_min_apply_delay** can be used to set the delay time so that a standby server can replay Xlog records from the primary server after a delay time. + +Value range: an integer ranging from 0 to INT_MAX. The unit is ms. + +Default value: **0** (no delay) + +## Enhancements + +None + +## Constraints + +- The **recovery_min_apply_delay** parameter is invalid on the primary node. It must be set on the standby node to be delayed. +- The delay time is calculated based on the timestamp of transaction commit on the primary server and the current time on the standby server. Therefore, ensure that the clocks of the primary and standby servers are the same. +- Operations without transactions are not delayed. +- After the primary/standby switchover, if the original primary server needs to be delayed, you need to manually set this parameter. +- When **synchronous_commit** is set to **remote_apply**, synchronous replication is affected by the delay. Each commit message is returned only after the replay on the standby server is complete. +- Using this feature also delays **hot_standby_feedback**, which may cause the primary server to bloat, so be careful when using both. +- If a DDL operation (such as DROP or TRUNCATE) that holds an AccessExclusive lock is performed on the primary server, the query operation on the operation object on the standby server will be returned only after the lock is released during the delayed replay of the record on the standby server. + +## Dependencies + +None + +## Related Pages + [Log Replay](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/cm-dual-network-segment-deployment.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/cm-dual-network-segment-deployment.md new file mode 100644 index 00000000..e281a236 --- /dev/null +++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/cm-dual-network-segment-deployment.md @@ -0,0 +1,175 @@ +--- +title: MogDB/CM/PTK Dual Network Segment Support +summary: MogDB/CM/PTK Dual Network Segment Support +author: 郭欢 朱陈 姚前 +date: 2024-01-29 +--- + +# MogDB/CM/PTK Dual Network Segment Support + +## Availability + +This feature is available since MogDB 5.0.6. + +## Introduction + +MogDB/CM/PTK now supports dual network segment configurations in system deployment to strengthen the high availability of database clusters. + +## Benefits + +With dual network segments deployed, the database cluster can continue to operate normally even if a single network segment fails, thus providing high availability management at the network segment level. + +## Description + +Traditionally, dual network segments rely on hardware configuration, binding two independent network segments together through VLAN technology, and then configuring them to the database cluster. This feature provides dual network segment support for MogDB and CM at the software level, eliminating the dependency on switching equipment. 
+ +### CM Support for Dual Network Segment Deployment + +As the high availability management tool for MogDB, CM supports dual network segment deployment with the following fault handling logic: + +- In the case of a single-node single-network segment failure, the mogdb and cm_server processes on that node continue to work normally, maintaining their roles, and the communication between cm_agent and cm_server will automatically switch to the normal network segment. +- In the case of a single-node dual-network segment failure, the node's network is completely interrupted, and the mogdb and cm_server processes will be terminated, stopping the service. +- In the case of multi-node single-network segment failures, the cluster operates normally. + +### PTK Support for MogDB Dual Network Segment Installation and Deployment + +PTK adapts to CM, enabling dual network segment deployment by configuring the [ha_ips](https://docs.mogdb.io/zh/ptk/v1.1/config#ha_ips) parameter in the config.yaml file. ha_ips should be configured under each node in db_servers as a list, including the IP of the second network segment. The current version supports a maximum of one IP configuration. + +When deploying a two-node (one primary and one standby) setup, the [third_party_gateway_ip](../../high-available-guide/cluster-management/cm-configuration-parameter/cm-cm_server.md#third_party_gateway_ip) parameter in the yaml configuration file (which is a third-party gateway for a single network segment) should be set as a dual network segment parameter, separated by a comma, such as `third_party_gateway_ip: 172.0.0.17,10.0.0.17`. + +For single network segment deployment, `third_party_gateway_ip` only needs to be configured with one IP; for dual network segment deployment, `third_party_gateway_ip` requires two third-party gateway IPs for each network segment, separated by a comma. + +> Note: +> +> - The network segments referred to in this feature are at the internal network level of the database cluster, that is, the HA management network and the data network, and do not involve the application layer network. +> - When deploying dual network segments, PTK will automatically establish SSH mutual trust between the two network segments of the nodes. 
+ +Example configuration file for two-node (one primary and one standby) installation: + +```yaml +# config.yaml +global: + cluster_name: test51 + user: test51 + group: test51 + db_password: pTk6NjM4ZmRkOWE8Qz08PUQ/RXU5NTFmaDZwZTdmSU9zcGh4OWJ1S2ZzcGxKdG40R0lyaHdaMGJKb3lTa2s= + db_port: 24635 + base_dir: /data/test51/mogdb + db_conf: + log_min_messages: 'DEBUG5' + ssh_option: + port: 22 + user: root + key_file: "/root/.ssh/id_rsa" + cm_option: + cm_server_port: 24678 + cm_server_conf: + ddb_type: 1 + enable_ssl: on + third_party_gateway_ip: 172.0.0.17,10.0.0.17 + cms_enable_failover_on2nodes: 'true' + cms_enable_db_crash_recovery: 'true' + log_min_messages: 'DEBUG5' + cm_agent_conf: + enable_ssl: on + log_min_messages: 'DEBUG5' +db_servers: + - host: 172.0.0.12 + role: primary + az_name: AZ1 + az_priority: 1 + ha_ips: [10.0.0.12] + ssh_option: + password: pTk6NjM4ZmRkOWE8Qz08PUQ/RXU5NTFmaDZwZTdmSU9zcGh4OWJ1S2ZzcGxKdG40R0lyaHdaMGJKb3lTa2s= + - host: 172.0.0.22 + role: standby + az_name: AZ1 + az_priority: 1 + ha_ips: [10.0.0.22] + ssh_option: + password: pTk6NjM4ZmRkOWE8Qz08PUQ/RXU5NTFmaDZwZTdmSU9zcGh4OWJ1S2ZzcGxKdG40R0lyaHdaMGJKb3lTa2s= +``` + +Example configuration file for three-node (one primary and two standby) installation: + +```yaml +# config.yaml +global: + cluster_name: test50 + user: test50 + group: test50 + db_password: pTk6NjM4ZmRkOWE8Qz08PUQ/RXU5NTFmaDZwZTdmSU9zcGh4OWJ1S2ZzcGxKdG40R0lyaHdaMGJKb3lTa2s= + db_port: 24635 + base_dir: /data/test50/mogdb + db_conf: + log_min_messages: 'DEBUG5' + ssh_option: + port: 22 + user: root + key_file: "/root/.ssh/id_rsa" + cm_option: + cm_server_port: 24678 + cm_server_conf: + ddb_type: 1 + enable_ssl: on + cms_enable_failover_on2nodes: 'true' + cms_enable_db_crash_recovery: 'true' + log_min_messages: 'DEBUG5' + cm_agent_conf: + enable_ssl: on + log_min_messages: 'DEBUG5' +db_servers: + - host: 172.0.0.12 + role: primary + az_name: AZ1 + az_priority: 1 + ha_ips: [10.0.0.12] + ssh_option: + password: pTk6NjM4ZmRkOWE8Qz08PUQ/RXU5NTFmaDZwZTdmSU9zcGh4OWJ1S2ZzcGxKdG40R0lyaHdaMGJKb3lTa2s= + - host: 172.0.0.22 + role: standby + az_name: AZ1 + az_priority: 1 + ha_ips: [10.0.0.22] + ssh_option: + password: pTk6NjM4ZmRkOWE8Qz08PUQ/RXU5NTFmaDZwZTdmSU9zcGh4OWJ1S2ZzcGxKdG40R0lyaHdaMGJKb3lTa2s= + - host: 172.0.0.43 + role: standby + az_name: AZ1 + az_priority: 1 + ha_ips: [10.0.0.43] + ssh_option: + password: pTk6NjM4ZmRkOWE8Qz08PUQ/RXU5NTFmaDZwZTdmSU9zcGh4OWJ1S2ZzcGxKdG40R0lyaHdaMGJKb3lTa2s= +``` + +### Mounting VIP By PTK + +Use the PTK command to mount VIP for the cluster + +``` shell +ptk cluster -n CLUSTER_NAME load-cm-vip --action install --vip VIP +``` + +CLUSTER_NAME is the cluster name and VIP is the vip to be mounted, note that the mounted VIP segment needs to be consistent with the target segment. A single-segment cluster environment can mount one VIP, and a two-segment cluster environment can mount two VIPs (one for each segment). + +## Constraints + +- This feature depends on PTK version 1.4 or above. +- The VIP mounted by this feature is limited to IPV4 and does not support IPV6 at the moment. +- Each node in the database cluster configured for dual network segments must have at least two network segments. +- Dual network segment deployment does not support cross-segment fault scenarios (e.g., node 1 has a fault in segment 1, and node 2 has a fault in segment 2). +- Ping permissions must be enabled on the corresponding segments of the deployment nodes. 
+- After a network segment failure, the stream replication switchover of MogDB occurs after the time specified by the [wal_receiver_timeout](../../reference-guide/guc-parameters/ha-replication/standby-server.md#wal_receiver_timeout) parameter. Reducing the value of this parameter can speed up the switchover time of MogDB's stream replication.
+- When a single network segment fault occurs on a node, the cluster continues to operate normally. After the segment is restored, due to the time required for the DCF to establish TCP connections and acquire resources, it takes about 15 seconds for the segment to reconnect normally.
+- Due to the configuration limits of MogDB's stream replication, dual network segment deployment supports up to one primary and four standbys at most.
+- When installing and deploying clusters, it is not supported to have some nodes configured with a single network segment and some with dual network segments (the number of network segments for each node in the cluster should be consistent).
+- When scaling up or down, it is not supported to scale a single network segment cluster with dual network segment nodes, nor to scale a dual network segment cluster with single network segment nodes (i.e., the number of network segments for each node in the cluster should be consistent).
+- When deploying a two-node cluster with dual network segments, the `third_party_gateway_ip` parameter in the `cm_server.conf` file should be configured with the third-party gateway IPs for both network segments, such as "172.0.0.17,10.0.0.17", meaning each network segment should have its own third-party gateway IP, separated by a comma.
+
+## Dependencies
+
+DCF (Distributed Consensus Framework), DCC (Distributed Consensus Center)
+
+## Related Pages
+
+[Unified Database Management Tool](../../high-available-guide/cluster-management/cluster-management.md), [PTK Configuration](https://docs.mogdb.io/en/ptk/v1.1/config), [DCF](./13-dcf.md)
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/enhanced-efficiency-of-logical-backup-and-restore.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/enhanced-efficiency-of-logical-backup-and-restore.md
new file mode 100644
index 00000000..58f867ba
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/enhanced-efficiency-of-logical-backup-and-restore.md
@@ -0,0 +1,175 @@
+---
+title: Enhanced Efficiency of Logical Backup and Restore
+summary: Enhanced Efficiency of Logical Backup and Restore
+author: 郭欢 王焱
+date: 2024-03-22
+---
+
+# Enhanced Efficiency of Logical Backup and Restore
+
+## Availability
+
+This feature is available since MogDB 5.0.6.
+
+## Introduction
+
+This feature improves the efficiency of logical backup and restore, supporting parallel execution of logical backups when the export file format is a directory (-F, --format=d), and also supports parallel import of directory-formatted export files.
+
+## Benefits
+
+It meets customers' backup-efficiency requirements in scenarios with large amounts of data, saving time and space costs for database users. Parallel import and export deliver excellent performance improvements, achieving up to a 4- to 10-fold enhancement in optimal scenarios.
## Description
+
+The gs_dump tool introduces a new parameter: -j, --jobs=NUM, which supports inter-table parallel data export when the export file format is a directory, specifying the number of workers for the backup task to improve the efficiency of backup data export.
+
+The gs_restore tool supports parallel import of files with directory and custom archive formats (.dmp), achieving efficiency improvement in backup data import.
+
+Additionally, this feature supports parallel import/export of data for each slice by sharding single table data; starting from MogDB 5.0.8, it supports grouping each partition of a partitioned table and parallel execution of data import/export for each partition within the group, enhancing backup efficiency.
+
+> Note:
+>
+> - Setting the -j/--jobs parameter to 1 is equivalent to turning off the parallel import/export feature.
+> - Worker refers to the process executing the backup import/export.
+> - Parallel import/export will increase MogDB's CPU usage accordingly with different degrees of parallelism, leading to increased machine load.
+
+## Constraints
+
+- Parallel export of single table sharding and parallel export of partitioned table grouping only apply to large tables over 1G.
+
+- Only single tables exported in parallel can be imported in parallel (the -j parameters of gs_dump and gs_restore need to be used in conjunction, and the parameter value must be greater than 1). For example:
+
+  ```shell
+  gs_dump -f backupdir/dir_bdat postgres -F d -j 4 -t <table_name>
+  gs_restore backupdir/dir_bdat -d postgres -j 4 -t <table_name>
+  ```
+
+- If you specify the --inserts/--column-inserts parameter when using gs_dump, you cannot perform a single-table parallel export.
+
+## Examples
+
+```shell
+# Specify the number of parallel workers for export as 4
+# Method one:
+gs_dump -f backupdir/dir_bdat postgres -F d -j 4
+# Method two:
+gs_dump -f backupdir/dir_bdat postgres -F d --jobs=4
+
+# Specify the number of parallel workers for import as 4
+# Method one:
+gs_restore backupdir/dir_bdat -d postgres -j 4
+# Method two:
+gs_restore backupdir/dir_bdat -d postgres --jobs=4
+```
+
+## Performance Testing
+
+There are 7 sets of performance tests, which are:
+
+1. Parallel export and import of the standard TPCC dataset
+2. Parallel export and import of the standard TPCH dataset
+3. Parallel export and import of 1000 small tables
+4. Parallel export and import of a large single table
+5. Parallel export and import of a 17GB partitioned large table
+6. Parallel export and import of a 51GB partitioned large table
+7. Parallel export and import of a 103GB partitioned large table
+
+**1. Parallel export and import of the standard TPCC dataset**
+
+Export:
+
+![1](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency1.png)
+
+Import:
+
+![2](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency2.png)
+
+**2. Parallel export and import of the standard TPCH dataset**
+
+Export:
+
+![3](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency3.png)
+
+Import:
+
+![4](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency4.png)
+
+**3. Parallel export and import of 1000 small tables**
+
+Export:
+
+![5](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency5.png)
+
+Import:
+
+![6](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency6.png)
+
+**4. Parallel export and import of a large single table**
Parallel export and import of a large single table** + +Export: + +![7](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency7.png) + +Import: + +![8](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency8.png) + +**Results Analysis for Groups 1-4** + +**gs_dump** + +- gs_dump shows superior performance in scenarios with a large number of tables and single large tables. + +- The export efficiency of the TPCC dataset can be improved by up to 12.5 times, and TPCH by 7.1 times. With 1000 small tables, parallelism can enhance efficiency by up to 7.9 times, and ordinary large tables can be improved by 6.3 to 7.9 times. + +- The optimal performance is observed with a parallelism degree of 8 to 20. Increasing the parallelism degree further does not increase export efficiency, and the CPU usage of MogDB during export is directly proportional to the number of concurrent tasks. + +**gs_restore** + +- gs_restore shows superior performance with a large number of tables or a single table in directory format. The dmp format, which cannot leverage data parallelism due to the inability of gs_dump to split data, has limited performance enhancement. However, it still shows excellent performance in scenarios with a large number of small tables. + +- The import performance of the TPCC dataset in directory format can be improved by up to 3.1 times, and TPCH by 2 times. With 1000 small tables, parallelism can enhance efficiency by up to 3.8 times, and ordinary large tables can be improved by up to 5.5 times. + +- The import performance of the TPCC dataset in dmp format can be improved by up to 1.5 times, and TPCH by 1.2 times. With 1000 small tables, parallelism can enhance efficiency by up to 3.8 times, while ordinary large tables show no improvement due to the inability of gs_dump to split data. + +- The optimal performance is observed with a parallelism degree of 10 to 20. Increasing the parallelism degree further does not increase import efficiency, and the CPU usage of MogDB during import is directly proportional to the number of concurrent tasks. + +**5. Parallel export and import of a 17GB partitioned large table** + +Export: + +![9](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency9.png) + +Import: + +![10](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency10.png) + +**6. Parallel export and import of a 51GB partitioned large table** + +Export: + +![11](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency11.png) + +Import: + +![12](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency12.png) + +**7. Parallel export and import of a 103GB partitioned large table** + +Export: + +![13](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency13.png) + +Import: + +![14](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/concurrency14.png) + +**Results Analysis for Groups 5-7** + +For the 103GB partitioned large table, compared to serial import and export, the performance (import/export time) with parallel degrees set to 2, 4, and 8 has improved by 1 times, 3 times, and 7 times, respectively. + +It can be seen that as the degree of parallelism increases, the performance improvement of parallel export and import for partitioned large tables meets expectations, with the 17GB, 51GB, and 103GB partitioned tables showing consistent linear scalability. 
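+
+For reference, the following minimal timing sketch shows how such a serial-versus-parallel comparison can be reproduced on your own dataset. The database name, directory paths, and the parallel degree of 8 are illustrative assumptions, not part of the test setup above:
+
+```shell
+# Serial baseline: directory-format export without -j
+time gs_dump -f backupdir/dir_serial postgres -F d
+
+# 8-way parallel export of the same database
+time gs_dump -f backupdir/dir_parallel postgres -F d -j 8
+
+# 8-way parallel import of the directory-format dump
+time gs_restore backupdir/dir_parallel -d postgres -j 8
+```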
+ +## Related Pages + +[gs_dump](../../reference-guide/tool-reference/server-tools/gs_dump.md), [gs_restore](../../reference-guide/tool-reference/server-tools/gs_restore.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/high-availability.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/high-availability.md index f0212e45..3f71de9a 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/high-availability.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-availability/high-availability.md @@ -25,4 +25,6 @@ date: 2023-05-22 + **[Using a Standby Node to Build a Standby Node](16-using-a-standby-node-to-build-a-standby-node.md)** + **[Two-City Three-DC DR](17-two-city-three-dc-dr.md)** + **[CM Supporting Two-Node Deployment](cm-cluster-management-component-supporting-two-node-deployment.md)** -+ **[Query of the Original DDL Statement for a View](ddl-query-of-view.md)** \ No newline at end of file ++ **[Query of the Original DDL Statement for a View](ddl-query-of-view.md)** ++ **[MogDB/CM/PTK Dual Network Segment Support](cm-dual-network-segment-deployment.md)** ++ **[Enhanced Efficiency of Logical Backup and Restore](enhanced-efficiency-of-logical-backup-and-restore.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/1-cbo-optimizer.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/1-cbo-optimizer.md index 0eee0d37..089cd4b8 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/1-cbo-optimizer.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/1-cbo-optimizer.md @@ -1,40 +1,36 @@ ---- -title: CBO Optimizer -summary: CBO Optimizer -author: Guo Huan -date: 2022-05-07 ---- - -# CBO Optimizer - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The MogDB optimizer is cost-based optimization (CBO). - -## Benefits - -The MogDB CBO optimizer can select the most efficient execution plan among multiple plans based on the cost to meet customer service requirements to the maximum extent. - -## Description - -By using CBO, the database calculates the number of tuples and the execution cost for each step under each execution plan based on the number of table tuples, column width, null record ratio, and characteristic values, such as distinct, MCV, and HB values, and certain cost calculation methods. The database then selects the execution plan that takes the lowest cost for the overall execution or for the return of the first tuple. - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - -[Optimizer](../../performance-tuning/sql-tuning/sql-tuning-optimizer.md) \ No newline at end of file +--- +title: CBO Optimizer +summary: CBO Optimizer +author: Guo Huan +date: 2022-05-07 +--- + +# CBO Optimizer + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +The MogDB optimizer is cost-based optimization (CBO). + +## Benefits + +The MogDB CBO optimizer can select the most efficient execution plan among multiple plans based on the cost to meet customer service requirements to the maximum extent. 
+ +## Description + +By using CBO, the database calculates the number of tuples and the execution cost for each step under each execution plan based on the number of table tuples, column width, null record ratio, and characteristic values, such as distinct, MCV, and HB values, and certain cost calculation methods. The database then selects the execution plan that takes the lowest cost for the overall execution or for the return of the first tuple. + +## Enhancements + +None + +## Constraints + +None + +## Dependencies + +None \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/12-row-store-execution-to-vectorized-execution.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/12-row-store-execution-to-vectorized-execution.md index b20830bd..5a0ae7c6 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/12-row-store-execution-to-vectorized-execution.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/12-row-store-execution-to-vectorized-execution.md @@ -1,110 +1,110 @@ ---- -title: Row-Store Execution to Vectorized Execution -summary: Row-Store Execution to Vectorized Execution -author: Guo Huan -date: 2022-05-10 ---- - -# Row-Store Execution to Vectorized Execution - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -Row-store table queries are converted into vectorized execution plans for execution, improving the execution performance of complex queries. - -## Benefits - -Effectively improve the query performance of complex queries. - -## Description - -This feature adds a RowToVec operation to scan operators. After the data in the row-store table is converted into the vectorized format in the memory, the upper-layer operators can be converted into vectorized operators. In this way, the vectorized executor can be used for calculation. Scan operators that support this conversion include SeqScan, IndexOnlyscan, IndexScan, BitmapScan, FunctionScan, ValueScan and TidScan. - -## Constraints - -- Vectorization is not supported in the following scenarios: - - The targetList contains a function that returns set. - - The targetList or qual contains expressions that do not support vectorization, such as array expressions, multi-subquery expressions, field expressions, and system catalog columns. - - The following types do not support vectorization: POINTOID, LSEGOID, BOXOID, LINEOID, CIRCLEOID, POLYGONOID, PATHOID, and user-defined types. -- MOTs do not support vectorization. -- The vectorization engine needs to be turned on, i.e. set `enable_vector_engine = on`. - -## Example - -1. Create a table, insert data, and turn on the vectorization engine. - - ```sql - -- Create table, insert test data - create table mogdb_rowtovec_1 (id int, rating int, match text); - - insert into - mogdb_rowtovec_1 - values - ( - generate_series(1, 20000), - floor(random() * 100), - 'match# ' || generate_series(1, 113) - ); - - vacuum analyze mogdb_rowtovec_1; - - set enable_vector_engine = on; - ``` - -2. When `try_vector_engine_strategy = 'optimal'`, the optimizer evaluates the statement in terms of the amount of data to be processed, the expression complexity, and the overhead of row-column transformation, etc., and then automatically selects whether or not to vectoredize the row-store table plan based on the cost. The vectorized plan will be added with the `Vector Adapter / Row Adaptor` operator for row-column transformation. 
- - ```sql - -- Set vector strategy to optimal - set try_vector_engine_strategy = 'optimal'; - - -- Simple batch processing scenarios with no implementation of the vectorized row inventory schedule - MogDB=# explain (costs off) select id, rating from mogdb_rowtovec_1; - QUERY PLAN - ------------------------------ - Seq Scan on mogdb_rowtovec_1 - (1 row) - - -- Vectorization scenario, selecting the execution of a vectorized bank table plan - MogDB=# explain (costs off) - select - sum(rating), - avg(rating), - sum(rating + 10), - match - from - mogdb_rowtovec_1 - group by - rating, - match; - QUERY PLAN - ------------------------------------------------ - Row Adapter - -> Vector Sonic Hash Aggregate - Group By Key: rating, match - -> Vector Adapter(type: BATCH MODE) - -> Seq Scan on mogdb_rowtovec_1 - (5 rows) - ``` - -3. MogDB also supports Force mode (`try_vector_engine_strategy = 'force'`), in which the optimizer aggressively performs row and column transformations to the extent supported by the column-storage engine while ignoring the effects of cost. - - ```sql - -- Set vector strategy to Force - set try_vector_engine_strategy = 'force'; - - -- Re-execute simple batch processing scenarios - MogDB=# explain (costs off) select id, rating from mogdb_rowtovec_1; -- vectorized - QUERY PLAN - ------------------------------------------ - Row Adapter - -> Vector Adapter(type: BATCH MODE) - -> Seq Scan on mogdb_rowtovec_1 - (3 rows) - ``` - -## Related Pages - +--- +title: Row-Store Execution to Vectorized Execution +summary: Row-Store Execution to Vectorized Execution +author: Guo Huan +date: 2022-05-10 +--- + +# Row-Store Execution to Vectorized Execution + +## Availability + +This feature is available since MogDB 3.0.0. + +## Introduction + +Row-store table queries are converted into vectorized execution plans for execution, improving the execution performance of complex queries. + +## Benefits + +Effectively improve the query performance of complex queries. + +## Description + +This feature adds a RowToVec operation to scan operators. After the data in the row-store table is converted into the vectorized format in the memory, the upper-layer operators can be converted into vectorized operators. In this way, the vectorized executor can be used for calculation. Scan operators that support this conversion include SeqScan, IndexOnlyscan, IndexScan, BitmapScan, FunctionScan, ValueScan and TidScan. + +## Constraints + +- Vectorization is not supported in the following scenarios: + - The targetList contains a function that returns set. + - The targetList or qual contains expressions that do not support vectorization, such as array expressions, multi-subquery expressions, field expressions, and system catalog columns. + - The following types do not support vectorization: POINTOID, LSEGOID, BOXOID, LINEOID, CIRCLEOID, POLYGONOID, PATHOID, and user-defined types. +- MOTs do not support vectorization. +- The vectorization engine needs to be turned on, i.e. set `enable_vector_engine = on`. + +## Example + +1. Create a table, insert data, and turn on the vectorization engine. + + ```sql + -- Create table, insert test data + create table mogdb_rowtovec_1 (id int, rating int, match text); + + insert into + mogdb_rowtovec_1 + values + ( + generate_series(1, 20000), + floor(random() * 100), + 'match# ' || generate_series(1, 113) + ); + + vacuum analyze mogdb_rowtovec_1; + + set enable_vector_engine = on; + ``` + +2. 
When `try_vector_engine_strategy = 'optimal'`, the optimizer evaluates the statement in terms of the amount of data to be processed, the expression complexity, the overhead of row-column transformation, and so on, and then automatically decides, based on cost, whether to vectorize the row-store table plan. A `Vector Adapter` / `Row Adapter` operator is added to the vectorized plan for row-column transformation.
+
+   ```sql
+   -- Set vector strategy to optimal
+   set try_vector_engine_strategy = 'optimal';
+
+   -- Simple batch processing scenario: the vectorized row-store table plan is not chosen
+   MogDB=# explain (costs off) select id, rating from mogdb_rowtovec_1;
+            QUERY PLAN
+   ------------------------------
+    Seq Scan on mogdb_rowtovec_1
+   (1 row)
+
+   -- Vectorization scenario: the vectorized row-store table plan is chosen for execution
+   MogDB=# explain (costs off)
+   select
+     sum(rating),
+     avg(rating),
+     sum(rating + 10),
+     match
+   from
+     mogdb_rowtovec_1
+   group by
+     rating,
+     match;
+                      QUERY PLAN
+   ------------------------------------------------
+    Row Adapter
+      -> Vector Sonic Hash Aggregate
+            Group By Key: rating, match
+            -> Vector Adapter(type: BATCH MODE)
+               -> Seq Scan on mogdb_rowtovec_1
+   (5 rows)
+   ```
+
+3. MogDB also supports Force mode (`try_vector_engine_strategy = 'force'`), in which the optimizer aggressively performs row-column transformations to the extent supported by the column-store engine, ignoring the effects of cost.
+
+   ```sql
+   -- Set vector strategy to Force
+   set try_vector_engine_strategy = 'force';
+
+   -- Re-execute the simple batch processing scenario
+   MogDB=# explain (costs off) select id, rating from mogdb_rowtovec_1; -- vectorized
+                  QUERY PLAN
+   ------------------------------------------
+    Row Adapter
+      -> Vector Adapter(type: BATCH MODE)
+         -> Seq Scan on mogdb_rowtovec_1
+   (3 rows)
+   ```
+
+## Related Pages
+
[enable_vector_engine](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_vector_engine) \ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/2-llvm.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/2-llvm.md
index 9eb1e203..0cd7fdb7 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/2-llvm.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/2-llvm.md
@@ -1,40 +1,40 @@
----
-title: LLVM
-summary: LLVM
-author: Guo Huan
-date: 2022-05-07
----
-
-# LLVM
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-MogDB provides the Low Level Virtual Machine (LLVM) technology to query dynamic compilation execution.
-
-## Benefits
-
-The requery performance is greatly improved by dynamically building and executing queries.
-
-## Description
-
-Based on the query execution plan tree, with the library functions provided by the LLVM, MogDB moves the process of determining the actual execution path from the executor phase to the execution initialization phase. In this way, problems such as function calling, logic condition branch determination, and a large amount of data reading that are related to the original query execution are avoided, to improve the query performance.
-
-## Enhancements
-
-None
-
-## Constraints
-
-None
-
-## Dependencies
-
-It depends on the LLVM open-source component. Currently, the open-source version 10.0.0 is used.
- 
-## Related Pages
-
+---
+title: LLVM
+summary: LLVM
+author: Guo Huan
+date: 2022-05-07
+---
+
+# LLVM
+
+## Availability
+
+This feature is available since MogDB 1.1.0.
+
+## Introduction
+
+MogDB provides the Low Level Virtual Machine (LLVM) technology for dynamic compilation and execution of queries.
+
+## Benefits
+
+Query performance is greatly improved by dynamically building and executing queries.
+
+## Description
+
+Based on the query execution plan tree, and using the library functions provided by LLVM, MogDB moves the process of determining the actual execution path from the executor phase to the execution initialization phase. In this way, the overhead of the original query execution, such as function calls, logical condition branching, and large amounts of data reading, is avoided, improving query performance.
+
+## Enhancements
+
+None
+
+## Constraints
+
+None
+
+## Dependencies
+
+It depends on the LLVM open-source component. Currently, the open-source version 10.0.0 is used.
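+
+As a quick orientation, the sketch below shows how LLVM-based compilation is typically exercised from a session. It assumes the openGauss-style GUC parameters `enable_codegen` and `codegen_cost_threshold` are available in the current build, and `t_demo` is a hypothetical table:
+
+```sql
+-- Allow LLVM code generation for qualifying expressions
+SET enable_codegen = on;
+
+-- Only generate native code for plans whose estimated cost exceeds this threshold
+SET codegen_cost_threshold = 10000;
+
+-- For a sufficiently expensive query, the execution path is determined at
+-- execution initialization instead of being interpreted tuple by tuple
+EXPLAIN ANALYZE SELECT count(*) FROM t_demo WHERE c1 > 100;
+```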
- -## Related Pages - +--- +title: Vectorized Engine +summary: Vectorized Engine +author: Guo Huan +date: 2022-05-07 +--- + +# Vectorized Engine + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +The vectorized execution engine, provided by MogDB, is usually used in OLAP data warehouse systems because analytical systems are usually data-intensive and access most data in a table in a sequential manner, perform calculation, and finally output a calculation result to an end user. + +## Benefits + +Batch calculation greatly improves the performance of complex query. + +## Description + +The traditional database query execution uses the tuple-based pipeline execution mode. In most time, the CPU is not used to actually process data, but to traverse the query operation tree. As a result, the effective utilization of the CPU is not high. This also results in low instruction cache performance and frequent jumps. Worse still, this approach does not take advantage of the new capabilities of the new hardware to speed up the execution of queries. In the execution engine, another solution is to change a tuple to a column at a time. This is also the basis of our vectorized execution engine. + +The vectorized engine is bound to the column-store technology, because data of each column is stored together, and it may be considered that the data is stored in an array manner. Based on such a feature, when a same operation needs to be performed on the column data, calculation of each value of the data block may be efficiently completed by using a cycle. + +The advantages of the vectorized execution engine are as follows: + +- This reduces inter-node scheduling and improves CPU usage. +- Because the same type of data is put together, it is easier to leverage the new optimization features of hardware and compilation. + +## Enhancements + +None + +## Constraints + +None + +## Dependencies + +It depends on column store. + +## Related Pages + [Using the Vectorized Executor for Tuning](../../performance-tuning/system-tuning/configuring-vector-engine.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/enhancement-of-dirty-pages-flushing-performance.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/enhancement-of-dirty-pages-flushing-performance.md new file mode 100644 index 00000000..663e15b1 --- /dev/null +++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/enhancement-of-dirty-pages-flushing-performance.md @@ -0,0 +1,102 @@ +--- +title: Enhancement of Dirty Pages Flushing Performance +summary: Enhancement of Dirty Pages Flushing Performance +author: 郭欢 王云龙 +date: 2024-06-26 +--- + +# Enhancement of Dirty Pages Flushing Performance + +## Availability + +This feature is available since MogDB 5.0.8. + +## Introduction + +In MogDB's incremental checkpoint mode (which is also the default dirty page flushing mode), when the database faces a scenario with high write pressure, a large number of dirty pages can accumulate, leading to the following consequences: + +1. Long checkpoint duration +2. Long switchover duration +3. Long downtime, etc. + +MogDB 5.0.8 supports the ultimate dirty page flushing feature, which can be enabled by setting the parameter `extreme_flush_dirty_page = on`. 
If these operations currently show high latency on your system, this parameter can be enabled to increase the flushing speed under high-pressure scenarios, allowing upper-level operations to respond quickly and reducing the time required for checkpoints, switchovers, restarts, and recovery (RTO).
+
+## New GUC Parameters
+
+### extreme_flush_dirty_page
+
+**Parameter Description**: Whether to enable the ultimate dirty page flushing mode (enabling it speeds up flushing but increases write amplification).
+
+This parameter is of the POSTMASTER type.
+
+**Value Range**: Boolean
+
+**Default Value**: off
+
+**Note**: Before turning on this parameter, please ensure that the current system's slow flushing is not caused by limited system I/O capacity. Monitoring tools such as iostat and Node-exporter can be used to confirm that there is no disk I/O bottleneck. For shared storage services, also ensure that the I/O capacity limit of the shared storage service has not been reached.
+
+### checkpoint_target_time
+
+**Parameter Description**: The desired maximum duration of a checkpoint. The smaller the value, the faster the flushing and the shorter the actual checkpoint duration, but write amplification increases; if I/O becomes a bottleneck, a very low value may affect the business. The corresponding upper-level operations include shutdown (stop), switchover (primary-standby switch), and manually executing the CHECKPOINT statement.
+
+This parameter is of the POSTMASTER type.
+
+**Value Range**: 5 - 60s
+
+**Default Value**: 30s
+
+## New Function
+
+local_pagewriter_flush_detail()
+
+**Description**: Displays detailed information about the flushing process, including the GUC parameters related to flushing, variable information in the flushing process, and so on. When the system's flushing is slow, calling this function can help analyze the bottleneck.
+
+**Permissions**: Any user can call it.
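+
+A minimal usage sketch follows; the data directory path is hypothetical, and the columns referenced are from the return table below. Since both new parameters are of the POSTMASTER type, they must be persisted and the instance restarted:
+
+```shell
+# Persist the switches and restart the instance (path is illustrative)
+gs_guc set -D /mogdb/data/dn1 -c "extreme_flush_dirty_page=on"
+gs_guc set -D /mogdb/data/dn1 -c "checkpoint_target_time=10s"
+gs_ctl restart -D /mogdb/data/dn1
+```
+
+```sql
+-- Inspect the flushing state and the checkpoint forecast on this node
+SELECT node_name, actual_flush_num, remain_actual_dirty_page_num, forecast
+FROM local_pagewriter_flush_detail();
+```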
+
+**Return Values**:
+
+| Column Name | Description |
+| ---------------------------- | ------------------------------------------------------------ |
+| node_name | Node name |
+| pagewriter_sleep(ms) | Flushing cycle duration |
+| max_io_capacity(M) | Maximum I/O capacity |
+| dirty_page_percent_max | Maximum dirty page ratio |
+| candidate_buf_percent_target | Target ratio of candidate buffer |
+| max_redo_log_size(M) | Maximum log redo size |
+| main_pagewriter_detail | Main pagewriter details: start time, waiting time, flush time |
+| sub_pagewriter_detail | ID: Sub-pagewriter number; wait_cost: Waiting time of the last flush cycle; flush_cost: Actual flush time of the last flush cycle |
+| theoritical_max_io | Theoretical maximum = (Theoretical maximum for 'scanning buffer to candidate queue' + Theoretical maximum for flushing from the dirty page queue) |
+| lsn_percent | LSN ratio |
+| actual_max_io | Actual maximum = (Actual maximum for 'scanning buffer to candidate queue' + Actual maximum for flushing from the dirty page queue) |
+| actual_flush_num | Actual flush value = (Actual value for 'scanning buffer to candidate queue' + Actual value for flushing from the dirty page queue) |
+| remain_actual_dirty_page_num | Remaining actual dirty page count |
+| list_flush_detail | Details for scanning buffer to candidate queue: current candidate buffer count, total buffer count |
+| queue_flush_detail | Details for flushing from the dirty page queue: dirty percent |
+| forecast | Forecast: current speed, estimated time for current checkpoint execution |
+
+## Constraints
+
+- Enabling the ultimate dirty page flushing mode increases write amplification. If I/O is already a bottleneck, enabling it brings no significant optimization and may decrease TPMC. Therefore, the premise for enabling the ultimate dirty page flushing mode is that machine I/O is not the current system's bottleneck.
+
+## Performance Improvement
+
+After enabling the flush optimization, the checkpoint time and switchover time during switchover improve by more than 47%, and the average TPMC shows no significant loss.
+
+- The average switchover RTO decreases by 47% to 67.5%
+
+  Without the optimization, the average value is 41.55 seconds, which drops to 13.5 seconds when checkpoint_target_time=5 and to 22 seconds when checkpoint_target_time=30.
+
+- The average checkpoint duration during switchover decreases by 49% to 73%
+
+  Without the optimization, the average value is 38.68 seconds, which drops to 10.42 seconds when checkpoint_target_time=5 and to 19.67 seconds when checkpoint_target_time=30.
+
+- The average TPMC with the flush optimization enabled is close to the average TPMC without it.
+
+TPCC and hardware configuration:
+
+1. TPCC: 3,000 warehouses, 500/600 terminals, 10-minute run
+2. Hardware configuration: ARM, 48 CPUs, 200 GB memory, 3 TB disk (RAID 0, 2 NVMe SSDs)
+
+## Related Pages
+
[extreme_flush_dirty_page](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#extreme_flush_dirty_page), [checkpoint_target_time](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#checkpoint_target_time), [local_pagewriter_flush_detail()](../../reference-guide/functions-and-operators/system-management-functions/other-functions.md) \ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/high-performance.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/high-performance.md
index 62b86049..9640c456 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/high-performance.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/high-performance.md
@@ -27,4 +27,7 @@ date: 2023-05-22
 + **[Sorting Operator Optimization](ordering-operator-optimization.md)**
 + **[OCK-accelerated Data Transmission](ock-accelerated-data-transmission.md)**
 + **[OCK SCRLock Accelerate Distributed Lock](ock-scrlock-accelerate-distributed-lock.md)**
-+ **[Enhancement of WAL Redo Performance](enhancement-of-wal-redo-performance.md)** \ No newline at end of file
++ **[Enhancement of WAL Redo Performance](enhancement-of-wal-redo-performance.md)**
++ **[Enhancement of Dirty Pages Flushing Performance](enhancement-of-dirty-pages-flushing-performance.md)**
++ **[Sequential Scan Prefetch](seqscan-prefetch.md)**
++ **[Ustore SMP Parallel Scanning](ustore-smp.md)** \ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/seqscan-prefetch.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/seqscan-prefetch.md
new file mode 100644
index 00000000..b9a12b08
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/seqscan-prefetch.md
@@ -0,0 +1,158 @@
+---
+title: Sequential Scan Prefetch
+summary: Sequential Scan Prefetch
+author: Guo Huan, Zheng Xiaojin, Zhao Jin, Chen Xian
+date: 2023-12-20
+---
+
+# Sequential Scan Prefetch
+
+## Availability
+
+This feature is available since MogDB 5.0.8.
+
+## Introduction
+
+MogDB's sequential scan prefetching optimizes scenarios in which sequential scans of plain data tables read large volumes of data (full table scan scenarios), improving scan performance. This feature supports both the Astore and Ustore storage engines, and prefetching is also available for parallel scans.
+
+## Benefits
+
+Parallelizes CPU processing and I/O operations during sequential scans, reducing the time the CPU is blocked on I/O, improving CPU utilization, and enhancing sequential scan performance.
+
+## Description
+
+Data in the database is organized and managed in pages, and the CPU processes data in page units, creating an alternating serial execution model between CPU processing and I/O. In this model, because the I/O latency of a page is significantly greater than its CPU processing time, CPU processing is frequently interrupted by I/O operations, leading to low CPU utilization, which is the main reason for poor performance in scenarios such as full table scans.
+
+The sequential scan prefetching mechanism changes this model by parallelizing the CPU processing and I/O operations of sequential scans, avoiding CPU blockage due to waiting for I/O as much as possible.
The ideal state is that, by the time the CPU is about to process the next data page, the I/O service routine has already prepared that page in memory. This model is defined as the data page prefetching mechanism (data prefetch).
+
+This feature achieves a 20%-60% performance improvement for the SeqScan operator in full table scan query scenarios (such as TPCH), with a 10%-20% end-to-end performance improvement.
+
+Note:
+
+Not all SQL statements see the above performance improvements in every testing scenario. The performance improvement from prefetching is mainly related to the complexity of the query statement (CPU calculation and I/O time) and to disk bandwidth; other influencing factors include whether the scenario is fully cached and whether the query load is mixed.
+
+- SQL features with significant operator performance improvements: CPU calculation time is heavy, and I/O bandwidth has not reached the maximum disk bandwidth.
+- SQL features with significant end-to-end performance improvements: CPU calculation and I/O time account for about 50% each, and I/O bandwidth has not reached the maximum disk bandwidth.
+
+This feature is turned off by default. Set the GUC parameters `enable_ios = on` and `enable_heap_async_prefetch = on` to enable Astore sequential scan prefetching. Set the GUC parameters `enable_ios = on` and `enable_uheap_async_prefetch = on` to enable Ustore sequential scan prefetching.
+
+## Performance Comparison
+
+The following shows the performance improvement of this feature when the primary node executes TPCH tests under different degrees of parallelism, as well as the improvement under mixed load (TPCC+TPCH) and the impact on TPMC.
+
+> Note: In the figures below, the vertical axis represents the execution time of the operator or SQL (unit: seconds), and the horizontal axis represents the executed SQL.
+
+- Astore performance data
+
+  - dop=1: TPCH sequential scan operator improvement is 52%, end-to-end improvement is 27%.
+
+    ![img1](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-1.png)
+
+    ![img2](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-2.png)
+
+  - dop=8: TPCH sequential scan operator improvement is 28%, end-to-end improvement is 13%.
+
+    ![img3](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-3.png)
+
+    ![img4](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-4.png)
+
+  - Performance improvement and impact on TPMC under mixed load (TPCC+TPCH):
+
+    TPCH sequential scan operator improvement is 19%, end-to-end improvement is 10%, and TPMC is not affected by prefetching.
+
+    > Note: TPMC is 410204 without prefetching and 414793 with prefetching enabled.
+
+    ![img5](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-5.png)
+
+    ![img6](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-6.png)
+
+- Ustore performance data
+
+  Results of TPCH testing on the primary node under different degrees of parallelism:
+
+  - dop=1: Overall operator improvement is 41%, end-to-end improvement is 19%.
+
+    ![img7](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-7.png)
+
+  - dop=4: Overall operator improvement is 43%, end-to-end improvement is 21%.
+
+    ![img8](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-8.png)
+
+  - dop=8: Overall operator improvement is 45%, end-to-end improvement is 23%.
+
+    ![img9](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-9.png)
+
+  - dop=16: Overall operator improvement is 37%, end-to-end improvement is 13%.
+
+    ![img10](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-10.png)
+
+  Performance improvement and impact on TPMC under mixed load (TPCC+TPCH):
+
+  - dop=1: Overall operator improvement is 32%, end-to-end improvement is 19%, TPMC improves by 3%; TPMC is not adversely affected by prefetching.
+
+    ![img11](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-11.png)
+
+  - dop=4: Overall operator improvement is 38%, end-to-end improvement is 22%, TPMC improves by 2%; TPMC is not adversely affected by prefetching.
+
+    ![img12](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/seqscan-prefetch-12.png)
+
+## Constraints
+
+- Prefetching is supported for both serial and parallel scan scenarios.
+- The current version supports the Astore and Ustore engines; it does not support the Cstore or segment-page engines.
+- It is recommended to enable prefetching on NVMe SSDs, but not on mechanical disks.
+
+## Usage Guidance
+
+### Usage Restrictions
+
+1. If a regular mechanical hard disk is used, disk I/O bandwidth may be the system bottleneck, so the advantages of prefetching cannot be demonstrated.
+2. The sequential prefetching mechanism is mainly suitable for tables with a large amount of data (at least GB level). For tables with a small amount of data, enabling prefetching is not recommended. By default, prefetching is triggered at 1 GB; the minimum table size for triggering prefetching can be lowered to 512 MB through the GUC parameters `min_table_block_num_enable_ios` and `min_uheap_table_block_num_enable_ios`.
+
+### Configuration Steps
+
+- Configure Astore prefetching
+
+  ```sql
+  enable_ios = true -- system level; takes effect after restarting the database; default is false
+  enable_heap_async_prefetch = true -- session level; supports online configuration; default is false
+  ```
+
+- Configure Ustore prefetching
+
+  ```sql
+  enable_ios = true -- system level; takes effect after restarting the database; default is false
+  enable_uheap_async_prefetch = true -- session level; supports online configuration; default is false
+  ```
+
+### GUC Parameters
+
+Note: Except for `enable_ios` and `ios_worker_num`, which require a database restart to take effect, the other GUC parameters support online configuration.
+
+| No. | Parameter Description |
+| ---- | ------------------------------------------------------------ |
+| 1 | [enable_ios](../../reference-guide/guc-parameters/thread-pool.md#enable_ios): Controls whether to start the IOS service. |
+| 2 | [enable_heap_async_prefetch](../../reference-guide/guc-parameters/thread-pool.md#enable_heap_async_prefetch): Controls whether to enable prefetching for Astore full table scan scenarios. |
+| 3 | [enable_uheap_async_prefetch](../../reference-guide/guc-parameters/thread-pool.md#enable_uheap_async_prefetch): Controls whether to enable prefetching for Ustore full table scan scenarios. |
+| 4 | [ios_worker_num](../../reference-guide/guc-parameters/thread-pool.md#ios_worker_num): The number of ios_workers in the IOS thread pool. |
+| 5 | [parallel_scan_gap](../../reference-guide/guc-parameters/thread-pool.md#parallel_scan_gap): The number of pages each worker thread handles at a time when parallel scanning is enabled (query_dop > 1). |
+| 6 | [ios_batch_read_size](../../reference-guide/guc-parameters/thread-pool.md#ios_batch_read_size): The number of pre-read pages issued to the disk by ios_worker in each batch. |
+| 7 | [max_requests_per_worker](../../reference-guide/guc-parameters/thread-pool.md#max_requests_per_worker): The maximum queue depth for each ios_worker. |
+| 8 | [min_table_block_num_enable_ios](../../reference-guide/guc-parameters/thread-pool.md#min_table_block_num_enable_ios): The minimum table size threshold for triggering prefetching of Astore tables. Prefetching can only be triggered when the total number of data pages in the table is greater than or equal to this threshold. The current data page size is 8 KB. |
+| 9 | [min_uheap_table_block_num_enable_ios](../../reference-guide/guc-parameters/thread-pool.md#min_table_block_num_enable_ios): The minimum table size threshold for triggering prefetching of Ustore tables. Prefetching can only be triggered when the total number of data pages in the table is greater than or equal to this threshold. The current data page size is 8 KB. |
+| 10 | [prefetch_protect_time](../../reference-guide/guc-parameters/thread-pool.md#prefetch_protect_time): The maximum protection time for pre-read buffers. |
+| 11 | [ios_status_update_gap](../../reference-guide/guc-parameters/thread-pool.md#ios_status_update_gap): The time interval for updating IOS performance status. |
+
+### Operations and Monitoring Capabilities
+
+1. Users can perceive the effect of prefetching through the shared buffer hit metric in the execution plan, which shows a significantly higher buffer hit rate. Combined with the GUC parameter [track_io_timing](../../reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md#track_io_timing) = on, observe I/O Timings: read; the I/O read latency should be very low.
+
+2. Related performance view: [IOS_STATUS](../../reference-guide/system-catalogs-and-system-views/system-views/IOS_STATUS.md)
+
+   Usage method: `select * from ios_status();`
+
+   This view shows the recent performance status of the I/O thread pool responsible for prefetching, including IOSCtl dispatched requests, I/O latency/bandwidth, queue congestion, and other metrics. When the main query thread suffers from high I/O latency or a low cache hit rate, users or developers can locate the problem by directly inspecting the performance of the prefetching thread pool.
+
+## Related Pages
+
+[In-place Update Storage Engine Ustore](../../performance-tuning/system-tuning/configuring-ustore.md) \ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/ustore-smp.md b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/ustore-smp.md
new file mode 100644
index 00000000..98943f33
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/high-performance/ustore-smp.md
@@ -0,0 +1,96 @@
+---
+title: Ustore SMP Parallel Scanning
+summary: Ustore SMP Parallel Scanning
+author: Guo Huan, Zhao Sen, Liu Jingyi
+date: 2024-07-02
+---
+
+# Ustore SMP Parallel Scanning
+
+## Availability
+
+This feature is available since MogDB 5.0.8.
+
+## Introduction
+
+MogDB's SMP parallel technology utilizes the multi-core CPU architecture of modern computers to achieve multi-threaded parallel computing, fully leveraging CPU resources to enhance query performance. Previously, SMP parallel technology only supported the Astore storage engine. MogDB 5.0.8 introduces parallel capabilities for the Ustore engine.
+
+## Description
+
+In complex query scenarios, a single query takes a long time to execute and system concurrency is low. By implementing operator-level parallelism through SMP parallel execution technology, the query execution time can be effectively reduced, improving query performance and resource utilization.
+
+This feature adds SMP parallel capabilities to the Ustore engine in the following scenarios:
+
+1. Parallel Seq Scan
+
+2. Parallel Index Scan
+
+3. Parallel Index Only Scan
+
+4. Parallel Bitmap Scan
+
+## Performance Comparison with Parallelism Enabled or Disabled
+
+- Seq Scan
+
+  ![1](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/ustore10.png)
+
+  With parallelism enabled, the performance of sequential queries increases with the degree of parallelism. The best results are observed in Agg scenarios, where query performance at a parallel degree of 16 is 12 to 13 times faster than serial execution.
+
+- Index Scan
+
+  ![2](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/ustore11.png)
+
+  With parallelism enabled, the performance of Index Scan queries improves with the degree of parallelism. The best results are seen in Agg scenarios, where query performance at a parallel degree of 16 is 11 to 15 times faster than serial execution.
+
+- Index Only Scan
+
+  ![3](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/ustore12.png)
+
+  With parallelism enabled, the performance of Index Only Scan queries improves with the degree of parallelism. The best results are seen in Agg scenarios, where query performance at a parallel degree of 16 is 5 to 13 times faster than serial execution. Moreover, Ustore's Index Only Scan operator outperforms Astore's in both serial and parallel scenarios.
+
+- Bitmap Scan
+
+  Bitmap scans perform well in scenarios with random reads and large volumes of data. At a parallel degree of 16, the BitmapScan operator sees a 70% to 100% performance improvement. However, the end-to-end performance improvement is not significant, especially when the data volume is small.
+
+## Constraints
+
+- Cursors do not support parallel execution.
+- Queries within stored procedures and functions do not support parallel execution.
+- Subqueries (subplan) and initplan do not support parallelism, nor do operators containing subqueries.
+- Queries with median operations do not support parallel execution.
+- Queries involving global temporary tables do not support parallel execution.
+
+## Usage Guidance
+
+### Usage Restrictions
+
+To leverage SMP for improved query performance, consider the following conditions:
+
+- Resource Impact on SMP Performance
+
+  Sufficient CPU, memory, I/O, and network bandwidth resources are required. The SMP architecture is a scheme that trades surplus resources for time. After a plan is parallelized, resource consumption inevitably increases. When the aforementioned resources become bottlenecks, SMP cannot improve performance and may even degrade it. In cases of resource bottlenecks, it is recommended to disable SMP.
+
+- Other Factors Impacting SMP Performance
+
+  Parallelism performs poorly when the data is severely skewed. For example, if one value in a join column has a data volume much larger than the others, enabling parallelism will hash-redistribute the table data based on the join column's values, leaving one parallel thread with far more data than the others. This long-tail problem results in poor parallel performance.
+
+  The SMP feature increases resource usage, and in high-concurrency scenarios fewer resources are available. Therefore, enabling SMP parallelism in high-concurrency scenarios, especially when the amount of data to be processed is small, can lead to severe resource competition among queries. Once resource competition occurs, whether over CPU, I/O, or memory, it can lead to an overall performance decline. Thus, in high-concurrency scenarios, enabling SMP often fails to improve performance and may even cause performance degradation.
+
+### Configuration Steps
+
+1. Observe the current system load. If system resources are abundant (resource utilization is less than 50%), proceed to step 2; otherwise, exit.
+
+2. Enable parallelism by setting `query_dop = ${thread_num};` (the default is 1).
+
+3. After the query statement has been executed, set `query_dop` back to 1.
+
+   ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/ustore-smp.png)
+
+   In this plan, the Seq Scan operator is parallelized, and a Local Gather data exchange operator is added. The "dop: 1/2" marked on the Local Gather operator indicates that the sender threads run with a parallel degree of 2 while the receiver thread runs with a parallel degree of 1. That is, the lower-level Seq Scan operator executes with a parallel degree of 2, and the Local Gather (Streaming) operator aggregates the data of the parallel threads within the instance.
+
+   With sufficient resources, a higher degree of parallelism generally brings a better performance improvement. However, higher parallelism is not always better; beyond a certain point, the additional improvement may not be significant.
+
+## Related Pages
+
+[Configuring Ustore](../../performance-tuning/system-tuning/configuring-ustore.md), [query_dop](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#query_dop) \ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/2-workload-diagnosis-report.md b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/2-workload-diagnosis-report.md
index f9a66695..f555ffb6 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/2-workload-diagnosis-report.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/2-workload-diagnosis-report.md
@@ -1,76 +1,76 @@
----
-title: WDR
-summary: WDR
-author: Guo Huan
-date: 2022-05-07
----
-
-# WDR
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-The workload diagnosis report (WDR) provides database performance diagnosis reports based on the baseline performance and incremental data that reflects performance changes.
-
-## Benefits
-
-- The WDR is the main method for diagnosing long-term performance problems. Based on the performance baseline of a snapshot, performance analysis is performed from multiple dimensions, helping DBAs understand the system load, performance of each component, and performance bottlenecks.
-- Snapshots are also an important data source for self-diagnosis and self-optimization suggestions on subsequent performance problems. - -## Description - -The WDR generates a performance report between two different time points based on the system performance snapshot data at these time points. The report is used to diagnose database kernel performance faults. - -You can use generate_wdr_report(…) to generate a performance report based on two performance snapshots. - -The WDR depends on the following two components: - -- Snapshot: The performance snapshot can be configured to collect a certain amount of performance data from the kernel at a specified interval and store the data in the user tablespace. Any snapshot can be used as a performance baseline for comparison with other snapshots. -- WDR Reporter: This tool analyzes the overall system performance based on two snapshots, calculates the changes of more specific performance indicators between the two time points, and generates summarized and detailed performance data. For details, see Table 1 and Table 2. - -**Table 1** Summarized diagnosis report - -| Diagnosis Type | Description | -| :------------------------------ | :----------------------------------------------------------- | -| Database Stat | Evaluates the load and I/O status of the current database. Load and I/O are the most important indicators of the TP system.
The statistics include the number of sessions connected to the database, number of committed and rolled back transactions, number of read disk blocks, number of disk blocks found in the cache, number of rows returned, captured, inserted, updated, and deleted through database query, number of conflicts and deadlocks, usage of temporary files, and I/O read/write time. | -| Load Profile | Evaluates the current system load from the time, I/O, transaction, and SQL dimensions.
The statistics include the job running elapse time, CPU time, daily transaction quality, logical and physical read volume, read and write I/O times and size, login and logout times, SQL, transaction execution volume, and SQL P80 and P95 response time. | -| Instance Efficiency Percentages | Evaluates the cache efficiency of the current system.
The statistics include the database cache hit ratio. | -| Events | Evaluates the performance of key system kernel resources and key events.
The statistics include the number of times that the key events of the database kernel occur and the waiting time. | -| Wait Classes | Evaluates the performance of key events in the system.
The statistics include the release of the data kernel in the main types of wait events, such as **STATUS**, **LWLOCK_EVENT**, **LOCK_EVENT**, and **IO_EVENT**. | -| CPU | Includes time release of the CPU in user mode, kernel mode, I/O wait mode, or idle mode. | -| IO Profile | Includes the number of database I/O times, database I/O data volume, number of redo I/O times, and redo I/O volume. | -| Memory Statistics | Includes maximum process memory, used process memory, maximum shared memory, and used shared memory. | - -**Table 2** Detailed diagnosis report - -| Diagnosis Type | Description | -| :--------------------- | :----------------------------------------------------------- | -| Time Model | Evaluates the performance of the current system in the time dimension.
The statistics include time consumed by the system in each phase, including the kernel time, CPU time, execution time, parsing time, compilation time, query rewriting time, plan generation time, network time, and I/O time. | -| SQL Statistics | Diagnoses SQL statement performance problems.
The statistics include normalized SQL performance indicators in multiple dimensions: elapsed time, CPU time, rows returned, tuple reads, executions, physical reads, and logical reads. The indicators can be classified into execution time, number of execution times, row activity, and cache I/O. | -| Wait Events | Diagnoses performance of key system resources and key time in detail.
The statistics include the performance of all key events in a period of time, including the number of events and the time consumed. | -| Cache IO Stats | Diagnoses the performance of user tables and indexes.
The statistics include read and write operations on all user tables and indexes, and the cache hit ratio. | -| Utility status | Diagnoses the background task performance.
The statistics include the performance of background tasks such as replication. | -| Object stats | Diagnoses the performance of database objects.
The statistics include user tables, tables on indexes, index scan activities, as well as insert, update, and delete activities, number of valid rows, and table maintenance status. | -| Configuration settings | Determines whether the configuration is changed.
It is a snapshot that contains all current configuration parameters. | -| SQL detail | Displays information about unique query text. | - -## Enhancements - -None. - -## Constraints - -- The WDR snapshot collects performance data of different databases. If there are a large number of databases or tables in the database instance, it takes a long time to create a WDR snapshot. -- If WDR snapshot is performed when a large number of DDL statements are executed, WDR snapshot may fail. -- When the database is dropped, WDR snapshot may fail. - -## Dependencies - -None. - -## Related Pages - +--- +title: WDR +summary: WDR +author: Guo Huan +date: 2022-05-07 +--- + +# WDR + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +The workload diagnosis report (WDR) provides database performance diagnosis reports based on the baseline performance and incremental data that reflects performance changes. + +## Benefits + +- The WDR is the main method for diagnosing long-term performance problems. Based on the performance baseline of a snapshot, performance analysis is performed from multiple dimensions, helping DBAs understand the system load, performance of each component, and performance bottlenecks. +- Snapshots are also an important data source for self-diagnosis and self-optimization suggestions on subsequent performance problems. + +## Description + +The WDR generates a performance report between two different time points based on the system performance snapshot data at these time points. The report is used to diagnose database kernel performance faults. + +You can use generate_wdr_report(…) to generate a performance report based on two performance snapshots. + +The WDR depends on the following two components: + +- Snapshot: The performance snapshot can be configured to collect a certain amount of performance data from the kernel at a specified interval and store the data in the user tablespace. Any snapshot can be used as a performance baseline for comparison with other snapshots. +- WDR Reporter: This tool analyzes the overall system performance based on two snapshots, calculates the changes of more specific performance indicators between the two time points, and generates summarized and detailed performance data. For details, see Table 1 and Table 2. + +**Table 1** Summarized diagnosis report + +| Diagnosis Type | Description | +| :------------------------------ | :----------------------------------------------------------- | +| Database Stat | Evaluates the load and I/O status of the current database. Load and I/O are the most important indicators of the TP system.
The statistics include the number of sessions connected to the database, the number of committed and rolled back transactions, the number of disk blocks read, the number of disk blocks found in the cache, the number of rows returned, fetched, inserted, updated, and deleted by database queries, the number of conflicts and deadlocks, temporary file usage, and I/O read/write time. |
+| Load Profile | Evaluates the current system load from the time, I/O, transaction, and SQL dimensions.<br/>
The statistics include the job running elapsed time, CPU time, daily transaction volume, logical and physical read volume, read and write I/O counts and sizes, login and logout counts, SQL and transaction execution volumes, and SQL P80 and P95 response times. |
+| Instance Efficiency Percentages | Evaluates the cache efficiency of the current system.<br/>
The statistics include the database cache hit ratio. | +| Events | Evaluates the performance of key system kernel resources and key events.
The statistics include the number of times that the key events of the database kernel occur and the waiting time. | +| Wait Classes | Evaluates the performance of key events in the system.
The statistics include the time distribution of the database kernel across the main types of wait events, such as **STATUS**, **LWLOCK_EVENT**, **LOCK_EVENT**, and **IO_EVENT**. |
+| CPU | Includes the time distribution of the CPU in user mode, kernel mode, I/O wait mode, and idle mode. |
+| IO Profile | Includes the number of database I/O operations, the database I/O data volume, the number of redo I/O operations, and the redo I/O volume. |
+| Memory Statistics | Includes the maximum process memory, used process memory, maximum shared memory, and used shared memory. |
+
+**Table 2** Detailed diagnosis report
+
+| Diagnosis Type | Description |
+| :--------------------- | :----------------------------------------------------------- |
+| Time Model | Evaluates the performance of the current system in the time dimension.<br/>
The statistics include time consumed by the system in each phase, including the kernel time, CPU time, execution time, parsing time, compilation time, query rewriting time, plan generation time, network time, and I/O time. | +| SQL Statistics | Diagnoses SQL statement performance problems.
The statistics include normalized SQL performance indicators in multiple dimensions: elapsed time, CPU time, rows returned, tuple reads, executions, physical reads, and logical reads. The indicators can be classified into execution time, number of executions, row activity, and cache I/O. |
+| Wait Events | Diagnoses the performance of key system resources and key wait events in detail.<br/>
The statistics include the performance of all key events in a period of time, including the number of events and the time consumed. | +| Cache IO Stats | Diagnoses the performance of user tables and indexes.
The statistics include read and write operations on all user tables and indexes, and the cache hit ratio. | +| Utility status | Diagnoses the background task performance.
The statistics include the performance of background tasks such as replication. | +| Object stats | Diagnoses the performance of database objects.
The statistics include table and index scan activities on user tables and indexes, insert, update, and delete activities, the number of valid rows, and the table maintenance status. |
+| Configuration settings | Determines whether the configuration is changed.<br/>
It is a snapshot that contains all current configuration parameters. | +| SQL detail | Displays information about unique query text. | + +## Enhancements + +None. + +## Constraints + +- The WDR snapshot collects performance data of different databases. If there are a large number of databases or tables in the database instance, it takes a long time to create a WDR snapshot. +- If WDR snapshot is performed when a large number of DDL statements are executed, WDR snapshot may fail. +- When the database is dropped, WDR snapshot may fail. + +## Dependencies + +None. + +## Related Pages + [WDR Snapshot](../../performance-tuning/wdr/wdr.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/5-system-kpi-aided-diagnosis.md b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/5-system-kpi-aided-diagnosis.md index 66a14a8f..74f5caf3 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/5-system-kpi-aided-diagnosis.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/5-system-kpi-aided-diagnosis.md @@ -1,70 +1,70 @@ ---- -title: System KPI-aided Diagnosis -summary: System KPI-aided Diagnosis -author: Guo Huan -date: 2022-05-07 ---- - -# System KPI-aided Diagnosis - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -KPIs are views of key performance indicators for kernel components or the entire system. Based on KPIs, users can learn about the real-time and historical running status of the system. - -## Benefits - -- Summarized system load diagnosis - - Precise alarms for system load exceptions (overload, stall, and SLA exceptions) and precise system load profile - -- Summarized system time model diagnosis - - Instance-level and query-level time model segmentation, diagnosing the root causes of instance and query performance problems - -- Query performance diagnosis - - Database-level query summary, including top SQL, SQL CPU usage, I/O consumption, execution plan, and excessive hard parsing - -- Diagnosis of disk I/O, index, and buffer performance problems - -- Diagnosis of connection and thread pool problems - -- Diagnosis of checkpoint and redo (RTO) performance problems - -- Diagnosis of system I/O, LWLock, and wait performance problems - - Diagnosis of over 60 modules and over 240 key operation performance problems - -- Function-level performance monitoring and diagnosis (by GSTRACE) - - Tracing of over 50 functions at the storage and execution layers - -## Description - -MogDB provides KPIs of 11 categories and 26 sub-categories, covering instances, files, objects, workload, communication, sessions, threads, cache I/O, locks, wait events, and clusters. - -Figure 1 shows the distribution of kernel KPIs. - -**Figure 1** Distribution of kernel KPIs -![distribution-of-kernel-kpis](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/system-kpi-aided-diagnosis-2.png) - -## Enhancements - -None. - -## Constraints - -- Utility statements do not support normalization. Non-DML statements, such as CREATE, DROP, COPY, and VACUUM, are not supported. -- Currently, only the top-level normalized SQL statements are recorded. SQL statements in a stored procedure are not normalized, and only the SQL statements that call the stored procedure are recorded. - -## Dependencies - -None. 
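+
+A minimal sketch of the WDR workflow described above (the snapshot IDs and node name are placeholders; manual snapshot creation assumes `enable_wdr_snapshot = on`):
+
+```sql
+-- Take a baseline snapshot, run the workload of interest, then take a second snapshot.
+select create_wdr_snapshot();
+-- ... workload runs here ...
+select create_wdr_snapshot();
+-- Find the two snapshot IDs to compare.
+select snapshot_id, start_ts from snapshot.snapshot order by snapshot_id desc limit 2;
+-- Generate the report between the two snapshots (1 and 2 are placeholder IDs).
+select generate_wdr_report(1, 2, 'all', 'node', 'node_name');
+```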
- -## Related Pages - +--- +title: System KPI-aided Diagnosis +summary: System KPI-aided Diagnosis +author: Guo Huan +date: 2022-05-07 +--- + +# System KPI-aided Diagnosis + +## Availability + +This feature is available since MogDB 1.1.0. + +## Introduction + +KPIs are views of key performance indicators for kernel components or the entire system. Based on KPIs, users can learn about the real-time and historical running status of the system. + +## Benefits + +- Summarized system load diagnosis + + Precise alarms for system load exceptions (overload, stall, and SLA exceptions) and precise system load profile + +- Summarized system time model diagnosis + + Instance-level and query-level time model segmentation, diagnosing the root causes of instance and query performance problems + +- Query performance diagnosis + + Database-level query summary, including top SQL, SQL CPU usage, I/O consumption, execution plan, and excessive hard parsing + +- Diagnosis of disk I/O, index, and buffer performance problems + +- Diagnosis of connection and thread pool problems + +- Diagnosis of checkpoint and redo (RTO) performance problems + +- Diagnosis of system I/O, LWLock, and wait performance problems + + Diagnosis of over 60 modules and over 240 key operation performance problems + +- Function-level performance monitoring and diagnosis (by GSTRACE) + + Tracing of over 50 functions at the storage and execution layers + +## Description + +MogDB provides KPIs of 11 categories and 26 sub-categories, covering instances, files, objects, workload, communication, sessions, threads, cache I/O, locks, wait events, and clusters. + +Figure 1 shows the distribution of kernel KPIs. + +**Figure 1** Distribution of kernel KPIs +![distribution-of-kernel-kpis](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/system-kpi-aided-diagnosis-2.png) + +## Enhancements + +None. + +## Constraints + +- Utility statements do not support normalization. Non-DML statements, such as CREATE, DROP, COPY, and VACUUM, are not supported. +- Currently, only the top-level normalized SQL statements are recorded. SQL statements in a stored procedure are not normalized, and only the SQL statements that call the stored procedure are recorded. + +## Dependencies + +None. + +## Related Pages + [DBE_PERF Schema](../../reference-guide/schema/DBE_PERF/DBE_PERF.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/autonomous-transaction-management.md b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/autonomous-transaction-management.md new file mode 100644 index 00000000..3707d1da --- /dev/null +++ b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/autonomous-transaction-management.md @@ -0,0 +1,72 @@ +--- +title: Autonomous Transaction Management View and Termination +summary: Autonomous Transaction Management View and Termination +author: 郭欢 赵金 +date: 2024-03-20 +--- + +# Autonomous Transaction Management View and Termination + +## Availability + +This feature is available since MogDB 5.0.6. + +## Introduction + +This feature supports viewing the number of active autonomous transactions and related details, as well as terminating autonomous transactions without stopping the database. 
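+
+As a minimal illustration of what this feature enables (the `pg_running_xacts` view and its new columns are described in the sections below):
+
+```sql
+-- Count the autonomous transaction sessions that are currently active.
+select count(*) from pg_running_xacts where is_autonomous_session = 't';
+```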
+ +## Benefits + +Enhances the observability of autonomous transactions, facilitating the observation of system autonomous transaction session details and their relationship with main transaction sessions, making it easier for users to control the number of autonomous transactions running in the system. + +## Description + +In the current design of MogDB, a main transaction initiates the creation of a session connection process after receiving an autonomous transaction command for the first time. This is an autonomous transaction session independent of the main transaction. Even if the main transaction executes multiple autonomous transaction commands later, it reuses the same session connection. When the main transaction commits or rolls back, the autonomous transaction session connection is closed (destroyed). + +This feature supports viewing the number of autonomous transaction session connections and specific details through the `pg_running_xacts` view. If you want to query the specific SQL execution content of autonomous transactions, you can construct a new view based on this view and other existing views (such as `pg_stat_activity`). + +When users query an excessive number of autonomous transaction connections through the `pg_running_xacts` view, they can use the `SELECT pg_terminate_backend(pid)` command to forcibly terminate active autonomous transactions (the pid of the autonomous transaction is obtained according to the `pg_running_xacts` view). At this point, the number of autonomous transaction session connections in the system will decrease. + +## View Description + +The `pg_running_xacts` view adds the following three columns: + +| Name | Type | Description | +| --------------------- | ------ | ----------------------------------------------- | +| sessionid | bigint | Session ID | +| parent_sessionid | bigint | Parent session ID of the autonomous transaction | +| is_autonomous_session | bool | Whether it is an autonomous transaction session | + +## Example + +```sql +-- Create table +Create table test_auto_dataa (a int); + +-- Create autonomous transaction stored procedure +create or replace procedure taest_auto_pp() +as +PRAGMA AUTONOMOUS_TRANSACTION; +begin +insert into test_auto_dataa select generate_series(1,700000); +commit; +end; +/ + +Begin -- Main transaction begins +taest_auto_pp(); -- Execute autonomous transaction +insert into test_auto_dataa select generate_series(1,7000000); +end +/ + +-- During the execution of the autonomous transaction (the autonomous transaction can be confirmed to be running using the pg_stat_activity view), query the pg_running_xacts view to see the session connections of the autonomous transaction. The sessionid corresponding to is_autonomous_session is 't', and parent_sessionid is not 0. Before the main transaction ends, the session connection of the autonomous transaction still exists. Note: In thread pool mode, after the autonomous transaction ends, its sessionid and pid are reset to zero. After the main transaction is completed (committed or rolled back), the session connection of the autonomous transaction disappears. +select * from pg_running_xacts; +select * from pg_get_running_xacts(); + +-- During the execution of the autonomous transaction, query the thread ID of the autonomous transaction from the above view, record the session ID of the main session, and then use pg_terminate_backend(pid) to terminate the autonomous transaction. 
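+-- A sketch of locating the target first; the join with pg_stat_activity is
+-- illustrative (following the Description above) and returns the pid to terminate.
+select r.pid, r.sessionid, r.parent_sessionid, a.query
+from pg_running_xacts r
+left join pg_stat_activity a on a.pid = r.pid
+where r.is_autonomous_session = 't';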
+select pg_terminate_backend(pid); +``` + +## Related Pages + +[pg_running_xacts](../../reference-guide/system-catalogs-and-system-views/system-views/PG_RUNNING_XACTS.md), [pg_get_running_xacts()](../../reference-guide/functions-and-operators/system-management-functions/other-functions.md), [PG_STAT_ACTIVITY](../../reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_ACTIVITY.md), [pg_terminate_backend(pid int)](../../reference-guide/functions-and-operators/system-management-functions/server-signal-functions.md), [Autonomous Transaction](../../developer-guide/autonomous-transaction/1-introduction-to-autonomous-transaction.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/corrupt-files-handling.md b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/corrupt-files-handling.md new file mode 100644 index 00000000..4e2c366b --- /dev/null +++ b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/corrupt-files-handling.md @@ -0,0 +1,64 @@ +--- +title: Corrupt Files Handling +summary: Corrupt Files Handling +author: 郭欢 王云龙 +date: 2024-03-25 +--- + +# Corrupt Files Handling + +## Availability + +This feature is available since MogDB 5.0.6. + +## Introduction + +This feature provides handling capabilities for abnormal behaviors caused by database file corruption or loss. + +## Benefits + +Ensures MogDB's ability to continue providing service in extreme scenarios. + +## Description + +Operating system or human factors may lead to database file corruption or loss, which can cause the following abnormal behaviors: + +- Inability to complete the checkpoint due to missing files when stopping the database, preventing the database from being stopped; +- The checkpoint command hangs or encounters a core issue; +- Hangs or core issues when creating a database or tablespace; +- System functions such as `pg_switch_xlog`, `create_physical_replication_slot_for_archive`, etc., hang or encounter a core issue; +- Page checksum corruption, and the database stops when a corrupted page is used during the restart redo or when the standby database applies the corrupted page, causing a failure to start. + +This feature introduces two new GUC parameters to provide handling capabilities for the above abnormal scenarios. + +The `data_sync_failed_ignore` parameter is used to control whether to discard items pending sync when the pagewriter's fsync fails. Setting it to 'off' allows normal stopping of the database. + +The `damage_page_ignore` controls whether to ignore corrupted pages during redo, setting it to 'on' can force the database to start (requires setting [force_promote](../../reference-guide/guc-parameters/write-ahead-log/settings.md#force_promote) = on). + +## Parameter Description + +**data_sync_failed_ignore** + +Parameter Description: Controls whether to discard items pending sync when the pagewriter's fsync fails. This parameter is of the SIGHUP type. + +Value Range: Boolean + +- 'on' indicates that fsync failure will cause the database to stop. +- 'off' indicates that fsync failure will be retried and will not cause a core, allowing normal stopping of the database. + +Default Value: 'off' + +**damage_page_ignore** + +Parameter Description: Controls whether to ignore corrupted pages during redo to force the database to start. This parameter is of the SIGHUP type. + +Value Range: Boolean + +- 'on' indicates that corrupted pages will be ignored, and the database will be forced to start. 
+- 'off' indicates that corrupted pages will not be ignored, and the database will fail to start. + +Default Value: 'off' + +## Related Pages + +[data_sync_failed_ignore](../../reference-guide/guc-parameters/fault-tolerance.md#data_sync_failed_ignore), [damage_page_ignore](../../reference-guide/guc-parameters/fault-tolerance.md#damage_page_ignore) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/fault-diagnosis.md b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/fault-diagnosis.md index 6a150f06..c957bbba 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/fault-diagnosis.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/fault-diagnosis.md @@ -1,34 +1,34 @@ ---- -title: Fault Diagnosis -summary: Fault Diagnosis -author: Zhang Cuiping -date: 2022-06-17 ---- - -# Fault Diagnosis - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -In order to quickly locate faults, collect system fault information, export fault data and then rectify faults, MogDB 3.0 has enhanced OM functions and gstrace diagnostic capabilities. - -## Benefits - -The enhanced fault diagnosis capability can facilitate R&D personnel to rectify faults in time and ensure the normal operation of the system. - -## Description - -The gs_check tool can compare the difference of scenario check results and output a difference analysis report to help users locate the problem quickly. - -The gs_watch tool can monitor MogDB processes and automatically call gs_collector to collect the system status when a process crash is found for later analysis. - -The gs_gucquery tool can automatically collect, organize, and export GUC values, and compare the changes of GUC values at different moment. - -gstrace diagnostic capability is enhanced. It supports opening the trace item of one or more component (module) and function by module name and function name. It enhances the number of gstrace points in the code and the ability to express gstrace output information. It supports export of new key data structures PGPROC and user session data. It realizes fault injection, including system call error report simulation and variable content saving write. - -## Related Pages - -[gs_check](../../reference-guide/tool-reference/server-tools/gs_check.md), [gstrace](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gstrace.md), [gs_watch](../../reference-guide/tool-reference/server-tools/gs_watch.md) +--- +title: Fault Diagnosis +summary: Fault Diagnosis +author: Zhang Cuiping +date: 2022-06-17 +--- + +# Fault Diagnosis + +## Availability + +This feature is available since MogDB 3.0.0. + +## Introduction + +In order to quickly locate faults, collect system fault information, export fault data and then rectify faults, MogDB 3.0 has enhanced OM functions and gstrace diagnostic capabilities. + +## Benefits + +The enhanced fault diagnosis capability can facilitate R&D personnel to rectify faults in time and ensure the normal operation of the system. + +## Description + +The gs_check tool can compare the difference of scenario check results and output a difference analysis report to help users locate the problem quickly. + +The gs_watch tool can monitor MogDB processes and automatically call gs_collector to collect the system status when a process crash is found for later analysis. 
+
+The gs_gucquery tool can automatically collect, organize, and export GUC values, and compare the changes of GUC values at different moments.
+
+gstrace diagnostic capability is enhanced. It supports enabling the trace items of one or more components (modules) and functions by module name and function name. It increases the number of gstrace points in the code and enhances the expressiveness of gstrace output information. It supports exporting the new key data structures PGPROC and user session data. It realizes fault injection, including the simulation of system call errors and the saving and writing of variable contents.
+
+## Related Pages
+
+[gs_check](../../reference-guide/tool-reference/server-tools/gs_check.md), [gstrace](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gstrace.md), [gs_watch](../../reference-guide/tool-reference/server-tools/gs_watch.md)
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/maintainability.md b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/maintainability.md
index 792d95a5..472a5d0e 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/maintainability.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/maintainability.md
@@ -17,4 +17,6 @@ date: 2023-05-22
 + **[SQL PATCH](sql-patch.md)**
 + **[DCF Module Tracing](dcf-module-tracing.md)**
 + **[Error When Writing Illegal Characters](error-when-writing-illegal-characters.md)**
-+ **[Support For Pageinspect & Pagehack](pageinspect-pagehack.md)** \ No newline at end of file
++ **[Support For Pageinspect & Pagehack](pageinspect-pagehack.md)**
++ **[Autonomous Transaction Management View and Termination](autonomous-transaction-management.md)**
++ **[Corrupt Files Handling](corrupt-files-handling.md)** \ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/sql-patch.md b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/sql-patch.md
index 77bc6685..8610c6d1 100644
--- a/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/sql-patch.md
+++ b/product/en/docs-mogdb/v5.0/characteristic-description/maintainability/sql-patch.md
@@ -1,133 +1,133 @@
----
-title: SQL PATCH
-summary: SQL PATCH
-author: Guo Huan
-date: 2023-04-04
----
-
-# SQL PATCH
-
-## Availability
-
-This feature is available since MogDB 5.0.0.
-
-## Introduction
-
-SQL PATCH adjusts the query execution plan without directly modifying users' service statements. If the execution plan or execution mode of a query statement does not meet the expectation, you can create a query patch and use hints to optimize the query plan or handle errors for specific statements by short-circuiting.
-
-## Benefits
-
-If performance problems occur due to poor query plans or services are unavailable due to internal system errors, you can invoke O&M functions in the database to optimize specific scenarios or report errors in advance to avoid more serious problems and greatly reduce O&M costs.
-
-## Description
-
-SQL PATCH is designed for database administrators (DBAs), O&M personnel, and other roles who need to optimize SQL statements. If performance problems caused by poor plans of service statements are identified through other O&M views or fault locating methods, you can create an SQL patch to optimize service statements based on hints.
Currently, the following hints are supported: number of rows, scanning mode, join mode, join sequence, PBE custom/generic plan selection, statement-level parameter setting, and parameterized path. In addition, in case that services are unavailable due to internal system errors that are triggered by specific statements, you can create SQL patches to rectify single-point failures without changing service statements. In this way, errors can be reported in advance to avoid greater loss. - -SQL PATCH is implemented based on the unique SQL ID. Therefore, to use SQL PATCH, related O&M parameters (for details, see [Feature Constraints](#Constraints)) must be enabled for the SQL patch to take effect. The unique SQL ID can be obtained from both the WDR and slow SQL view. You must specify the unique SQL ID when creating an SQL patch. The following provides a simple example. - -Scenario 1: Use SQL PATCH to optimize specific statements based on hints. - -```sql -MogDB=# set track_stmt_stat_level = 'L1,L1'; -- Enable full SQL statistics. -SET -MogDB=# select * from hint_t1 t1 where t1.a = 1; -- Execute the SQL statement. - a | b | c ----+---+--- - 1 | 1 | 1 -(1 row) -MogDB=# select unique_query_id, query, query_plan from dbe_perf.statement_history where query like '%hint_t1%'; -- Obtain the query plan and unique SQL ID. --[ RECORD 1 ]---+---------------------------------------------------------------------------------------------- -unique_query_id | 2578396627 -query | select * from hint_t1 t1 where t1.a = ?; -query_plan | Datanode Name: sgnode - | Bitmap Heap Scan on hint_t1 t1 (cost=4.33..15.70 rows=10 p-time=0 p-rows=0 width=12) - | Recheck Cond: (a = '***') - | -> Bitmap Index Scan on hint_t1_a_idx (cost=0.00..4.33 rows=10 p-time=0 p-rows=0 width=0) - | Index Cond: (a = '***') - | - | -MogDB=# select * from dbe_sql_util.create_hint_sql_patch('patch1', 2578396627, 'indexscan(t1)'); -- Specify a hint patch for the specified unique SQL ID. --[ RECORD 1 ]---------+-- -create_hint_sql_patch | t -MogDB=# explain select * from hint_t1 t1 where t1.a = 1; -- Check whether the hint takes effect. -NOTICE: Plan influenced by SQL hint patch - QUERY PLAN ------------------------------------------------------------------------------------ - [Bypass] - Index Scan using hint_t1_a_idx on hint_t1 t1 (cost=0.00..32.43 rows=10 width=12) - Index Cond: (a = 1) -(3 rows) -MogDB=# select * from hint_t1 t1 where t1.a = 1; -- Execute the statement again. - a | b | c ----+---+--- - 1 | 1 | 1 -(1 row) -MogDB=# select unique_query_id, query, query_plan from dbe_perf.statement_history where query like '%hint_t1%'; -- The query plan has been changed. 
--[ RECORD 1 ]---+-------------------------------------------------------------------------------------------------- -unique_query_id | 2578396627 -query | select * from hint_t1 t1 where t1.a = ?; -query_plan | Datanode Name: sgnode - | Bitmap Heap Scan on hint_t1 t1 (cost=4.33..15.70 rows=10 p-time=0 p-rows=0 width=12) - | Recheck Cond: (a = '***') - | -> Bitmap Index Scan on hint_t1_a_idx (cost=0.00..4.33 rows=10 p-time=0 p-rows=0 width=0) - | Index Cond: (a = '***') - | - | --[ RECORD 2 ]---+-------------------------------------------------------------------------------------------------- -unique_query_id | 2578396627 -query | select * from hint_t1 t1 where t1.a = ?; -query_plan | Datanode Name: sgnode - | Index Scan using hint_t1_a_idx on hint_t1 t1 (cost=0.00..8.27 rows=1 p-time=0 p-rows=0 width=12) - | Index Cond: (a = '***') - | - | -``` - -Scenario 2: Run the SQL PATCH command to report an error for a specific statement in advance. - -```sql --- Delete patch 1. -MogDB=# select * from dbe_sql_util.drop_sql_patch('patch1'); - drop_sql_patch ----------------- - t -(1 row) - --- Create an abort patch for the statement of the unique SQL ID. -MogDB=# select * from dbe_sql_util.create_abort_sql_patch('patch2', 2578396627); - create_abort_sql_patch - create_abort_sql_patch ------------------------- - t -(1 row) - --- An error is reported in advance when the statement is executed again. -MogDB=# select * from hint_t1 t1 where t1.a = 1; -ERROR: Statement 2578396627 canceled by abort patch patch2 -``` - -## Enhancements - -None - -## Constraints - -1. Patches can be created only by unique SQL ID. If unique SQL IDs conflict, SQL patches that are used for hint-based optimization may affect performance but do not affect semantic correctness. -2. Only hints that do not change SQL semantics can be used as patches. SQL rewriting is not supported. -3. This tool is not applicable to logical backup and restoration. -4. The patch validity cannot be verified during patch creation. If the patch hint has syntax or semantic errors, the query execution is not affected. -5. Only the initial user, O&M administrator, monitoring administrator, and system administrator have the permission to perform this operation. -6. Patches are not shared between databases. When creating SQL patches, you need to connect to the target database. -7. In the centralized deployment scenario where the standby node is readable, you must specify the primary node to run the SQL PATCH command to create, modify, or delete functions and the standby node to report errors. -8. There is a delay in synchronizing an SQL patch to the standby node. The patch takes effect after the standby node replays related logs. -9. This function does not take effect for SQL statements in stored procedures because no unique SQL ID is generated for statements in stored procedures. -10. It is not recommended that the abort patch be used in the database for a long time. It should be used only as a workaround. If the database service is unavailable due to a kernel fault triggered by a specific statement, you must rectify the service fault or upgrade the kernel as soon as possible. After the upgrade, the method of generating unique SQL IDs may change. Therefore, the workaround may become invalid. -11. Currently, except DML statements, unique SQL IDs of SQL statements (such as CREATE TABLE) are generated by hashing the statement text. Therefore, SQL PATCH is sensitive to uppercase and lowercase letters, spaces, and line breaks. 
That is, even statements of different texts are semantically relative, you still need to create different SQL patches for them. For DML operations, SQL PATCH can take effect for the same statement with different input parameters, regardless of uppercase letters, lowercase letters, and spaces. - -## Dependencies - -This feature depends on the real-time resource monitoring function. To use this feature, set the **enable_resource_track** parameter to **on** and set **instr_unique_sql_count** to a value greater than 0. For different statements, if the generated unique SQL IDs conflict, the SQL PATCH statement incorrectly hits other statements that are not expected. Compared with hint patches that are used for optimization, abort patches have more side effects and should be used with caution. - -## Related Pages - -[Load Management](../../reference-guide/guc-parameters/load-management.md), [Query](../../reference-guide/guc-parameters/query.md), [Hint Based Tuning](../../performance-tuning/sql-tuning/hint-based-tuning.md) +--- +title: SQL PATCH +summary: SQL PATCH +author: Guo Huan +date: 2023-04-04 +--- + +# SQL PATCH + +## Availability + +This feature is available since MogDB 5.0.0. + +## Introduction + +SQL PATCH adjusts the query execution plan without directly modifying users' service statements. If the execution plan or execution mode of a query statement does not meet the expectation, you can create a query patch and use hints to optimize the query plan or handle errors for specific statements by short-circuiting. + +## Benefits + +If performance problems occur due to poor query plans or services are unavailable due to internal system errors, you can invoke O&M functions in the database to optimize specific scenarios or report errors in advance to avoid more serious problems and greatly reduce O&M costs. + +## Description + +SQL PATCH is designed for database administrators (DBAs), O&M personnel, and other roles who need to optimize SQL statements. If performance problems caused by poor plans of service statements are identified through other O&M views or fault locating methods, you can create an SQL patch to optimize service statements based on hints. Currently, the following hints are supported: number of rows, scanning mode, join mode, join sequence, PBE custom/generic plan selection, statement-level parameter setting, and parameterized path. In addition, in case that services are unavailable due to internal system errors that are triggered by specific statements, you can create SQL patches to rectify single-point failures without changing service statements. In this way, errors can be reported in advance to avoid greater loss. + +SQL PATCH is implemented based on the unique SQL ID. Therefore, to use SQL PATCH, related O&M parameters (for details, see [Feature Constraints](#Constraints)) must be enabled for the SQL patch to take effect. The unique SQL ID can be obtained from both the WDR and slow SQL view. You must specify the unique SQL ID when creating an SQL patch. The following provides a simple example. + +Scenario 1: Use SQL PATCH to optimize specific statements based on hints. + +```sql +MogDB=# set track_stmt_stat_level = 'L1,L1'; -- Enable full SQL statistics. +SET +MogDB=# select * from hint_t1 t1 where t1.a = 1; -- Execute the SQL statement. + a | b | c +---+---+--- + 1 | 1 | 1 +(1 row) +MogDB=# select unique_query_id, query, query_plan from dbe_perf.statement_history where query like '%hint_t1%'; -- Obtain the query plan and unique SQL ID. 
+-[ RECORD 1 ]---+---------------------------------------------------------------------------------------------- +unique_query_id | 2578396627 +query | select * from hint_t1 t1 where t1.a = ?; +query_plan | Datanode Name: sgnode + | Bitmap Heap Scan on hint_t1 t1 (cost=4.33..15.70 rows=10 p-time=0 p-rows=0 width=12) + | Recheck Cond: (a = '***') + | -> Bitmap Index Scan on hint_t1_a_idx (cost=0.00..4.33 rows=10 p-time=0 p-rows=0 width=0) + | Index Cond: (a = '***') + | + | +MogDB=# select * from dbe_sql_util.create_hint_sql_patch('patch1', 2578396627, 'indexscan(t1)'); -- Specify a hint patch for the specified unique SQL ID. +-[ RECORD 1 ]---------+-- +create_hint_sql_patch | t +MogDB=# explain select * from hint_t1 t1 where t1.a = 1; -- Check whether the hint takes effect. +NOTICE: Plan influenced by SQL hint patch + QUERY PLAN +----------------------------------------------------------------------------------- + [Bypass] + Index Scan using hint_t1_a_idx on hint_t1 t1 (cost=0.00..32.43 rows=10 width=12) + Index Cond: (a = 1) +(3 rows) +MogDB=# select * from hint_t1 t1 where t1.a = 1; -- Execute the statement again. + a | b | c +---+---+--- + 1 | 1 | 1 +(1 row) +MogDB=# select unique_query_id, query, query_plan from dbe_perf.statement_history where query like '%hint_t1%'; -- The query plan has been changed. +-[ RECORD 1 ]---+-------------------------------------------------------------------------------------------------- +unique_query_id | 2578396627 +query | select * from hint_t1 t1 where t1.a = ?; +query_plan | Datanode Name: sgnode + | Bitmap Heap Scan on hint_t1 t1 (cost=4.33..15.70 rows=10 p-time=0 p-rows=0 width=12) + | Recheck Cond: (a = '***') + | -> Bitmap Index Scan on hint_t1_a_idx (cost=0.00..4.33 rows=10 p-time=0 p-rows=0 width=0) + | Index Cond: (a = '***') + | + | +-[ RECORD 2 ]---+-------------------------------------------------------------------------------------------------- +unique_query_id | 2578396627 +query | select * from hint_t1 t1 where t1.a = ?; +query_plan | Datanode Name: sgnode + | Index Scan using hint_t1_a_idx on hint_t1 t1 (cost=0.00..8.27 rows=1 p-time=0 p-rows=0 width=12) + | Index Cond: (a = '***') + | + | +``` + +Scenario 2: Run the SQL PATCH command to report an error for a specific statement in advance. + +```sql +-- Delete patch 1. +MogDB=# select * from dbe_sql_util.drop_sql_patch('patch1'); + drop_sql_patch +---------------- + t +(1 row) + +-- Create an abort patch for the statement of the unique SQL ID. +MogDB=# select * from dbe_sql_util.create_abort_sql_patch('patch2', 2578396627); + create_abort_sql_patch + create_abort_sql_patch +------------------------ + t +(1 row) + +-- An error is reported in advance when the statement is executed again. +MogDB=# select * from hint_t1 t1 where t1.a = 1; +ERROR: Statement 2578396627 canceled by abort patch patch2 +``` + +## Enhancements + +None + +## Constraints + +1. Patches can be created only by unique SQL ID. If unique SQL IDs conflict, SQL patches that are used for hint-based optimization may affect performance but do not affect semantic correctness. +2. Only hints that do not change SQL semantics can be used as patches. SQL rewriting is not supported. +3. This tool is not applicable to logical backup and restoration. +4. The patch validity cannot be verified during patch creation. If the patch hint has syntax or semantic errors, the query execution is not affected. +5. 
Only the initial user, O&M administrator, monitoring administrator, and system administrator have the permission to perform this operation. +6. Patches are not shared between databases. When creating SQL patches, you need to connect to the target database. +7. In the centralized deployment scenario where the standby node is readable, you must specify the primary node to run the SQL PATCH command to create, modify, or delete functions and the standby node to report errors. +8. There is a delay in synchronizing an SQL patch to the standby node. The patch takes effect after the standby node replays related logs. +9. This function does not take effect for SQL statements in stored procedures because no unique SQL ID is generated for statements in stored procedures. +10. It is not recommended that the abort patch be used in the database for a long time. It should be used only as a workaround. If the database service is unavailable due to a kernel fault triggered by a specific statement, you must rectify the service fault or upgrade the kernel as soon as possible. After the upgrade, the method of generating unique SQL IDs may change. Therefore, the workaround may become invalid. +11. Currently, except DML statements, unique SQL IDs of SQL statements (such as CREATE TABLE) are generated by hashing the statement text. Therefore, SQL PATCH is sensitive to uppercase and lowercase letters, spaces, and line breaks. That is, even statements of different texts are semantically relative, you still need to create different SQL patches for them. For DML operations, SQL PATCH can take effect for the same statement with different input parameters, regardless of uppercase letters, lowercase letters, and spaces. + +## Dependencies + +This feature depends on the real-time resource monitoring function. To use this feature, set the **enable_resource_track** parameter to **on** and set **instr_unique_sql_count** to a value greater than 0. For different statements, if the generated unique SQL IDs conflict, the SQL PATCH statement incorrectly hits other statements that are not expected. Compared with hint patches that are used for optimization, abort patches have more side effects and should be used with caution. + +## Related Pages + +[Load Management](../../reference-guide/guc-parameters/load-management.md), [Query](../../reference-guide/guc-parameters/query.md), [Hint Based Tuning](../../performance-tuning/sql-tuning/hint-based-tuning.md) diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/middleware/deploying-a-distributed-database-using-kubernetes.md b/product/en/docs-mogdb/v5.0/characteristic-description/middleware/deploying-a-distributed-database-using-kubernetes.md index 68169c7c..819e1cab 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/middleware/deploying-a-distributed-database-using-kubernetes.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/middleware/deploying-a-distributed-database-using-kubernetes.md @@ -1,36 +1,36 @@ ---- -title: Deploying a Distributed Database Using Kubernetes -summary: Deploying a Distributed Database Using Kubernetes -author: Guo Huan -date: 2022-05-10 ---- - -# Deploying a Distributed Database Using Kubernetes - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -Deploys a distributed database in one-click mode. - -## Benefits - -Quickly builds a distributed database, and verifies and uses the distributed capability. 
- -## Description - -Patroni is used to implement planned switchover and automatic failover in case of faults. HAProxy is used to implement read and write load balancing between the primary and standby MogDB nodes. ShardingSphere is used to implement distributed capabilities. All functions are packaged into images and one-click deployment scripts are provided. - -## Enhancements - -None. - -## Constraints - -Only CentOS and openEuler are supported. - -## Dependencies - +--- +title: Deploying a Distributed Database Using Kubernetes +summary: Deploying a Distributed Database Using Kubernetes +author: Guo Huan +date: 2022-05-10 +--- + +# Deploying a Distributed Database Using Kubernetes + +## Availability + +This feature is available since MogDB 3.0.0. + +## Introduction + +Deploys a distributed database in one-click mode. + +## Benefits + +Quickly builds a distributed database, and verifies and uses the distributed capability. + +## Description + +Patroni is used to implement planned switchover and automatic failover in case of faults. HAProxy is used to implement read and write load balancing between the primary and standby MogDB nodes. ShardingSphere is used to implement distributed capabilities. All functions are packaged into images and one-click deployment scripts are provided. + +## Enhancements + +None. + +## Constraints + +Only CentOS and openEuler are supported. + +## Dependencies + ShardingSphere, Patroni, HAProxy \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/middleware/distributed-database-capability.md b/product/en/docs-mogdb/v5.0/characteristic-description/middleware/distributed-database-capability.md index 1c8fbb08..4693652d 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/middleware/distributed-database-capability.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/middleware/distributed-database-capability.md @@ -1,36 +1,36 @@ ---- -title: Distributed Database Capability -summary: Distributed Database Capability -author: Guo Huan -date: 2022-05-10 ---- - -# Distributed Database Capability - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -This feature uses the distributed middleware shardingsphere to provide MogDB the distributed database capability. When 16 Kunpeng 920 (128 cores) nodes are used for networking (1 x shardingsphere-proxy, 7 x shardingsphere-jdbc, 8 x MogDB), the perfect sharding performance is greater than 10 million transactions per minute C (tpmC). - -## Benefits - -A distributed database that logically has no resource restriction can be built over the middleware. - -## Description - -With the sharding capability of shardingsphere, multiple MogDB databases can logically form a larger database with distributed transactions and elastic scaling capabilities. The usage method is the same as that of an MogDB database. - -## Enhancements - -None. - -## Constraints - -None. - -## Dependencies - +--- +title: Distributed Database Capability +summary: Distributed Database Capability +author: Guo Huan +date: 2022-05-10 +--- + +# Distributed Database Capability + +## Availability + +This feature is available since MogDB 3.0.0. + +## Introduction + +This feature uses the distributed middleware shardingsphere to provide MogDB the distributed database capability. 
When 16 Kunpeng 920 (128 cores) nodes are used for networking (1 x shardingsphere-proxy, 7 x shardingsphere-jdbc, 8 x MogDB), the perfect sharding performance is greater than 10 million transactions per minute C (tpmC). + +## Benefits + +A distributed database that logically has no resource restriction can be built over the middleware. + +## Description + +With the sharding capability of shardingsphere, multiple MogDB databases can logically form a larger database with distributed transactions and elastic scaling capabilities. The usage method is the same as that of an MogDB database. + +## Enhancements + +None. + +## Constraints + +None. + +## Dependencies + Shardingsphere middleware \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/middleware/middleware.md b/product/en/docs-mogdb/v5.0/characteristic-description/middleware/middleware.md index b4858295..1a8f29ab 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/middleware/middleware.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/middleware/middleware.md @@ -1,12 +1,12 @@ ---- -title: Middleware -summary: Middleware -author: Guo Huan -date: 2023-05-22 ---- - -# Middleware - -+ **[Distributed Database Capability](distributed-database-capability.md)** -+ **[Deploying a Distributed Database Using Kubernetes](deploying-a-distributed-database-using-kubernetes.md)** +--- +title: Middleware +summary: Middleware +author: Guo Huan +date: 2023-05-22 +--- + +# Middleware + ++ **[Distributed Database Capability](distributed-database-capability.md)** ++ **[Deploying a Distributed Database Using Kubernetes](deploying-a-distributed-database-using-kubernetes.md)** + **[Distributed Analysis Capabilities](distributed-analysis-capabilities.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/characteristic-description/workload-management/workload-management.md b/product/en/docs-mogdb/v5.0/characteristic-description/workload-management/workload-management.md index d240207d..ce00624f 100644 --- a/product/en/docs-mogdb/v5.0/characteristic-description/workload-management/workload-management.md +++ b/product/en/docs-mogdb/v5.0/characteristic-description/workload-management/workload-management.md @@ -1,10 +1,10 @@ ---- -title: Workload Management -summary: Workload Management -author: Guo Huan -date: 2023-05-22 ---- - -# Workload Management - +--- +title: Workload Management +summary: Workload Management +author: Guo Huan +date: 2023-05-22 +--- + +# Workload Management + - **[High-Latency Escape at the Infrastructure Layer](high-latency-escape-at-the-infrastructure-layer.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/developer-guide/dev/2-development-based-on-jdbc/jdbc-release-notes.md b/product/en/docs-mogdb/v5.0/developer-guide/dev/2-development-based-on-jdbc/jdbc-release-notes.md index a35e97da..1d63344c 100644 --- a/product/en/docs-mogdb/v5.0/developer-guide/dev/2-development-based-on-jdbc/jdbc-release-notes.md +++ b/product/en/docs-mogdb/v5.0/developer-guide/dev/2-development-based-on-jdbc/jdbc-release-notes.md @@ -9,29 +9,6 @@ date: 2023-12-13 MogDB JDBC Changelog mainly contains new features, improvements, BUG fixes and other changes. Please read the details below carefully to understand any changes. -## 5.0.0.9 - -2024-09-26 - -### Add - -- Add RAW type adaptation. Support getBytes/getBlob. 
SQLType is VARBINARY - -### Fixed - -- RegisterOut procedure/function out parameter CHAR as VARCHAR -- Modify the method of obtaining the data returned by the stored procedure/function cursor under `enable_plsql_return_hold_cursor=on`. The error in other scenarios remains unchanged - - Change the original `FETCH ALL in {cursorName}` to `FETCH FORWARD {fetchSize} in {cursorName}` under `autocommit=true` - - `fetchSize` is configured by prepareCall.setFetchSize and connection string `defaultRowFetchSize`. If neither is configured, the default value is 10 - - `close {cursorName}` statement will be executed under `rs.close()` -- Error when the procedure/function out parameter is BLOB/CLOB/RAW and is NULL -- Modify the BLOB type to return getColumnClassName to `org.postgresql.core.PGBlob` -- Modify the CLOB type return getColumnClassName to `org.postgresql.core.PGClob` -- The procedure/function parameter type is CHAR/VARCHAR/NVARCHAR, and the registered type is a mixed scenario of CHAR/VARCHAR/NVARCHAR -- registerOutParameter registers `TIME_WITH_TIMEZONE` as `TIMETZ` type -- registerOutParameter registers `TIMESTAMP_WITH_TIMEZONE` as `TIMESTAMPTZ` type -- setNull(1,types.Array,"table of") issue - ## 5.0.0.8 2024-07-01 diff --git a/product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/1-development-based-on-odbc.md b/product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/1-development-based-on-odbc.md index 90236f78..8e9c1c42 100644 --- a/product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/1-development-based-on-odbc.md +++ b/product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/1-development-based-on-odbc.md @@ -13,6 +13,7 @@ date: 2021-04-26 - **[Example: Common Functions and Batch Binding](5-example-common-functions-and-batch-binding.md)** - **[Typical Application Scenarios and Configurations](5.1-typical-application-scenarios-and-configurations.md)** - **[ODBC Interface Reference](6-ODBC/odbc-interface-reference.md)** +- **[ODBC Release Note](odbc-release-notes.md)** Open Database Connectivity (ODBC) is a Microsoft API for accessing databases based on the X/OPEN CLI. Applications interact with the database through the APIs provided by ODBC, which enhances their portability, scalability, and maintainability. diff --git a/product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/3-configuring-a-data-source-in-the-linux-os.md b/product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/3-configuring-a-data-source-in-the-linux-os.md index 6038f7ae..69a32f00 100644 --- a/product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/3-configuring-a-data-source-in-the-linux-os.md +++ b/product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/3-configuring-a-data-source-in-the-linux-os.md @@ -13,7 +13,7 @@ The ODBC driver (psqlodbcw.so) provided by MogDB can be used after it has been c 1. Obtain the source code package of unixODBC by following link: - [https://sourceforge.net/projects/unixodbc/files/unixODBC](https://sourceforge.net/projects/unixodbc/files/unixODBC) + After the download, validate the integrity based on the integrity validation algorithm provided by the community. 
diff --git a/product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/odbc-release-notes.md b/product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/odbc-release-notes.md
new file mode 100644
index 00000000..ed773632
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/developer-guide/dev/3-development-based-on-odbc/odbc-release-notes.md
@@ -0,0 +1,14 @@
+---
+title: ODBC release note
+summary: ODBC release note
+author: 齐永江 郭欢
+date: 2024-03-29
+---
+
+# ODBC release note
+
+## Version 5.0.0.2 (2024-03-29)
+
+### Add
+
+- We now support the AIX-7.2.0.0_power platform.
diff --git a/product/en/docs-mogdb/v5.0/developer-guide/dev/4-development-based-on-libpq/development-based-on-libpq.md b/product/en/docs-mogdb/v5.0/developer-guide/dev/4-development-based-on-libpq/development-based-on-libpq.md
index cd120482..7f4992d8 100644
--- a/product/en/docs-mogdb/v5.0/developer-guide/dev/4-development-based-on-libpq/development-based-on-libpq.md
+++ b/product/en/docs-mogdb/v5.0/developer-guide/dev/4-development-based-on-libpq/development-based-on-libpq.md
@@ -13,4 +13,5 @@ date: 2023-05-18
 + **[Development Process](development-process.md)**
 + **[Example](libpq-example.md)**
 + **[Link Parameters](link-parameters.md)**
-+ **[libpq API Reference](2-libpq/libpq-api-reference.md)** \ No newline at end of file
++ **[libpq API Reference](2-libpq/libpq-api-reference.md)**
++ **[libpq Release Note](./libpq-release-notes.md)** \ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/developer-guide/dev/4-development-based-on-libpq/libpq-release-notes.md b/product/en/docs-mogdb/v5.0/developer-guide/dev/4-development-based-on-libpq/libpq-release-notes.md
new file mode 100644
index 00000000..ba4a4e57
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/developer-guide/dev/4-development-based-on-libpq/libpq-release-notes.md
@@ -0,0 +1,14 @@
+---
+title: libpq release note
+summary: libpq release note
+author: 齐永江 郭欢
+date: 2024-03-29
+---
+
+# libpq release note
+
+## Version 5.0.0.1 (2024-03-29)
+
+### Add
+
+- We now support the AIX-7.2.0.0_power platform.
diff --git a/product/en/docs-mogdb/v5.0/developer-guide/logical-replication/logical-decoding/logical-decoding-support-for-DDL.md b/product/en/docs-mogdb/v5.0/developer-guide/logical-replication/logical-decoding/logical-decoding-support-for-DDL.md
new file mode 100644
index 00000000..fc14af47
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/developer-guide/logical-replication/logical-decoding/logical-decoding-support-for-DDL.md
@@ -0,0 +1,186 @@
+---
+title: Logical Decoding Support for DDL
+summary: Logical Decoding Support for DDL
+author: 郭欢 何文健
+date: 2024-01-29
+---
+
+# Logical Decoding Support for DDL
+
+Starting from MogDB version 5.0.8, the logical replication feature has added support for DDL operations, reducing the manual maintenance of tables during logical replication and preventing issues in the replication synchronization process due to changes in table structure. The kernel logical decoding has added sequence support, and the three decoding plugins wal2json, test_decoding, and mppdb_decoding have completed the sequence decoding interface.
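+
+A minimal end-to-end sketch of the feature (the slot and table names are illustrative; the prerequisites and the full walkthrough with decoded output follow in the sections below):
+
+```sql
+-- Assumes wal_level = logical and enable_ddl_logical_record = on (see Notes below).
+SELECT 'init' FROM pg_create_logical_replication_slot('ddl_demo_slot', 'wal2json');
+CREATE TABLE ddl_demo_tb(col1 int);  -- this DDL statement is now decodable
+SELECT data FROM pg_logical_slot_get_changes('ddl_demo_slot', NULL, NULL, 'format-version', '1');
+SELECT 'stop' FROM pg_drop_replication_slot('ddl_demo_slot');
+```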
+ +## Feature Description + +MogDB supports the following DDL (Data Definition Language, database schema definition language) operations during logical decoding: + +- CREATE/DROP TABLE|TABLE PARTITION +- CREATE/DROP INDEX +- TRUNCATE TABLE +- ALTER TABLE ADD COLUMN [CONSTRAINT] +- ALTER TABLE DROP COLUMN +- ALTER TABLE ALTER COLUMN [TYPE|SET NOT NULL|DROP NOT NULL|SET DEFAULT|DROP DEFAULT] +- ALTER TABLE [DROP|ADD|TRUNCATE] PARTITION +- ALTER TABLE MODIFY COLUMN data_type [ON UPDATE] +- ALTER TABLE MODIFY COLUMN [NOT] NULL +- ALTER TABLE ADD COLUMN [AFTER|FIRST] + +The necessary plugins for logical decoding support of DDL operations are: + +- wal2json +- mppdb_decoding +- test_decoding + +The following plugins have added new parsing for the logical decoding DDL type log `xl_logical_ddl_message`: + +- pg_xlogdump +- mog_xlogdump + +## Notes + +- Only DDL operations on row-stored tables are supported. +- DDL operations on column-stored and Ustore engines are not supported. +- DDL operations on temporary tables are not supported. +- DDL operations on non-logged tables are not supported. +- Logical subscriptions are not supported. +- When a statement contains multiple objects and the objects belong to different schemas, the output is in the order they appear, with their respective schemas. +- Some DDL operation statements may produce some DML statement parsing results that do not need attention due to kernel implementation reasons. +- The GUC parameter [wal_level](../../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_level) needs to be set to `logical` or higher, and [enable_ddl_logical_record](../../../reference-guide/guc-parameters/ha-replication/sending-server.md#enable_ddl_logical_record) must be enabled. +- wal2json only supports `format-version==1` and does not support `format-version==2`. + +## Example + +1. Logical decoding feature (using wal2json as the plugin) + + - Set `enable_ddl_logical_record=true` and `wal_level=logical`. 
+ + Input SQL statements are as follows: + + ```sql + DROP TABLE IF EXISTS range_sales ; + SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'wal2json'); + + CREATE TABLE logical_tb2(col1 boolean[],col2 boolean); + drop table logical_tb2; + CREATE TABLE range_sales + ( + product_id INT4 NOT NULL, + customer_id INT4 PRIMARY KEY, + time_id DATE, + channel_id CHAR(1), + type_id INT4, + quantity_sold NUMERIC(3), + amount_sold NUMERIC(10,2) + ) + PARTITION BY RANGE (time_id) + ( + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + PARTITION time_2010 VALUES LESS THAN ('2011-01-01'), + PARTITION time_2011 VALUES LESS THAN ('2012-01-01') + ); + CREATE INDEX range_sales_idx1 ON range_sales(product_id) LOCAL; + CREATE INDEX range_sales_idx2 ON range_sales(time_id) GLOBAL; + + drop INDEX range_sales_idx1 ; + drop INDEX range_sales_idx2 ; + + drop TABLE range_sales; + SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'format-version', '1'); + SELECT 'stop' FROM pg_drop_replication_slot('regression_slot'); + ``` + + - wal2json decoding result: + + ```json + data + ------------------------------------------------------------------------------------------------- + {"change":[ + + DDL message: role: hewenjian; search_path: "$user",public; sz: 54; schemaname: public; + + original DDL query:CREATE TABLE logical_tb2(col1 boolean[],col2 boolean); + + ]} + {"change":[ + + DDL message: role: hewenjian; search_path: "$user",public; sz: 23; schemaname: public; + + original DDL query:drop table logical_tb2; + + ]} + {"change":[ + + DDL message: role: hewenjian; search_path: "$user",public; sz: 502; schemaname: public;+ + original DDL query:CREATE TABLE range_sales + + ( + + product_id INT4 NOT NULL, + + customer_id INT4 PRIMARY KEY, + + time_id DATE, + + channel_id CHAR(1), + + type_id INT4, + + quantity_sold NUMERIC(3), + + amount_sold NUMERIC(10,2) + + ) + + PARTITION BY RANGE (time_id) + + ( + + PARTITION time_2008 VALUES LESS THAN ('2009-01-01'), + + PARTITION time_2009 VALUES LESS THAN ('2010-01-01'), + + PARTITION time_2010 VALUES LESS THAN ('2011-01-01'), + + PARTITION time_2011 VALUES LESS THAN ('2012-01-01') + + ); + + ]} + {"change":[ + + DDL message: role: hewenjian; search_path: "$user",public; sz: 63; schemaname: public; + + original DDL query:CREATE INDEX range_sales_idx1 ON range_sales(product_id) LOCAL; + + ]} + {"change":[ + + DDL message: role: hewenjian; search_path: "$user",public; sz: 61; schemaname: public; + + original DDL query:CREATE INDEX range_sales_idx2 ON range_sales(time_id) GLOBAL; + + ]} + {"change":[ + + DDL message: role: hewenjian; search_path: "$user",public; sz: 30; schemaname: public; + + original DDL query:drop INDEX range_sales_idx1 ; + + ]} + {"change":[ + + DDL message: role: hewenjian; search_path: "$user",public; sz: 30; schemaname: public; + + original DDL query:drop INDEX range_sales_idx2 ; + + ]} + {"change":[ + + DDL message: role: hewenjian; search_path: "$user",public; sz: 23; schemaname: public; + + original DDL query:drop TABLE range_sales; + + ]} + (8 rows) + ``` + +2. 
pg_xlogdump
+
+   For the statement `TRUNCATE TABLE range_sales,range_sales1,range_sales2;`, the corresponding `wal2json` output for the generated WAL is:
+
+   ```json
+    data
+   --------------------------------------------------------------------------------------------------------------------------------------------
+    {"change":[                                                                                                                      +
+   DDL message: role: hewenjian; search_path: public, new_schema1, new_schema2; sz: 53; schemaname: public, new_schema1, new_schema2;+
+   original DDL query:TRUNCATE TABLE range_sales,range_sales1,range_sales2;                                                          +
+   ]}
+   (1 row)
+   ```
+
+   The corresponding pg_xlogdump output for the same WAL record is:
+
+   ```
+   REDO @ 0/55599A0; LSN 0/5559A80: prev 0/5559918; xid 15966; term 1; len 189; total 223; crc 4229648830; desc: LogicalDDLMessage - prefix "DDL"; role "hewenjian"; search_path "public, new_schema1, new_schema2"; schemaname "public, new_schema1, new_schema2"; payload (53 bytes): 54 52 55 4E 43 41 54 45 20 54 41 42 4C 45 20 72 61 6E 67 65 5F 73 61 6C 65 73 2C 72 61 6E 67 65 5F 73 61 6C 65 73 31 2C 72 61 6E 67 65 5F 73 61 6C 65 73 32 3B
+   ```
+
+3. mog_xlogdump
+
+   For the same `TRUNCATE TABLE range_sales,range_sales1,range_sales2;` statement, the corresponding `wal2json` output for the generated WAL is:
+
+   ```json
+    data
+   --------------------------------------------------------------------------------------------------------------------------------------------
+    {"change":[                                                                                                                      +
+   DDL message: role: hewenjian; search_path: public, new_schema1, new_schema2; sz: 53; schemaname: public, new_schema1, new_schema2;+
+   original DDL query:TRUNCATE TABLE range_sales,range_sales1,range_sales2;                                                          +
+   ]}
+   (1 row)
+   ```
+
+   The corresponding mog_xlogdump output for the same WAL record is:
+
+   ```
+   REDO @ 0/55599A0; LSN 0/5559A80: prev 0/5559918; xid 15966; term 1; len 189; total 223; crc 4229648830; desc: LogicalDDLMessage - prefix "DDL"; role "hewenjian"; search_path "public, new_schema1, new_schema2"; schemaname "public, new_schema1, new_schema2"; payload (53 bytes): 54 52 55 4E 43 41 54 45 20 54 41 42 4C 45 20 72 61 6E 67 65 5F 73 61 6C 65 73 2C 72 61 6E 67 65 5F 73 61 6C 65 73 31 2C 72 61 6E 67 65 5F 73 61 6C 65 73 32 3B
+   ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/developer-guide/logical-replication/logical-decoding/logical-decoding.md b/product/en/docs-mogdb/v5.0/developer-guide/logical-replication/logical-decoding/logical-decoding.md
index 888e1702..e6262a43 100644
--- a/product/en/docs-mogdb/v5.0/developer-guide/logical-replication/logical-decoding/logical-decoding.md
+++ b/product/en/docs-mogdb/v5.0/developer-guide/logical-replication/logical-decoding/logical-decoding.md
@@ -9,3 +9,4 @@ date: 2023-05-19
 + **[Overview](1-logical-decoding.md)**
 + **[Logical Decoding by SQL Function Interfaces](2-logical-decoding-by-sql-function-interfaces.md)**
++ **[Logical Decoding Support For DDL](logical-decoding-support-for-DDL.md)**
diff --git a/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table.md b/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table.md
index 19214ebc..52be0429 100644
--- a/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table.md
+++ b/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table.md
@@ -108,7 +108,7 @@ Modifies tables, including modifying table definitions, renaming tables, renamin
 - **DROP PRIMARY KEY [ RESTRICT | CASCADE ]**
 
   Deletes the primary key of a table.
 
 - **DROP FOREIGN KEY foreign_key_name [ RESTRICT | CASCADE ]**
 
-  Deletes the primary key of a table.
+  Deletes the foreign key of a table.
 
diff --git a/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-sql-syntax.md b/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-sql-syntax.md
index 1568aa48..1042e786 100644
--- a/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-sql-syntax.md
+++ b/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-sql-syntax.md
@@ -77,4 +77,4 @@ date: 2023-05-19
 + **[SHOW VARIABLES](dolphin-show-variables.md)**
 + **[SHOW WARNINGS/ERRORS](dolphin-show-warnings.md)**
 + **[UPDATE](dolphin-update.md)**
-+ **[USE db_name](dolphin-use-db_name.md)**
\ No newline at end of file
++ **[USE db_name](dolphin-use-db-name.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-use-db_name.md b/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-use-db-name.md
similarity index 68%
rename from product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-use-db_name.md
rename to product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-use-db-name.md
index be7bcdb5..1ab93d29 100644
--- a/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-use-db_name.md
+++ b/product/en/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-use-db-name.md
@@ -9,7 +9,7 @@ date: 2022-10-24
 
 ## Function
 
-The USE db_name statement uses the db_name schema as the default (current) schema for subsequent statements. The schema remains the default schema until the end of the paragraph, or until a different USE statement is published.
+The USE db\_name statement uses the db\_name database as the default (current) database for subsequent statements. The database remains the default database until the end of the session, or until another USE statement is issued.
 
 ## Precautions
 
@@ -25,12 +25,12 @@ USE db_name
 
 - **db_name**
 
-  Schema name
+  Database name
 
 ## Examples
 
 ```sql
---Switch to the db1 schema.
+--Switch to the db1 database.
 MogDB=# USE db1;
 SET
 MogDB=# CREATE TABLE test(a text);
@@ -38,7 +38,7 @@ CREATE TABLE
 MogDB=# INSERT INTO test VALUES('db1');
 INSERT 0 1
 
---Switch to the db2 schema.
+--Switch to the db2 database.
 MogDB=# USE db2;
 SET
 MogDB=# CREATE TABLE test(a text);
@@ -63,7 +63,7 @@ MogDB=# select a from test;
 db2
 (1 row)
 
---Switch to the db1 schema.
+--Switch to the db1 database.
 MogDB=# USE db1;
 SET
 MogDB=# select a from test;
diff --git a/product/en/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md b/product/en/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md
index c533bdb4..3af5831d 100644
--- a/product/en/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md
+++ b/product/en/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md
@@ -288,13 +288,13 @@ The set functions support **multiset union**, **intersect**, **except all**, and
     ANONYMOUS BLOCK EXECUTE
     ```
 
-- extend[(e)]
+- extend[(count[, idx])]
 
-  Parameters: *e* is of the int4 type.
+  Parameters: *count* and *idx* are of the int4 type.
 
   Return type: No value is returned.
 
-  Description: Only the nest-table type is supported. One element is extended at the end of the nest-table variable.
+  Description: Only the nest-table type is supported. One element, or *count* elements if *count* is specified, is appended to the end of the nest-table variable. If *idx* is also specified, *count* copies of the element at index *idx* are appended to the end of the variable.
 
   Restriction: extend() is not supported in nesting scenarios.
 
@@ -326,11 +326,24 @@ The set functions support **multiset union**, **intersect**, **except all**, and
     INFO: {1}
     INFO: {1,NULL,NULL}
     ANONYMOUS BLOCK EXECUTE
+
+    MogDB=# declare
+    MogDB-# type nest is table of int;
+    MogDB-# a nest := nest(1);
+    MogDB-# begin
+    MogDB$# raise info '%', a;
+    MogDB$# a.extend(2,1);
+    MogDB$# raise info '%', a;
+    MogDB$# end;
+    MogDB$# /
+    INFO: {1}
+    INFO: {1,1,1}
+    ANONYMOUS BLOCK EXECUTE
     ```
 
-- delete[(idx)]
+- delete[(idx1[, idx2])]
 
-  Parameters: *idx* is of the int4 or varchar2 type.
+  Parameters: *idx1* and *idx2* are of the int4 or varchar2 type.
 
   Return type: No value is returned.
 
@@ -366,6 +379,19 @@ The set functions support **multiset union**, **intersect**, **except all**, and
     INFO: {1,2,3,4,5}
     INFO: {1,2,4,5}
     ANONYMOUS BLOCK EXECUTE
+
+    MogDB=# declare
+    MogDB-# type nest is table of int;
+    MogDB-# a nest := nest(1,2,3,4,5);
+    MogDB-# begin
+    MogDB$# raise info '%', a;
+    MogDB$# a.delete(2,4);
+    MogDB$# raise info '%', a;
+    MogDB$# end;
+    MogDB$# /
+    INFO: {1,2,3,4,5}
+    INFO: {1,5}
+    ANONYMOUS BLOCK EXECUTE
     ```
 
 - trim[(n)]
@@ -619,9 +645,9 @@ The set functions support **multiset union**, **intersect**, **except all**, and
         for i in 1 .. v2.count loop
             fetch c1 into tmp;
             if tmp is null then
-                RAISE NOTICE '%', i || ': is null';
+                dbe_output.print_line(i || ': is null');
             else
-                RAISE NOTICE '%', i || ': ' || tmp;
+                dbe_output.print_line(i || ': ' || tmp);
             end if;
         end loop;
         close c1;
@@ -662,9 +688,9 @@ The set functions support **multiset union**, **intersect**, **except all**, and
         for i in 1 .. 
v2.count loop fetch c1 into tmp; if tmp is null then - RAISE NOTICE '%', i || ': is null'; + dbe_output.print_line(i || ': is null'); else - RAISE NOTICE '%', i || ': ' || tmp; + dbe_output.print_line(i || ': ' || tmp); end if; end loop; close c1; diff --git a/product/en/docs-mogdb/v5.0/high-available-guide/cluster-management/cm-configuration-parameter/cm-cm_server.md b/product/en/docs-mogdb/v5.0/high-available-guide/cluster-management/cm-configuration-parameter/cm-cm_server.md index 89a39d76..493a4353 100644 --- a/product/en/docs-mogdb/v5.0/high-available-guide/cluster-management/cm-configuration-parameter/cm-cm_server.md +++ b/product/en/docs-mogdb/v5.0/high-available-guide/cluster-management/cm-configuration-parameter/cm-cm_server.md @@ -197,7 +197,7 @@ Parameter description: Specifies the basic duration of cm_server arbitration del **Value range**: Boolean values **on**, **off**, **true**, **false**, **yes**, **no**, **1**, and **0** The modification of this parameter takes effect after reloading. For details, see [Options of set cm](./../introduction-to-cm_ctl-tool.md#setcm). -**Default value**: **off** +**Default value**: **on** ## datastorage_threshold_check_interval diff --git a/product/en/docs-mogdb/v5.0/installation-guide/manual-installation.md b/product/en/docs-mogdb/v5.0/installation-guide/manual-installation.md index 128462fc..a644070a 100644 --- a/product/en/docs-mogdb/v5.0/installation-guide/manual-installation.md +++ b/product/en/docs-mogdb/v5.0/installation-guide/manual-installation.md @@ -54,7 +54,7 @@ total 90236 chown omm:dbgrp MogDB-5.0.1-CentOS-64bit.tar.gz su - omm cd /opt/mogdb/software/ -tar -xf MogDB-5.0.1-CentOS-64bit.tar.gz -C /opt/mogdb/software --strip-components=1 +tar -xf MogDB-5.0.1-CentOS-64bit.tar.gz -C /opt/mogdb/software ``` ## 2. 
Database Initialization (Standalone) diff --git a/product/en/docs-mogdb/v5.0/mogeaver/mogeaver-release-notes.md b/product/en/docs-mogdb/v5.0/mogeaver/mogeaver-release-notes.md index 8195d5f2..5147e7bd 100644 --- a/product/en/docs-mogdb/v5.0/mogeaver/mogeaver-release-notes.md +++ b/product/en/docs-mogdb/v5.0/mogeaver/mogeaver-release-notes.md @@ -11,32 +11,6 @@ date: 2022-06-17 > > sudo xattr -r -d com.apple.quarantine /Applications/Mogeaver.app -## 23.3.1 - -2024-09-29 - -- [mogeaver-ce-23.3.1-linux.gtk.aarch64.tar.gz](https://cdn-mogdb.enmotech.com/mogeaver/23.3.1/mogeaver-ce-23.3.1-linux.gtk.aarch64.tar.gz) -- [mogeaver-ce-23.3.1-linux.gtk.x86_64.tar.gz](https://cdn-mogdb.enmotech.com/mogeaver/23.3.1/mogeaver-ce-23.3.1-linux.gtk.x86_64.tar.gz) -- [mogeaver-ce-23.3.1-macosx.cocoa.aarch64.tar.gz](https://cdn-mogdb.enmotech.com/mogeaver/23.3.1/mogeaver-ce-23.3.1-macosx.cocoa.aarch64.tar.gz) -- [mogeaver-ce-23.3.1-macosx.cocoa.x86_64.tar.gz](https://cdn-mogdb.enmotech.com/mogeaver/23.3.1/mogeaver-ce-23.3.1-macosx.cocoa.x86_64.tar.gz) -- [mogeaver-ce-23.3.1-win32.win32.x86_64.zip](https://cdn-mogdb.enmotech.com/mogeaver/23.3.1/mogeaver-ce-23.3.1-win32.win32.x86_64.zip) - -### Features - -- **MogDB:** Update jdbc driver 5.0.0.9.og -- **MogDB:** Add system schema definition -- **MogDB:** Support `pg_get_typedef(type_name)` -- **MogDB:** Disable the stored procedure/function modification function under the package -- **MogDB:** Support `raw` column type -- **MogDB:** Adapt parameter `compat_oracle_txn_control` -- **MogDB:** Optimize package object query/package creation -- **MogDB:** Support generating stored procedure with out parameter statement - -### Bug Fixes - -- **MogDB:** Fix the problem of opening a single database system mode display error -- **MogDB:** Column type `nvarchar` length display problem - ## 23.3.0 2024-02-29 diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/performance-tuning.md b/product/en/docs-mogdb/v5.0/performance-tuning/performance-tuning.md index 0a2aeb24..a5da14cf 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/performance-tuning.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/performance-tuning.md @@ -1,13 +1,13 @@ ---- -title: Performance Tuning -summary: Performance Tuning -author: zhang cuiping -date: 2023-04-14 ---- - -# Performance Tuning - -- **[System Tuning](system-tuning/system-tuning.md)** -- **[SQL Tuning](sql-tuning/sql-tuning.md)** -- **[WDR Snapshot](wdr/wdr.md)** +--- +title: Performance Tuning +summary: Performance Tuning +author: zhang cuiping +date: 2023-04-14 +--- + +# Performance Tuning + +- **[System Tuning](system-tuning/system-tuning.md)** +- **[SQL Tuning](sql-tuning/sql-tuning.md)** +- **[WDR Snapshot](wdr/wdr.md)** - **[TPCC Performance Tuning](TPCC-performance-tuning-guide.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/experience-in-rewriting-sql-statements.md b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/experience-in-rewriting-sql-statements.md index 8da3b8aa..7937fe23 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/experience-in-rewriting-sql-statements.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/experience-in-rewriting-sql-statements.md @@ -1,63 +1,63 @@ ---- -title: Experience in Rewriting SQL Statements -summary: Experience in Rewriting SQL Statements -author: Guo Huan -date: 2021-03-16 ---- - -# Experience in Rewriting SQL Statements - -Based on the SQL execution mechanism and a large number 
of practices, SQL statements can be optimized by following certain rules to enable the database to execute SQL statements more quickly and obtain correct results. You can comply with these rules to improve service query efficiency. - -- Replace **UNION** with **UNION ALL**. - - **UNION** eliminates duplicate rows while merging two result sets but **UNION ALL** merges the two result sets without deduplication. Therefore, replace **UNION** with **UNION ALL** if you are sure that the two result sets do not contain duplicate rows based on the service logic. - -- Add **NOT NULL** to the join columns. - - If there are many NULL values in the **JOIN** columns, you can add the filter criterion **IS NOT NULL** to filter data in advance to improve the **JOIN** efficiency. - -- Convert **NOT IN** to **NOT EXISTS**. - - **nestloop anti join** must be used to implement **NOT IN**, and **hash anti join** is required for **NOT EXISTS**. If no NULL value exists in the **JOIN** columns, **NOT IN** is equivalent to **NOT EXISTS**. Therefore, if you are sure that no NULL value exists, you can convert **NOT IN** to **NOT EXISTS** to generate **hash join** and to improve the query performance. - - As shown in the following statement, the **t2.d2** column does not contain null values (it is set to **NOT NULL**) and **NOT EXISTS** is used for the query. - - ```sql - SELECT * FROM t1 WHERE NOT EXISTS (SELECT * FROM t2 WHERE t1.c1=t2.d2); - ``` - - The generated execution plan is as follows: - - ``` - QUERY PLAN - ------------------------------ - Hash Anti Join - Hash Cond: (t1.c1 = t2.d2) - -> Seq Scan on t1 - -> Hash - -> Seq Scan on t2 - (5 rows) - ``` - -- Use **hashagg**. - - If a plan involving groupAgg and SORT operations generated by the **GROUP BY** statement is poor in performance, you can set **work_mem** to a larger value to generate a **hashagg** plan, which does not require sorting and improves the performance. - -- Replace functions with **CASE** statements. - - The MogDB performance greatly deteriorates if a large number of functions are called. In this case, you can change the pushdown functions to **CASE** statements. - -- Do not use functions or expressions for indexes. - - Using functions or expressions for indexes stops indexing. Instead, it enables scanning on the full table. - -- Do not use **!=** or <> operators, **NULL**, **OR**, or implicit parameter conversion in **WHERE** clauses. - -- Split complex SQL statements. - - You can split an SQL statement into several ones and save the execution result to a temporary table if the SQL statement is too complex to be tuned using the solutions above, including but not limited to the following scenarios: - - - The same subquery is involved in multiple SQL statements of a job and the subquery contains large amounts of data. - - Incorrect plan cost causes a small hash bucket of subquery. For example, the actual number of rows is 10 million, but only 1000 rows are in hash bucket. - - Functions such as **substr** and **to_number** cause incorrect measures for subqueries containing large amounts of data. +--- +title: Experience in Rewriting SQL Statements +summary: Experience in Rewriting SQL Statements +author: Guo Huan +date: 2021-03-16 +--- + +# Experience in Rewriting SQL Statements + +Based on the SQL execution mechanism and a large number of practices, SQL statements can be optimized by following certain rules to enable the database to execute SQL statements more quickly and obtain correct results. 
You can comply with these rules to improve service query efficiency.
+
+- Replace **UNION** with **UNION ALL**.
+
+  **UNION** eliminates duplicate rows while merging two result sets, but **UNION ALL** merges the two result sets without deduplication. Therefore, replace **UNION** with **UNION ALL** if you are sure, based on the service logic, that the two result sets do not contain duplicate rows.
+
+- Add **NOT NULL** to the join columns.
+
+  If there are many NULL values in the **JOIN** columns, you can add the filter criterion **IS NOT NULL** to filter data in advance to improve the **JOIN** efficiency.
+
+- Convert **NOT IN** to **NOT EXISTS**.
+
+  **nestloop anti join** must be used to implement **NOT IN**, whereas **hash anti join** is used for **NOT EXISTS**. If no NULL value exists in the **JOIN** columns, **NOT IN** is equivalent to **NOT EXISTS**. Therefore, if you are sure that no NULL value exists, you can convert **NOT IN** to **NOT EXISTS** to generate a **hash join** and improve the query performance.
+
+  As shown in the following statement, the **t2.d2** column does not contain null values (it is set to **NOT NULL**) and **NOT EXISTS** is used for the query.
+
+  ```sql
+  SELECT * FROM t1 WHERE NOT EXISTS (SELECT * FROM t2 WHERE t1.c1=t2.d2);
+  ```
+
+  The generated execution plan is as follows:
+
+  ```
+  QUERY PLAN
+  ------------------------------
+   Hash Anti Join
+     Hash Cond: (t1.c1 = t2.d2)
+     ->  Seq Scan on t1
+     ->  Hash
+           ->  Seq Scan on t2
+  (5 rows)
+  ```
+
+- Use **hashagg**.
+
+  If a plan involving groupAgg and SORT operations generated by the **GROUP BY** statement is poor in performance, you can set **work_mem** to a larger value to generate a **hashagg** plan, which does not require sorting and improves the performance.
+
+- Replace functions with **CASE** statements.
+
+  The MogDB performance greatly deteriorates if a large number of functions are called. In this case, you can change the pushdown functions to **CASE** statements.
+
+- Do not use functions or expressions for indexes.
+
+  Using functions or expressions on indexed columns stops indexing. Instead, it enables scanning on the full table.
+
+- Do not use **!=** or **<>** operators, **NULL**, **OR**, or implicit parameter conversion in **WHERE** clauses.
+
+- Split complex SQL statements.
+
+  You can split an SQL statement into several statements and save the execution results to a temporary table if the SQL statement is too complex to be tuned using the solutions above (see the sketch following this list), including but not limited to the following scenarios:
+
+  - The same subquery is involved in multiple SQL statements of a job and the subquery contains a large amount of data.
+  - An inaccurate plan cost estimate results in an undersized hash bucket for the subquery. For example, the actual number of rows is 10 million, but the hash bucket is sized for only 1000 rows.
+  - Functions such as **substr** and **to_number** cause inaccurate estimates for subqueries containing large amounts of data.
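+
+  As a minimal sketch of the splitting technique (reusing the hypothetical tables **t1** and **t2** from the example above):
+
+  ```sql
+  -- Materialize the shared subquery once.
+  CREATE TEMPORARY TABLE tmp_sub AS
+  SELECT c1, count(*) AS cnt
+  FROM t1
+  GROUP BY c1;
+
+  -- Refresh statistics so later plans are costed on real row counts.
+  ANALYZE tmp_sub;
+
+  -- Reference the temporary table from the simplified statements.
+  SELECT t2.d2, s.cnt
+  FROM t2
+  JOIN tmp_sub s ON t2.d2 = s.c1;
+  ```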
diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md index a5ab0944..9fbe0358 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md @@ -1,134 +1,136 @@ ---- -title: Introduction to the SQL Execution Plan -summary: Introduction to the SQL Execution Plan -author: Guo Huan -date: 2021-03-16 ---- - -# Introduction to the SQL Execution Plan - -## Overview - -The SQL execution plan is a node tree, which displays detailed procedure when MogDB runs an SQL statement. A database operator indicates one step. - -You can run the **EXPLAIN** command to view the execution plan generated for each query by an optimizer. The output of **EXPLAIN** has one row for each execution node, showing the basic node type and the cost estimation that the optimizer made for the execution of this node, as shown in Figure 1. - -**Figure 1** SQL execution plan example - -![sql-execution-plan-example](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/introduction-to-the-sql-execution-plan-1.png) - -- Nodes at the bottom level are scan nodes. They scan tables and return raw rows. The types of scan nodes (sequential scans and index scans) vary depending on the table access methods. Objects scanned by the bottom layer nodes may not be row-store data (not directly read from a table), such as **VALUES** clauses and functions that return rows, which have their own types of scan nodes. - -- If the query requires join, aggregation, sorting, or other operations on the raw rows, there will be other nodes above the scan nodes to perform these operations. In addition, there is more than one way to perform these operations, so different types of execution nodes may be displayed here. - -- The first row (the upper-layer node) estimates the total execution cost of the execution plan. Such an estimate indicates the value that the optimizer tries to minimize. - -### Execution Plan Information - -In addition to setting different display formats for an execution plan, you can use different **EXPLAIN** syntax to display execution plan information in detail. The following lists the common **EXPLAIN** syntax. For details about more **EXPLAIN** syntax, see EXPLAIN. - -- EXPLAIN **statement**: only generates an execution plan and does not execute. The *statement* indicates SQL statements. -- EXPLAIN ANALYZE **statement**: generates and executes an execution plan, and displays the execution summary. Then actual execution time statistics are added to the display, including the total elapsed time expended within each plan node (in milliseconds) and the total number of rows it actually returned. -- EXPLAIN PERFORMANCE **statement**: generates and executes the execution plan, and displays all execution information. - -To measure the run time cost of each node in the execution plan, the current execution of **EXPLAIN ANALYZE** or **EXPLAIN PERFORMANCE** adds profiling overhead to query execution. Running **EXPLAIN ANALYZE** or **EXPLAIN PERFORMANCE** on a query sometimes takes longer time than executing the query normally. The amount of overhead depends on the nature of the query, as well as the platform being used. 
- -Therefore, if an SQL statement is not finished after being running for a long time, run the **EXPLAIN** statement to view the execution plan and then locate the fault. If the SQL statement has been properly executed, run the **EXPLAIN ANALYZE** or **EXPLAIN PERFORMANCE** statement to check the execution plan and information to locate the fault. - -## Description - -As described in [Overview](#Overview), **EXPLAIN** displays the execution plan, but will not actually run SQL statements. **EXPLAIN ANALYZE** and **EXPLAIN PERFORMANCE** both will actually run SQL statements and return the execution information. This section describes the execution plan and execution information in detail. - -### Execution Plans - -The following SQL statement is used as an example: - -```sql -SELECT * FROM t1, t2 WHERE t1.c1 = t2.c2; -``` - -Run the **EXPLAIN** command and the output is as follows: - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/introduction-to-the-sql-execution-plan-2.png) - -**Interpretation of the execution plan level (vertical)**: - -1. Layer 1:**Seq Scan on t2** - - The table scan operator scans the table **t2** using **Seq Scan**. At this layer, data in the table **t2** is read from a buffer or disk, and then transferred to the upper-layer node for calculation. - -2. Layer 2:**Hash** - - Hash operator. It is used to calculate the hash value of the operator transferred from the lower layer for subsequent hash join operations. - -3. Layer 3:**Seq Scan on t1** - - The table scan operator scans the table **t1** using **Seq Scan**. At this layer, data in the table **t1** is read from a buffer or disk, and then transferred to the upper-layer node for hash join calculation. - -4. Layer 4:**Hash Join** - - Join operator. It is used to join data in the **t1** and **t2** tables using the hash join method and output the result data. - -**Keywords in the execution plan**: - -1. Table access modes - - - Seq Scan - - Scans all rows of the table in sequence. - - - Index Scan - - The optimizer uses a two-step plan: the child plan node visits an index to find the locations of rows matching the index condition, and then the upper plan node actually fetches those rows from the table itself. Fetching rows separately is much more expensive than reading them sequentially, but because not all pages of the table have to be visited, this is still cheaper than a sequential scan. The upper-layer planning node sorts index-identified rows based on their physical locations before reading them. This minimizes the independent capturing overhead. - - If there are separate indexes on multiple columns referenced in **WHERE**, the optimizer might choose to use an **AND** or **OR** combination of the indexes. However, this requires the visiting of both indexes, so it is not necessarily a win compared to using just one index and treating the other condition as a filter. - - The following Index scans featured with different sorting mechanisms are involved: - - - Bitmap Index Scan - - Fetches data pages using a bitmap. - - - Index Scan using index_name - - Fetches table rows in index order, which makes them even more expensive to read. However, there are so few rows that the extra cost of sorting the row locations is unnecessary. This plan type is used mainly for queries fetching just a single row and queries having an **ORDER BY** condition that matches the index order, because no extra sorting step is needed to satisfy **ORDER BY**. - -2. 
Table connection modes - - - Nested Loop - - A nested loop is used for queries that have a smaller data set connected. In a nested loop join, the foreign table drives the internal table and each row returned from the foreign table should have a matching row in the internal table. The returned result set of all queries should be less than 10,000. The table that returns a smaller subset will work as a foreign table, and indexes are recommended for connection columns of the internal table. - - - (Sonic) Hash Join - - A hash join is used for large tables. The optimizer uses a hash join, in which rows of one table are entered into an in-memory hash table, after which the other table is scanned and the hash table is probed for matches to each row. Sonic and non-Sonic hash joins differ in their hash table structures, which do not affect the execution result set. - - - Merge Join - - In most cases, the execution performance of a merge join is lower than that of a hash join. However, if the source data has been pre-sorted and no more sorting is needed during the merge join, its performance excels. - -3. Operators - - - sort - - Sorts the result set. - - - filter - - The **EXPLAIN** output shows the **WHERE** clause being applied as a **Filter** condition attached to the **Seq Scan** plan node. This means that the plan node checks the condition for each row it scans, and returns only the ones that meet the condition. The estimated number of output rows has been reduced because of the **WHERE** clause. However, the scan will still have to visit all 10,000 rows, as a result, the cost is not decreased. It increases a bit (by 10,000 x **cpu_operator_cost**) to reflect the extra CPU time spent on checking the **WHERE** condition. - - - LIMIT - - Limits the number of output execution results. If a **LIMIT** condition is added, not all rows are retrieved. - -### Execution Information - -The following SQL statement is used as an example: - -```sql -select sum(t2.c1) from t1,t2 where t1.c1=t2.c2 group by t1.c2; -``` - -The output of running **EXPLAIN PERFORMANCE** is as follows: - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/introduction-to-the-sql-execution-plan-3.png) +--- +title: Introduction to the SQL Execution Plan +summary: Introduction to the SQL Execution Plan +author: Guo Huan +date: 2021-03-16 +--- + +# Introduction to the SQL Execution Plan + +## Overview + +The SQL execution plan is a node tree, which displays detailed procedure when MogDB runs an SQL statement. A database operator indicates one step. + +You can run the **EXPLAIN** command to view the execution plan generated for each query by an optimizer. The output of **EXPLAIN** has one row for each execution node, showing the basic node type and the cost estimation that the optimizer made for the execution of this node, as shown in Figure 1. + +**Figure 1** SQL execution plan example + +![sql-execution-plan-example](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/introduction-to-the-sql-execution-plan-1.png) + +- Nodes at the bottom level are scan nodes. They scan tables and return raw rows. The types of scan nodes (sequential scans and index scans) vary depending on the table access methods. Objects scanned by the bottom layer nodes may not be row-store data (not directly read from a table), such as **VALUES** clauses and functions that return rows, which have their own types of scan nodes. 
+
+- If the query requires join, aggregation, sorting, or other operations on the raw rows, there will be other nodes above the scan nodes to perform these operations. In addition, there is more than one way to perform these operations, so different types of execution nodes may be displayed here.
+
+- The first row (the upper-layer node) estimates the total execution cost of the execution plan. Such an estimate indicates the value that the optimizer tries to minimize.
+
+### Execution Plan Information
+
+In addition to setting different display formats for an execution plan, you can use different **EXPLAIN** syntax to display execution plan information in detail. The following lists the common **EXPLAIN** syntax. For details about more **EXPLAIN** syntax, see EXPLAIN.
+
+- EXPLAIN **statement**: only generates an execution plan without executing the statement. *statement* indicates the SQL statement.
+- EXPLAIN ANALYZE **statement**: generates and executes an execution plan, and displays the execution summary. Actual execution time statistics are then added to the display, including the total elapsed time expended within each plan node (in milliseconds) and the total number of rows it actually returned.
+- EXPLAIN PERFORMANCE **statement**: generates and executes the execution plan, and displays all execution information.
+
+To measure the run time cost of each node in the execution plan, the current execution of **EXPLAIN ANALYZE** or **EXPLAIN PERFORMANCE** adds profiling overhead to query execution. Running **EXPLAIN ANALYZE** or **EXPLAIN PERFORMANCE** on a query sometimes takes longer time than executing the query normally. The amount of overhead depends on the nature of the query, as well as the platform being used.
+
+Therefore, if an SQL statement is not finished after running for a long time, run the **EXPLAIN** statement to view the execution plan and then locate the fault. If the SQL statement has been properly executed, run the **EXPLAIN ANALYZE** or **EXPLAIN PERFORMANCE** statement to check the execution plan and information to locate the fault.
+
+Lightweight execution of **EXPLAIN PERFORMANCE** produces output consistent with the standard **EXPLAIN PERFORMANCE** while greatly reducing the time spent on performance analysis.
+
+## Description
+
+As described in [Overview](#Overview), **EXPLAIN** displays the execution plan, but will not actually run SQL statements. **EXPLAIN ANALYZE** and **EXPLAIN PERFORMANCE** both will actually run SQL statements and return the execution information. This section describes the execution plan and execution information in detail.
+
+### Execution Plans
+
+The following SQL statement is used as an example:
+
+```sql
+SELECT * FROM t1, t2 WHERE t1.c1 = t2.c2;
+```
+
+Run the **EXPLAIN** command and the output is as follows:
+
+![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/introduction-to-the-sql-execution-plan-2.png)
+
+**Interpretation of the execution plan level (vertical)**:
+
+1. Layer 1:**Seq Scan on t2**
+
+   The table scan operator scans the table **t2** using **Seq Scan**. At this layer, data in the table **t2** is read from a buffer or disk, and then transferred to the upper-layer node for calculation.
+
+2. Layer 2:**Hash**
+
+   Hash operator. It is used to calculate the hash value of the operator transferred from the lower layer for subsequent hash join operations.
+
+3. Layer 3:**Seq Scan on t1**
+
+   The table scan operator scans the table **t1** using **Seq Scan**. 
At this layer, data in the table **t1** is read from a buffer or disk, and then transferred to the upper-layer node for hash join calculation. + +4. Layer 4:**Hash Join** + + Join operator. It is used to join data in the **t1** and **t2** tables using the hash join method and output the result data. + +**Keywords in the execution plan**: + +1. Table access modes + + - Seq Scan + + Scans all rows of the table in sequence. + + - Index Scan + + The optimizer uses a two-step plan: the child plan node visits an index to find the locations of rows matching the index condition, and then the upper plan node actually fetches those rows from the table itself. Fetching rows separately is much more expensive than reading them sequentially, but because not all pages of the table have to be visited, this is still cheaper than a sequential scan. The upper-layer planning node sorts index-identified rows based on their physical locations before reading them. This minimizes the independent capturing overhead. + + If there are separate indexes on multiple columns referenced in **WHERE**, the optimizer might choose to use an **AND** or **OR** combination of the indexes. However, this requires the visiting of both indexes, so it is not necessarily a win compared to using just one index and treating the other condition as a filter. + + The following Index scans featured with different sorting mechanisms are involved: + + - Bitmap Index Scan + + Fetches data pages using a bitmap. + + - Index Scan using index_name + + Fetches table rows in index order, which makes them even more expensive to read. However, there are so few rows that the extra cost of sorting the row locations is unnecessary. This plan type is used mainly for queries fetching just a single row and queries having an **ORDER BY** condition that matches the index order, because no extra sorting step is needed to satisfy **ORDER BY**. + +2. Table connection modes + + - Nested Loop + + A nested loop is used for queries that have a smaller data set connected. In a nested loop join, the foreign table drives the internal table and each row returned from the foreign table should have a matching row in the internal table. The returned result set of all queries should be less than 10,000. The table that returns a smaller subset will work as a foreign table, and indexes are recommended for connection columns of the internal table. + + - (Sonic) Hash Join + + A hash join is used for large tables. The optimizer uses a hash join, in which rows of one table are entered into an in-memory hash table, after which the other table is scanned and the hash table is probed for matches to each row. Sonic and non-Sonic hash joins differ in their hash table structures, which do not affect the execution result set. + + - Merge Join + + In most cases, the execution performance of a merge join is lower than that of a hash join. However, if the source data has been pre-sorted and no more sorting is needed during the merge join, its performance excels. + +3. Operators + + - sort + + Sorts the result set. + + - filter + + The **EXPLAIN** output shows the **WHERE** clause being applied as a **Filter** condition attached to the **Seq Scan** plan node. This means that the plan node checks the condition for each row it scans, and returns only the ones that meet the condition. The estimated number of output rows has been reduced because of the **WHERE** clause. However, the scan will still have to visit all 10,000 rows, as a result, the cost is not decreased. 
It increases a bit (by 10,000 x **cpu_operator_cost**) to reflect the extra CPU time spent on checking the **WHERE** condition. + + - LIMIT + + Limits the number of output execution results. If a **LIMIT** condition is added, not all rows are retrieved. + +### Execution Information + +The following SQL statement is used as an example: + +```sql +select sum(t2.c1) from t1,t2 where t1.c1=t2.c2 group by t1.c2; +``` + +The output of running **EXPLAIN PERFORMANCE** is as follows: + +![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/introduction-to-the-sql-execution-plan-3.png) diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/query-execution-process.md b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/query-execution-process.md index d74c989a..c6a57aa7 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/query-execution-process.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/query-execution-process.md @@ -1,57 +1,57 @@ ---- -title: Query Execution Process -summary: Query Execution Process -author: Guo Huan -date: 2021-03-16 ---- - -# Query Execution Process - -## Introduction - -The process from receiving SQL statements to the statement execution by the SQL engine is shown in Figure 1 and described in Table 1. The texts in red are steps where database administrators can optimize queries. - -**Figure 1** Execution process of query-related SQL statements by the SQL engine - -![execution-process-of-query-related-sql-statements-by-the-sql-engine](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/query-execution-process-en-1.png) - -**Table 1** Execution process of query-related SQL statements by the SQL engine - -| Step | Description | -| :------------------------------------- | :----------------------------------------------------------- | -| 1. Perform syntax and lexical parsing. | Converts the input SQL statements from the string data type to the formatted structure stmt based on the specified SQL statement rules. | -| 2. Perform semantic parsing. | Converts the formatted structure obtained from the previous step into objects that can be recognized by the database. | -| 3. Rewrite the query statements. | Converts the output of the previous step into the structure that optimizes the query execution. | -| 4. Optimize the query. | Determines the execution mode of SQL statements (the execution plan) based on the result obtained from the previous step and the internal database statistics. For details about how the internal database statistics and GUC parameters affect the query optimization (execution plan), see [Optimizing Queries Using Statistics](#Optimizing Queries Using Statistics) and [Optimizing Queries Using GUC parameters](#Optimizing Queries Using GUC parameters). | -| 5. Perform the query. | Executes the SQL statements based on the execution path specified in the previous step. Selecting a proper underlying storage mode improves the query execution efficiency. For details, see [Optimizing Queries Using the Underlying Storage](#Optimizing Queries Using the Underlying Storage). | - -## Optimizing Queries Using Statistics - -The MogDB optimizer is a typical Cost-based Optimization (CBO). By using CBO, the database calculates the number of tuples and the execution cost for each step under each execution plan based on the number of table tuples, column width, null record ratio, and characteristic values, such as distinct, MCV, and HB values, and certain cost calculation methods. 
The database then selects the execution plan that takes the lowest cost for the overall execution or for the return of the first tuple. These characteristic values are the statistics, which is the core for optimizing a query. Accurate statistics helps the planner select the most appropriate query plan. Generally, you can collect statistics of a table or that of some columns in a table using **ANALYZE**. You are advised to periodically execute **ANALYZE** or execute it immediately after you modified most contents in a table. - -## Optimizing Queries Using GUC parameters - -Optimizing queries aims to select an efficient execution mode. - -Take the following SQL statement as an example: - -``` -select count(1) -from customer inner join store_sales on (ss_customer_sk = c_customer_sk); -``` - -During execution of **customer inner join store_sales**, MogDB supports nested loop, merge join, and hash join. The optimizer estimates the result set sizes and the execution cost for each join mode based on the statistics on the **customer** and **store_sales** tables. It then compares the costs and selects the one costing the least. - -As described in the preceding content, the execution cost is calculated based on certain methods and statistics. If the actual execution cost cannot be accurately estimated, you need to optimize the execution plan by setting the GUC parameters. - -## Optimizing Queries Using the Underlying Storage - -MogDB supports row- and column-store tables. The selection of an underlying storage mode strongly depends on specific customer service scenarios. You are advised to use column-store tables for computing service scenarios (mainly involving association and aggregation operations) and row-store tables for service scenarios, such as point queries and massive **UPDATE** or **DELETE** executions. - -Optimization methods of each storage mode will be described in detail below. - -## Optimizing Queries by Rewriting SQL Statements - -Besides the preceding methods that improve the performance of the execution plan generated by the SQL engine, database administrators can also enhance SQL statement performance by rewriting SQL statements while retaining the original service logic based on the execution mechanism of the database and abundant practices. - -This requires that database administrators know the customer services well and have professional knowledge of SQL statements. Below chapters will describe some common SQL rewriting scenarios. +--- +title: Query Execution Process +summary: Query Execution Process +author: Guo Huan +date: 2021-03-16 +--- + +# Query Execution Process + +## Introduction + +The process from receiving SQL statements to the statement execution by the SQL engine is shown in Figure 1 and described in Table 1. The texts in red are steps where database administrators can optimize queries. + +**Figure 1** Execution process of query-related SQL statements by the SQL engine + +![execution-process-of-query-related-sql-statements-by-the-sql-engine](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/query-execution-process-en-1.png) + +**Table 1** Execution process of query-related SQL statements by the SQL engine + +| Step | Description | +| :------------------------------------- | :----------------------------------------------------------- | +| 1. Perform syntax and lexical parsing. | Converts the input SQL statements from the string data type to the formatted structure stmt based on the specified SQL statement rules. | +| 2. Perform semantic parsing. 
| Converts the formatted structure obtained from the previous step into objects that can be recognized by the database. |
+| 3. Rewrite the query statements. | Converts the output of the previous step into the structure that optimizes the query execution. |
+| 4. Optimize the query. | Determines the execution mode of SQL statements (the execution plan) based on the result obtained from the previous step and the internal database statistics. For details about how the internal database statistics and GUC parameters affect the query optimization (execution plan), see [Optimizing Queries Using Statistics](#Optimizing Queries Using Statistics) and [Optimizing Queries Using GUC parameters](#Optimizing Queries Using GUC parameters). |
+| 5. Perform the query. | Executes the SQL statements based on the execution path specified in the previous step. Selecting a proper underlying storage mode improves the query execution efficiency. For details, see [Optimizing Queries Using the Underlying Storage](#Optimizing Queries Using the Underlying Storage). |
+
+## Optimizing Queries Using Statistics
+
+The MogDB optimizer is a typical cost-based optimizer (CBO). With CBO, the database calculates the number of tuples and the execution cost for each step under each execution plan based on the number of table tuples, column width, null record ratio, and characteristic values, such as distinct, MCV, and HB values, and certain cost calculation methods. The database then selects the execution plan that takes the lowest cost for the overall execution or for the return of the first tuple. These characteristic values are the statistics, which are the core of query optimization. Accurate statistics help the planner select the most appropriate query plan. Generally, you can collect statistics of a table, or of some columns in a table, using **ANALYZE**. You are advised to execute **ANALYZE** periodically, or immediately after you have modified a large portion of a table.
+
+## Optimizing Queries Using GUC parameters
+
+Optimizing queries aims to select an efficient execution mode.
+
+Take the following SQL statement as an example:
+
+```
+select count(1)
+from customer inner join store_sales on (ss_customer_sk = c_customer_sk);
+```
+
+During execution of **customer inner join store_sales**, MogDB supports nested loop, merge join, and hash join. The optimizer estimates the result set sizes and the execution cost for each join mode based on the statistics on the **customer** and **store_sales** tables. It then compares the costs and selects the one costing the least.
+
+As described in the preceding content, the execution cost is calculated based on certain methods and statistics. If the actual execution cost cannot be accurately estimated, you need to optimize the execution plan by setting the GUC parameters.
+
+## Optimizing Queries Using the Underlying Storage
+
+MogDB supports row- and column-store tables. The selection of an underlying storage mode strongly depends on specific customer service scenarios. You are advised to use column-store tables for computing service scenarios (mainly involving association and aggregation operations) and row-store tables for service scenarios, such as point queries and massive **UPDATE** or **DELETE** executions.
+
+Optimization methods of each storage mode will be described in detail below.
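+
+For example, the storage model is chosen per table at creation time through the `ORIENTATION` storage option. A minimal sketch with illustrative table names:
+
+```sql
+-- Row store (the default): suited to point queries and massive UPDATE or DELETE executions.
+CREATE TABLE orders_row (order_id int PRIMARY KEY, customer_id int, note text);
+
+-- Column store: suited to association (join) and aggregation workloads.
+CREATE TABLE sales_col (sale_id int, amount numeric(10,2), region text)
+WITH (ORIENTATION = COLUMN);
+```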
+
+## Optimizing Queries by Rewriting SQL Statements
+
+Besides the preceding methods that improve the performance of the execution plan generated by the SQL engine, database administrators can also enhance SQL statement performance by rewriting SQL statements, while retaining the original service logic, based on the execution mechanism of the database and abundant practice.
+
+This requires that database administrators know the customer services well and have professional knowledge of SQL statements. The following sections describe some common SQL rewriting scenarios.
diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/resetting-key-parameters-during-sql-tuning.md b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/resetting-key-parameters-during-sql-tuning.md
index 676c059b..68628091 100644
--- a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/resetting-key-parameters-during-sql-tuning.md
+++ b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/resetting-key-parameters-during-sql-tuning.md
@@ -1,27 +1,27 @@
----
-title: Resetting Key Parameters During SQL Tuning
-summary: Resetting Key Parameters During SQL Tuning
-author: Guo Huan
-date: 2021-03-16
----
-
-# Resetting Key Parameters During SQL Tuning
-
-| Parameter/Reference Value | Description |
-| :----------------------------- | :----------------------------------------------------------- |
-| enable_nestloop=on | Specifies how the optimizer uses **Nest Loop Join**. If this parameter is set to **on**, the optimizer preferentially uses **Nest Loop Join**. If it is set to **off**, the optimizer preferentially uses other methods, if any.<br>
NOTE:
If you only want to temporarily change the value of this parameter during the current database connection (that is, the current session), execute the following SQL statement:
`SET enable_nestloop to off;`
By default, this parameter is set to **on**. Change the value as required. Generally, nested loop join has the poorest performance among the three **JOIN** methods (nested loop join, merge join, and hash join). You are advised to set this parameter to **off**. | -| enable_bitmapscan=on | Specifies whether the optimizer uses bitmap scanning. If the value is **on**, bitmap scanning is used. If the value is **off**, it is not used.
NOTE:
If you only want to temporarily change the value of this parameter during the current database connection (that is, the current session), execute the following SQL statement:
`SET enable_bitmapscan to off;`
The bitmap scanning applies only in the query condition where **a > 1 and b > 1** and indexes are created on columns **a** and **b**. During performance tuning, if the query performance is poor and bitmapscan operators are in the execution plan, set this parameter to **off** and check whether the performance is improved. | -| enable_hashagg=on | Specifies whether to enable the optimizer's use of Hash-aggregation plan types. | -| enable_hashjoin=on | Specifies whether to enable the optimizer's use of Hash-join plan types. | -| enable_mergejoin=on | Specifies whether to enable the optimizer's use of Hash-merge plan types. | -| enable_indexscan=on | Specifies whether to enable the optimizer's use of index-scan plan types. | -| sql_beta_feature | Specifies whether to enable the optimizer's use of index-only-scan plan types. | -| enable_seqscan=on | Specifies whether the optimizer uses bitmap scanning. It is impossible to suppress sequential scans entirely, but setting this variable to **off** encourages the optimizer to choose other methods if available. | -| enable_sort=on | Specifies the optimizer sorts. It is impossible to fully suppress explicit sorts, but setting this variable to **off** allows the optimizer to preferentially choose other methods if available. | -| rewrite_rule | Specifies whether the optimizer enables the **LAZY\_AGG** and **MAGIC_SET** rewriting rules. | -| sql_beta_feature | Determines whether the optimizer enables the SEL_SEMI_POISSON, SEL_EXPR_INSTR, PARAM_PATH_GEN, RAND_COST_OPT, PARAM_PATH_OPT, PAGE_EST_OPT, CANONICAL_PATHKEY, PARTITION_OPFUSION, PREDPUSH_SAME_LEVEL, PARTITION_FDW_ON, DISABLE_BITMAP_COST_WITH_LOSSY_PAGES beta features. | -| var_eq_const_selectivity | Determines whether the optimizer uses histograms to calculate the integer constant selection rate. | -| partition_page_estimation | Determines whether to optimize the estimation of partitioned table page based on the pruning result. Only the partitioned table page and local index page are included, and the global index page is not included. The estimation formula is:
Number of pages after estimation = Number of pages in the partitioned table x (Number of partitions after pruning/Number of partitions) | -| partition_iterator_elimination | Determines whether to eliminate the partition iteration operator to improve execution efficiency when the partition pruning result of a partitioned table is a partition. | -| enable_functional_dependency | Determines whether to use the functional dependency statistics.
If the value is **on**:
- The statistics about multiple columns generated by ANALYZE contain functional dependency statistics.
- Functional dependency statistics are used to calculate the selection rate.
If the value is **off**:
- The statistics about multiple columns generated by ANALYZE do not contain functional dependency statistics.
- Functional dependency statistics are not used to calculate the selection rate.
description:
The concept of functional dependency comes from the relational database normal form, indicating the functional relationship between attributes. The concept of functional dependency statistics extends the preceding concept. It indicates the ratio of the data volume that meets the functional relationship to the total data volume.
Functional dependency statistics are a type of multi-column statistics, which can be used to improve the accuracy of selection rate estimation. Functional dependency statistics are applicable to the "where a = 1 and b = 1" format. The a and b must be attributes of the same table. The constraint is an equation constraint. The constraint is connected by AND. There are at least two constraints. | -| enable_seqscan_fusion | Determines whether to enable seqscan background noise elimination. | +--- +title: Resetting Key Parameters During SQL Tuning +summary: Resetting Key Parameters During SQL Tuning +author: Guo Huan +date: 2021-03-16 +--- + +# Resetting Key Parameters During SQL Tuning + +| Parameter/Reference Value | Description | +| :----------------------------- | :----------------------------------------------------------- | +| enable_nestloop=on | Specifies how the optimizer uses **Nest Loop Join**. If this parameter is set to **on**, the optimizer preferentially uses **Nest Loop Join**. If it is set to **off**, the optimizer preferentially uses other methods, if any.
NOTE:
If you only want to temporarily change the value of this parameter during the current database connection (that is, the current session), execute the following SQL statement:
`SET enable_nestloop to off;`
By default, this parameter is set to **on**. Change the value as required. Generally, nested loop join has the poorest performance among the three **JOIN** methods (nested loop join, merge join, and hash join). You are advised to set this parameter to **off**. | +| enable_bitmapscan=on | Specifies whether the optimizer uses bitmap scanning. If the value is **on**, bitmap scanning is used. If the value is **off**, it is not used.
NOTE:
If you only want to temporarily change the value of this parameter during the current database connection (that is, the current session), execute the following SQL statement:
`SET enable_bitmapscan to off;`
Bitmap scanning applies only to query conditions such as **a > 1 and b > 1**, where indexes are created on columns **a** and **b**. During performance tuning, if the query performance is poor and bitmapscan operators are in the execution plan, set this parameter to **off** and check whether the performance is improved. |
+| enable_hashagg=on | Specifies whether to enable the optimizer's use of Hash-aggregation plan types. |
+| enable_hashjoin=on | Specifies whether to enable the optimizer's use of Hash-join plan types. |
+| enable_mergejoin=on | Specifies whether to enable the optimizer's use of merge-join plan types. |
+| enable_indexscan=on | Specifies whether to enable the optimizer's use of index-scan plan types. |
+| enable_indexonlyscan=on | Specifies whether to enable the optimizer's use of index-only-scan plan types. |
+| enable_seqscan=on | Specifies whether the optimizer uses sequential scanning. It is impossible to suppress sequential scans entirely, but setting this variable to **off** encourages the optimizer to choose other methods if available. |
+| enable_sort=on | Specifies whether the optimizer uses sort operations. It is impossible to fully suppress explicit sorts, but setting this variable to **off** allows the optimizer to preferentially choose other methods if available. |
+| rewrite_rule | Specifies whether the optimizer enables the **LAZY\_AGG** and **MAGIC_SET** rewriting rules. |
+| sql_beta_feature | Determines whether the optimizer enables the SEL_SEMI_POISSON, SEL_EXPR_INSTR, PARAM_PATH_GEN, RAND_COST_OPT, PARAM_PATH_OPT, PAGE_EST_OPT, CANONICAL_PATHKEY, PARTITION_OPFUSION, PREDPUSH_SAME_LEVEL, PARTITION_FDW_ON, DISABLE_BITMAP_COST_WITH_LOSSY_PAGES beta features. |
+| var_eq_const_selectivity | Determines whether the optimizer uses histograms to calculate the integer constant selection rate. |
+| partition_page_estimation | Determines whether to optimize the estimation of partitioned table pages based on the pruning result. Only partitioned table pages and local index pages are included; global index pages are not included. The estimation formula is:
Number of pages after estimation = Number of pages in the partitioned table x (Number of partitions after pruning/Number of partitions) |
+| partition_iterator_elimination | Determines whether to eliminate the partition iterator operator to improve execution efficiency when the partition pruning result of a partitioned table is a single partition. |
+| enable_functional_dependency | Determines whether to use functional dependency statistics.
If the value is **on**:
- The statistics about multiple columns generated by ANALYZE contain functional dependency statistics.
- Functional dependency statistics are used to calculate the selection rate.
If the value is **off**:
- The statistics about multiple columns generated by ANALYZE do not contain functional dependency statistics.
- Functional dependency statistics are not used to calculate the selection rate.
Description:
The concept of functional dependency comes from the relational database normal form, indicating the functional relationship between attributes. The concept of functional dependency statistics extends the preceding concept. It indicates the ratio of the data volume that meets the functional relationship to the total data volume.
Functional dependency statistics are a type of multi-column statistics, which can be used to improve the accuracy of selection rate estimation. Functional dependency statistics apply to conditions of the form "where a = 1 and b = 1", where **a** and **b** must be attributes of the same table, each constraint must be an equality constraint, the constraints must be connected by AND, and at least two constraints are required. |
+| enable_seqscan_fusion | Determines whether to enable seqscan background noise elimination. |
diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/reviewing-and-modifying-a-table-definition.md b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/reviewing-and-modifying-a-table-definition.md
index 05a4a656..40e4a5e1 100644
--- a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/reviewing-and-modifying-a-table-definition.md
+++ b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/reviewing-and-modifying-a-table-definition.md
@@ -1,64 +1,64 @@
----
-title: Reviewing and Modifying a Table Definition
-summary: Reviewing and Modifying a Table Definition
-author: Guo Huan
-date: 2021-03-16
----
-
-# Reviewing and Modifying a Table Definition
-
-## Overview
-
-To properly define a table, you must:
-
-1. **Reduce the data volume scanned** by using the partition pruning mechanism.
-2. **Minimize random I/Os** by using clustering or partial clustering.
-
-The table definition is created during the database design and is reviewed and modified during the SQL statement optimization.
-
-## Selecting a Storage Model
-
-During database design, some key factors about table design will greatly affect the subsequent query performance of the database. Table design affects data storage as well. Scientific table design reduces I/O operations and minimizes memory usage, improving the query performance.
-
-Selecting a model for table storage is the first step of table definition. Select a proper storage model for your service based on the following table.
-
-| **Storage Model** | **Application Scenario** |
-| :---------------- | :----------------------------------------------------------- |
-| Row storage | Point queries (simple index-based queries that only return a few records)
Scenarios requiring frequent addition, deletion, and modification operations | -| Column storage | Statistics analysis query, in which operations, such as group and join, are performed many times | - -## Using PCKs - -1. The PCK is the column-store-based technology. It can minimize or maximize sparse indexes to quickly filter base tables. You are advised to select a maximum of two columns as PCKs. Use the following principles to specify PCKs: - 1. The selected PCKs must be restricted by simple expressions in base tables. Such constraints are usually represented by **col op const**, in which **col** indicates the column name, **op** indicates operators, (including =, >, >=, <=, and <), and **const** indicates constants. - 2. Select columns that are frequently selected (to filter much more undesired data) in simple expressions. - 3. List the less frequently selected columns on the top. - 4. List the columns of the enumerated type at the top. - -## Using Partitioned Tables - -Partitioning refers to splitting what is logically one large table into smaller physical pieces based on specific schemes. The table based on the logic is called a partitioned table, and a physical piece is called a partition. Data is stored in physical partitions not the logical table. A partitioned table has the following advantages over an ordinary table: - -1. High query performance: You can specify partitions when querying partitioned tables, improving query efficiency. -2. High availability: If a certain partition in a partitioned table is faulty, data in the other partitions is still available. -3. Easy maintenance: To fix a partitioned table having a faulty partition, you only need to fix the partition. - -MogDB supports range partitioned tables. - -Range partitioned table: Data in different ranges is mapped to different partitions. The range is determined by the partition key specified during the partitioned table creation. The partition key is usually a date. For example, sales data is partitioned by month. - -## Selecting a Data Type - -Use the following principles to select efficient data types: - -1. **Select data types that facilitate data calculation.** - - Generally, the calculation of integers (including common comparison calculations, such as =, >, <, ≥, ≤, and ≠ and group by) is more efficient than that of strings and floating point numbers. For example, if you need to perform a point query on a column-store table whose numeric column is used as a filter condition, the query will take over 10s. If you change the data type from **NUMERIC** to **INT**, the query duration will be reduced to 1.8s. - -2. **Select data types with a short length.** - - Data types with short length reduce both the data file size and the memory used for computing, improving the I/O and computing performance. For example, use **SMALLINT** instead of **INT**, and **INT** instead of **BIGINT**. - -3. **Use the same data type for a join.** - - You are advised to use the same data type for a join. To join columns with different data types, the database needs to convert them to the same type, which leads to additional performance overheads. +--- +title: Reviewing and Modifying a Table Definition +summary: Reviewing and Modifying a Table Definition +author: Guo Huan +date: 2021-03-16 +--- + +# Reviewing and Modifying a Table Definition + +## Overview + +To properly define a table, you must: + +1. **Reduce the data volume scanned** by using the partition pruning mechanism. +2. 
**Minimize random I/Os** by using clustering or partial clustering. + +The table definition is created during the database design and is reviewed and modified during the SQL statement optimization. + +## Selecting a Storage Model + +During database design, some key factors about table design will greatly affect the subsequent query performance of the database. Table design affects data storage as well. Scientific table design reduces I/O operations and minimizes memory usage, improving the query performance. + +Selecting a model for table storage is the first step of table definition. Select a proper storage model for your service based on the following table. + +| **Storage Model** | **Application Scenario** | +| :---------------- | :----------------------------------------------------------- | +| Row storage | Point queries (simple index-based queries that only return a few records)
Scenarios requiring frequent addition, deletion, and modification operations |
+| Column storage | Statistical analysis queries, in which operations such as group and join are performed many times |
+
+## Using PCKs
+
+1. The partial cluster key (PCK) is a column-store-based technology. It uses min/max sparse indexes to quickly filter base tables. You are advised to select a maximum of two columns as PCKs. Use the following principles to specify PCKs:
+    1. The selected PCKs must be restricted by simple expressions in base tables. Such constraints are usually represented by **col op const**, in which **col** indicates the column name, **op** indicates an operator (including =, >, >=, <=, and <), and **const** indicates a constant.
+    2. Select columns that are frequently selected (to filter much more undesired data) in simple expressions.
+    3. List the less frequently selected columns on the top.
+    4. List the columns of the enumerated type at the top.
+
+## Using Partitioned Tables
+
+Partitioning refers to splitting what is logically one large table into smaller physical pieces based on specific schemes. The table based on the logic is called a partitioned table, and a physical piece is called a partition. Data is stored in the physical partitions, not in the logical table. A partitioned table has the following advantages over an ordinary table:
+
+1. High query performance: You can specify partitions when querying partitioned tables, improving query efficiency.
+2. High availability: If a certain partition in a partitioned table is faulty, data in the other partitions is still available.
+3. Easy maintenance: To fix a partitioned table having a faulty partition, you only need to fix that partition.
+
+MogDB supports range partitioned tables.
+
+Range partitioned table: Data in different ranges is mapped to different partitions. The range is determined by the partition key specified during the partitioned table creation. The partition key is usually a date. For example, sales data is partitioned by month.
+
+## Selecting a Data Type
+
+Use the following principles to select efficient data types:
+
+1. **Select data types that facilitate data calculation.**
+
+    Generally, the calculation of integers (including common comparison calculations such as =, >, <, ≥, ≤, and ≠, as well as group by) is more efficient than that of strings and floating point numbers. For example, if you need to perform a point query on a column-store table whose numeric column is used as a filter condition, the query will take over 10s. If you change the data type from **NUMERIC** to **INT**, the query duration will be reduced to 1.8s.
+
+2. **Select data types with a short length.**
+
+    Data types with a short length reduce both the data file size and the memory used for computing, improving the I/O and computing performance. For example, use **SMALLINT** instead of **INT**, and **INT** instead of **BIGINT**.
+
+3. **Use the same data type for a join.**
+
+    You are advised to use the same data type for a join. To join columns with different data types, the database needs to convert them to the same type, which leads to additional performance overheads.
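+
+As a minimal sketch of the partitioning and data type guidance above (the table, column, and partition names are hypothetical), the following statement creates a range partitioned table keyed by date, using short integer types:
+
+```sql
+-- Sales records partitioned by month; a query filtering on sale_date
+-- only scans the partitions that survive pruning.
+CREATE TABLE sales
+(
+    sale_id   INT,
+    sale_date DATE,
+    amount    INT
+)
+PARTITION BY RANGE (sale_date)
+(
+    PARTITION p202401 VALUES LESS THAN ('2024-02-01'),
+    PARTITION p202402 VALUES LESS THAN ('2024-03-01'),
+    PARTITION pmax    VALUES LESS THAN (MAXVALUE)
+);
+```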
diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/sql-tuning-optimizer.md b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/sql-tuning-optimizer.md deleted file mode 100644 index 2833a223..00000000 --- a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/sql-tuning-optimizer.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Optimizer -summary: Optimizer -author: zhang cuiping -date: 2023-08-11 ---- - -# Optimizer - -Optimizer aims to create an optimized execution plan. A certain SQL query (query tree) can be actually executed in multiple modes, in which the same result set will generate. If possible, a query optimizer may check each possible execution plan and finally choose the one run in highest peed. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**Note**: In some cases, checking all possible execution modes of a query may cost much time and memory, especially for some queries in execution that involve a large number of connection operations. Optimizer is used to judge a proper query plan instead of an optimized query plan in proper time. - -An optimizer works with the data structure called paths. The data structure is a planned simplified version, including necessary information used for decisions making by an optimizer. After finding an economical path, it will make an integrated plan tree to pass it to an executor. It includes enough details, that is the plans to be executed, which can be read by the executor and run. In the following sections, the distinctions between paths and plans are ignored. - -## A Possible Plan Generated - -The optimizer generates a plan for each relation (table) that appears in the scan query. The possible plans are determined by indexes available for each relation. A sequential search is always possible once for a relation, so a sequential search plan is always created. Suppose there is an index defined on a relation (such as a BTree index) and a query contains the constraint `relation.attribute OPR constant`. If `relation.attribute` happens to match a keyword in a BTree index and `OPR` is one of the operators listed in the operator class of the index, then another plan will be created that scans the relation using the B-tree index. If there are other indexes, and the constraints in the query match the keywords in that index, more plans will be generated. - -If the query asks to link two or more relations, the plan for the linked relation will not be considered until all feasible plans have been found while scanning a single relation. There are three possible join strategies: - -- Nested loop join: The right relation is scanned once for each row found in the left relation. This strategy is easy to implement, but can be time-consuming. However, if the right relation can be scanned with an index, then this might be a good strategy. The index scan of the right relation can be performed using the number from the current row of the left relation as the key. -- Merge join: Before the join starts, each relation sorts the join fields. The two relations are then scanned concurrently, and the matching rows are combined to form the join row. This union is more attractive because each relation is scanned only once. The required sorting step can be done either by an explicit sorting step or by scanning the relations in the appropriate order using the index on the join key. 
-- Hash join: First the right relation is scanned and loaded into a Hash table with the joined field as a hash key, then the left relation is scanned and each row found is used as a hash key to locate the matching row in the table. - -If there are more than two relations in a query, the final result must be built through a tree of join steps, each with two inputs. The optimizer examines the possible join orders and finds the one with the least cost. - -If the query uses fewer relations than geqo_threshold, a near-exhaustive search is run in order to find the best access sequence. The planner will prefer to connect between any two relationships that have a corresponding entry clause in the `WHERE` qualification (eg. there is a relationship like `where rel1.attr1=rel2.attr2`). Consider joining only when there is no other choices. There is no join clause for a particular relation, that is, no join clause is available for another relation. The planner thinks of all possible plans for each joining pair, and one of the criteria for choosing a plan is (presumably) to choose the cheapest one. - -The finished query tree consists of a sequential or index scan of the underlying relation, plus nested loop, merge, and hash join nodes as needed, plus any auxiliary steps needed, such as sort nodes or aggregate function calculation nodes. Most of these planning node types have additional *selection* (discard rows that don't match a specified boolean condition) and *projection* (compute a derived set of fields based on the given field values, that is to compute a scalar expression if needed). One responsibility of the optimizer is to attach a selection condition from the `WHERE` clause and the output expression needed to compute the most appropriate node for the plan tree. 
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/sql-tuning.md b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/sql-tuning.md index 34d0be86..7690c5a2 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/sql-tuning.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/sql-tuning.md @@ -1,19 +1,18 @@ ---- -title: SQL Optimization -summary: SQL Optimization -author: zhang cuiping -date: 2023-08-11 ---- - -# SQL Optimization - -- **[Optimizer](sql-tuning-optimizer.md)** -- **[Query Execution Process](query-execution-process.md)** -- **[Introduction to the SQL Execution Plan](introduction-to-the-sql-execution-plan.md)** -- **[Tuning Process](tuning-process.md)** -- **[Updating Statistics](updating-statistics.md)** -- **[Reviewing and Modifying a Table Definition](reviewing-and-modifying-a-table-definition.md)** -- **[Typical SQL Optimization Methods](typical-sql-optimization-methods.md)** -- **[Experience in Rewriting SQL Statements](experience-in-rewriting-sql-statements.md)** -- **[Resetting Key Parameters During SQL Tuning](resetting-key-parameters-during-sql-tuning.md)** +--- +title: SQL Optimization +summary: SQL Optimization +author: zhang cuiping +date: 2023-08-11 +--- + +# SQL Optimization + +- **[Query Execution Process](query-execution-process.md)** +- **[Introduction to the SQL Execution Plan](introduction-to-the-sql-execution-plan.md)** +- **[Tuning Process](tuning-process.md)** +- **[Updating Statistics](updating-statistics.md)** +- **[Reviewing and Modifying a Table Definition](reviewing-and-modifying-a-table-definition.md)** +- **[Typical SQL Optimization Methods](typical-sql-optimization-methods.md)** +- **[Experience in Rewriting SQL Statements](experience-in-rewriting-sql-statements.md)** +- **[Resetting Key Parameters During SQL Tuning](resetting-key-parameters-during-sql-tuning.md)** - **[Hint-based Tuning](hint-based-tuning.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/tuning-process.md b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/tuning-process.md index 13a7e658..eb35ffac 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/tuning-process.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/tuning-process.md @@ -1,33 +1,33 @@ ---- -title: Tuning Process -summary: Tuning Process -author: Guo Huan -date: 2021-03-16 ---- - -# Tuning Process - -You can analyze slow SQL statements to optimize them. - -## Collect all table statistics associated with the SQL statements - -In a database, statistics indicate the source data of a plan generated by a planner. If no statistics are available or out of date, the execution plan may seriously deteriorate, leading to low performance. According to past experience, about 10% performance problems occurred because no statistics are collected. For details, see Updating Statistics. - -## View the execution plan to find out the cause - -If the SQL statements have been running for a long period of time and not ended, run the **EXPLAIN** statement to view the execution plan and then locate the fault. If the SQL statement has been properly executed, run the **EXPLAIN ANALYZE** or **EXPLAIN PERFORMANCE** statement to check the execution plan and information to locate the fault. For details about the execution plan, see Introduction to the SQL Execution Plan. 
-
-## Locate the specific cause of slow SQL statements and improvement measures for EXPLAIN or EXPLAIN PERFORMANCE statement
-
-It can be analyzed in four ways:
-
-- SQL Self-Diagnosis
-
-- Subquery Tuning
-- Statistics Tuning
-
-- Operator-level tuning
-
-## Generally, some SQL statements can be converted to its equivalent statements in all or certain scenarios by rewriting querie
-
-SQL statements are simpler after they are rewritten. Some execution steps can be simplified to improve the performance. Query rewriting methods are universal in all databases.
+---
+title: Tuning Process
+summary: Tuning Process
+author: Guo Huan
+date: 2021-03-16
+---
+
+# Tuning Process
+
+You can analyze slow SQL statements to optimize them.
+
+## Collect all table statistics associated with the SQL statements
+
+In a database, statistics indicate the source data of a plan generated by a planner. If no statistics are available or out of date, the execution plan may seriously deteriorate, leading to low performance. According to past experience, about 10% of performance problems occur because no statistics are collected. For details, see Updating Statistics.
+
+## View the execution plan to find out the cause
+
+If the SQL statements have been running for a long period of time and have not ended, run the **EXPLAIN** statement to view the execution plan and then locate the fault. If the SQL statement has been properly executed, run the **EXPLAIN ANALYZE** or **EXPLAIN PERFORMANCE** statement to check the execution plan and information to locate the fault. For details about the execution plan, see Introduction to the SQL Execution Plan.
+
+## Locate the specific cause of slow SQL statements and identify improvement measures from the EXPLAIN or EXPLAIN PERFORMANCE output
+
+It can be analyzed in four ways:
+
+- SQL Self-Diagnosis
+
+- Subquery Tuning
+- Statistics Tuning
+
+- Operator-level tuning
+
+## Generally, some SQL statements can be converted to their equivalent statements in all or certain scenarios by rewriting queries
+
+SQL statements are simpler after they are rewritten. Some execution steps can be simplified to improve the performance. Query rewriting methods are universal in all databases.
diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/updating-statistics.md b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/updating-statistics.md
index 15826f49..4f48392a 100644
--- a/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/updating-statistics.md
+++ b/product/en/docs-mogdb/v5.0/performance-tuning/sql-tuning/updating-statistics.md
@@ -1,54 +1,54 @@
----
-title: Updating Statistics
-summary: Updating Statistics
-author: Guo Huan
-date: 2021-03-16
----
-
-# Updating Statistics
-
-In a database, statistics indicate the source data of a plan generated by a planner. If no statistics are available or out of date, the execution plan may seriously deteriorate, leading to low performance.
-
-## Background
-
-The **ANALYZE** statement collects statistic about table contents in databases, which will be stored in the **PG_STATISTIC** system catalog. Then, the query optimizer uses the statistics to work out the most efficient execution plan.
-
-After executing batch insertions and deletions, you are advised to run the **ANALYZE** statement on the table or the entire library to update statistics. By default, 30,000 rows of statistics are sampled. That is, the default value of the GUC parameter **default_statistics_target** is **100**.
If the total number of rows in the table exceeds 1,600,000, you are advised to set **default_statistics_target** to **-2**, indicating that 2% of the statistics are collected.
-
-For an intermediate table generated during the execution of a batch script or stored procedure, you also need to run the **ANALYZE** statement.
-
-If there are multiple inter-related columns in a table and the conditions or grouping operations based on these columns are involved in the query, collect statistics about these columns so that the query optimizer can accurately estimate the number of rows and generate an effective execution plan.
-
-## Procedure
-
-Run the following commands to update the statistics about a table or the entire database:
-
-```sql
---Update statistics about a table.
-ANALYZE tablename;
----Update statistics about the entire database.
-ANALYZE;
-```
-
-Run the following statements to perform statistics-related operations on multiple columns:
-
-```sql
---Collect statistics about column_1 and column_2 of tablename.
-ANALYZE tablename ((column_1, column_2));
-
---Declare statistics about column_1 and column_2 of tablename.
-ALTER TABLE tablename ADD STATISTICS ((column_1, column_2));
-
---Collect statistics about one or more columns.
-ANALYZE tablename;
-
---Delete statistics about column_1 and column_2 of tablename or their statistics declaration.
-ALTER TABLE tablename DELETE STATISTICS ((column_1, column_2));
-```
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
-> After the statistics are declared for multiple columns by running the **ALTER TABLE** **tablename** **ADD STATISTICS** statement, the system collects the statistics about these columns next time **ANALYZE** is performed on the table or the entire database.
-> To collect the statistics, run the **ANALYZE** statement.
->
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-> Use **EXPLAIN** to show the execution plan of each SQL statement. If **rows=10** (the default value, probably indicating that the table has not been analyzed) is displayed in the **SEQ SCAN** output of a table, run the **ANALYZE** statement for this table.
+---
+title: Updating Statistics
+summary: Updating Statistics
+author: Guo Huan
+date: 2021-03-16
+---
+
+# Updating Statistics
+
+In a database, statistics indicate the source data of a plan generated by a planner. If no statistics are available or out of date, the execution plan may seriously deteriorate, leading to low performance.
+
+## Background
+
+The **ANALYZE** statement collects statistics about table contents in databases, which will be stored in the **PG_STATISTIC** system catalog. Then, the query optimizer uses the statistics to work out the most efficient execution plan.
+
+After executing batch insertions and deletions, you are advised to run the **ANALYZE** statement on the table or the entire database to update statistics. By default, 30,000 rows of statistics are sampled. That is, the default value of the GUC parameter **default_statistics_target** is **100**. If the total number of rows in the table exceeds 1,600,000, you are advised to set **default_statistics_target** to **-2**, indicating that 2% of the statistics are collected.
+
+For an intermediate table generated during the execution of a batch script or stored procedure, you also need to run the **ANALYZE** statement.
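+
+As a minimal sketch of the sampling guidance above (**big_table** is a hypothetical table name), you can switch to percentage-based sampling for the current session and then re-collect statistics:
+
+```sql
+-- Sample 2% of the rows instead of the default 30,000-row sample.
+SET default_statistics_target = -2;
+-- Re-collect statistics with the new sampling ratio.
+ANALYZE big_table;
+```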
+ +If there are multiple inter-related columns in a table and the conditions or grouping operations based on these columns are involved in the query, collect statistics about these columns so that the query optimizer can accurately estimate the number of rows and generate an effective execution plan. + +## Procedure + +Run the following commands to update the statistics about a table or the entire database: + +```sql +--Update statistics about a table. +ANALYZE tablename; +---Update statistics about the entire database. +ANALYZE; +``` + +Run the following statements to perform statistics-related operations on multiple columns: + +```sql +--Collect statistics about column_1 and column_2 of tablename. +ANALYZE tablename ((column_1, column_2)); + +--Declare statistics about column_1 and column_2 of tablename. +ALTER TABLE tablename ADD STATISTICS ((column_1, column_2)); + +--Collect statistics about one or more columns. +ANALYZE tablename; + +--Delete statistics about column_1 and column_2 of tablename or their statistics declaration. +ALTER TABLE tablename DELETE STATISTICS ((column_1, column_2)); +``` + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> After the statistics are declared for multiple columns by running the **ALTER TABLE** **tablename** **ADD STATISTICS** statement, the system collects the statistics about these columns next time **ANALYZE** is performed on the table or the entire database. +> To collect the statistics, run the **ANALYZE** statement. +> +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** +> Use **EXPLAIN** to show the execution plan of each SQL statement. If **rows=10** (the default value, probably indicating that the table has not been analyzed) is displayed in the **SEQ SCAN** output of a table, run the **ANALYZE** statement for this table. diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-llvm.md b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-llvm.md index 56bb284f..3b5b9250 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-llvm.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-llvm.md @@ -1,75 +1,75 @@ ---- -title: Configuring LLVM -summary: Configuring LLVM -author: Liu Xu -date: 2021-03-04 ---- - -# Configuring LLVM - -Low Level Virtual Machine (LLVM) dynamic compilation can be used to generate customized machine code for each query to replace original common functions. Query performance is improved by reducing redundant judgment conditions and virtual function calls, and by making local data more accurate during actual queries. - -LLVM needs to consume extra MogDB to pre-generate intermediate representation (IR) and compile it into codes. Therefore, if the data volume is small or if a query itself consumes less MogDB, the performance deteriorates. - -## LLVM Application Scenarios and Restrictions - -### Application Scenarios - -- Expressions supporting LLVM - - The query statements that contain the following expressions support LLVM optimization: - - 1. Case…when… - 2. IN - 3. Bool (AND/OR/NOT) - 4. BooleanTest (IS_NOT_KNOWN/IS_UNKNOWN/IS_TRUE/IS_NOT_TRUE/IS_FALSE/IS_NOT_FALSE) - 5. NullTest (IS_NOT_NULL/IS_NULL) - 6. Operator - 7. Function - 8. Nullif - - Supported data types for expression computing are bool, tinyint, smallint, int, bigint, float4, float8, numeric, date, MogDB, MogDBtz, MogDBstamp, MogDBstamptz, interval, bpchar, varchar, text, and oid. 
- - Consider using LLVM dynamic compilation and optimization only if expressions are used in the following content in a vectorized executor: **filter** in the **Scan** node; **complicate hash condition**, **hash join filter**, and **hash join target** in the **Hash Join** node; **filter** and **join filter** in the **Nested Loop** node; **merge join filter** and **merge join target** in the **Merge Join** node; and **filter** in the **Group** node. - -- Operators supporting LLVM - - 1. Join: HashJoin - 2. Agg: HashAgg - 3. Sort - - Where HashJoin supports only Hash Inner Join, and the corresponding hash cond supports comparisons between int4, bigint, and bpchar. HashAgg supports sum and avg operations of bigint and numeric data types. Group By statements supports int4, bigint, bpchar, text, varchar, MogDBstamp, and count(*) aggregation operation. Sort supports only comparisons between int4, bigint, numeric, bpchar, text, and varchar data types. Except the preceding operations, LLVM dynamic compilation and optimization cannot be used. You can use the explain performance tool to check whether LLVM dynamic compilation and optimization can be used. - -### Non-applicable Scenarios - -- Tables that have small amount of data cannot be dynamically compiled. -- Query jobs with a non-vectorized execution path cannot be generated. - -## Other Factors Affecting LLVM Performance - -The LLVM optimization effect depends on not only operations and computing in the database, but also the selected hardware environment. - -- Number of C functions called by expressions - - CodeGen cannot be used in all expressions in an entire expression, that is, some expressions use CodeGen while others invoke original C codes for calculation. In an entire expression, if more expressions invoke original C codes, LLVM dynamic compilation and optimization may reduce the calculation performance. By setting **log_min_message** to **DEBUG1**, you can view expressions that directly invoke C codes. - -- Memory resources - - One of the key LLVM features is to ensure the locality of data, that is, data should be stored in registers as much as possible. Data loading should be reduced at the same MogDB. Therefore, when using LLVM optimization, value of **work_mem** must be set as large as required to ensure that codes are processed in the memory using corresponding LLVM. Otherwise, performance deteriorates. - -- Optimizer cost estimation - - The LLVM feature realizes a simple cost estimation model. You can determine whether to use LLVM dynamic compilation and optimization for the current node based on the tables involved in the node computing. If the optimizer understates or overestimates the actual number of rows involved, the income cannot be obtained. - -## Recommended Suggestions for LLVM - -Currently, the LLVM is enabled by default in the database kernel, and users can perform related configurations on it. The overall suggestions are as follows: - -1. Set **work_mem** to an appropriate value and set it to a large value in allowed conditions. If much data is spilled to disks, you are advised to disable the LLVM dynamic compilation and optimization by setting **enable_codegen** to **off**). - -2. Set **codegen_cost_threshold** to an appropriate value (The default value is **10000**). Ensure that LLVM dynamic compilation and optimization is not used when the data volume is small. After the value of **codegen_cost_threshold** is set, the database performance may deteriorate due to the use of LLVM dynamic compilation and optimization. 
In this case, you are advised to increase the parameter value.
-
-3. If a large number of C functions are called, you are advised not to use the LLVM dynamic compilation and optimization.
-
-   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-   > If resources are robust, the larger the data volume is, the better the performance improvement effect is.
+---
+title: Configuring LLVM
+summary: Configuring LLVM
+author: Liu Xu
+date: 2021-03-04
+---
+
+# Configuring LLVM
+
+Low Level Virtual Machine (LLVM) dynamic compilation can be used to generate customized machine code for each query to replace original common functions. Query performance is improved by reducing redundant judgment conditions and virtual function calls, and by making local data more accurate during actual queries.
+
+LLVM needs to consume extra time to pre-generate intermediate representation (IR) and compile it into code. Therefore, if the data volume is small or if a query itself consumes little time, the performance deteriorates.
+
+## LLVM Application Scenarios and Restrictions
+
+### Application Scenarios
+
+- Expressions supporting LLVM
+
+  The query statements that contain the following expressions support LLVM optimization:
+
+  1. Case…when…
+  2. IN
+  3. Bool (AND/OR/NOT)
+  4. BooleanTest (IS_NOT_KNOWN/IS_UNKNOWN/IS_TRUE/IS_NOT_TRUE/IS_FALSE/IS_NOT_FALSE)
+  5. NullTest (IS_NOT_NULL/IS_NULL)
+  6. Operator
+  7. Function
+  8. Nullif
+
+  Supported data types for expression computing are bool, tinyint, smallint, int, bigint, float4, float8, numeric, date, time, timetz, timestamp, timestamptz, interval, bpchar, varchar, text, and oid.
+
+  Consider using LLVM dynamic compilation and optimization only if expressions are used in the following content in a vectorized executor: **filter** in the **Scan** node; **complicate hash condition**, **hash join filter**, and **hash join target** in the **Hash Join** node; **filter** and **join filter** in the **Nested Loop** node; **merge join filter** and **merge join target** in the **Merge Join** node; and **filter** in the **Group** node.
+
+- Operators supporting LLVM
+
+  1. Join: HashJoin
+  2. Agg: HashAgg
+  3. Sort
+
+  Where HashJoin supports only Hash Inner Join, and the corresponding hash cond supports comparisons between int4, bigint, and bpchar. HashAgg supports sum and avg operations of bigint and numeric data types. Group By statements support the int4, bigint, bpchar, text, varchar, and timestamp data types as well as the count(*) aggregation operation. Sort supports only comparisons between int4, bigint, numeric, bpchar, text, and varchar data types. Except the preceding operations, LLVM dynamic compilation and optimization cannot be used. You can use the explain performance tool to check whether LLVM dynamic compilation and optimization can be used.
+
+### Non-applicable Scenarios
+
+- Tables that have a small amount of data cannot be dynamically compiled.
+- Query jobs for which a vectorized execution path cannot be generated.
+
+## Other Factors Affecting LLVM Performance
+
+The LLVM optimization effect depends on not only operations and computing in the database, but also the selected hardware environment.
+
+- Number of C functions called by expressions
+
+  CodeGen cannot be used for all expressions within an entire expression, that is, some expressions use CodeGen while others invoke original C code for calculation. In an entire expression, if more expressions invoke original C code, LLVM dynamic compilation and optimization may reduce the calculation performance.
By setting **log_min_messages** to **DEBUG1**, you can view expressions that directly invoke C code.
+
+- Memory resources
+
+  One of the key LLVM features is to ensure the locality of data, that is, data should be stored in registers as much as possible, and data loading should be reduced at the same time. Therefore, when using LLVM optimization, the value of **work_mem** must be set as large as required to ensure that the LLVM-generated code is processed in memory. Otherwise, performance deteriorates.
+
+- Optimizer cost estimation
+
+  The LLVM feature realizes a simple cost estimation model. You can determine whether to use LLVM dynamic compilation and optimization for the current node based on the tables involved in the node computing. If the optimizer understates or overestimates the actual number of rows involved, the expected benefit cannot be obtained.
+
+## Recommended Suggestions for LLVM
+
+Currently, LLVM is enabled by default in the database kernel, and users can perform related configurations on it. The overall suggestions are as follows:
+
+1. Set **work_mem** to an appropriate value and set it to a large value in allowed conditions. If much data is spilled to disks, you are advised to disable the LLVM dynamic compilation and optimization (by setting **enable_codegen** to **off**).
+
+2. Set **codegen_cost_threshold** to an appropriate value (the default value is **10000**). Ensure that LLVM dynamic compilation and optimization is not used when the data volume is small. If the database performance deteriorates due to the use of LLVM dynamic compilation and optimization after **codegen_cost_threshold** is set, you are advised to increase the parameter value.
+
+3. If a large number of C functions are called, you are advised not to use the LLVM dynamic compilation and optimization.
+
+   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+   > If resources are robust, the larger the data volume is, the better the performance improvement effect is.
diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-smp.md b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-smp.md
index 56be1940..88e383ae 100644
--- a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-smp.md
+++ b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-smp.md
@@ -1,111 +1,111 @@
----
-title: Configuring SMP
-summary: Configuring SMP
-author: Guo Huan
-date: 2021-07-26
----
-
-# Configuring SMP
-
-This section describes the usage restrictions, application scenarios, and configuration guide of symmetric multiprocessing (SMP).
-
-## SMP Application Scenarios and Restrictions
-
-### Context
-
-The SMP feature improves the performance through operator parallelism and occupies more system resources, including CPU, memory, and I/O. Actually, SMP is a method consuming resources to save time. It improves system performance in appropriate scenarios and when resources are sufficient, but may deteriorate performance otherwise. SMP applies to analytical query scenarios where a single query takes a long time and the service concurrency is low. The SMP parallel technology can reduce the query delay and improve the system throughput performance. However, in a high transactional concurrency scenario, a single query has a short delay. In this case, using a multi-thread parallel technology increases the query delay and reduces the system throughput performance.
- -### Application Scenarios - -- Operators that support parallelism. The plan contains the following operators that support parallelism. - - - Scan: Row-store ordinary tables and row-store partitioned tables, and column-store ordinary tables and column-store partitioned tables can be sequentially scanned. - - Join: HashJoin and NestLoop - - Agg: HashAgg, SortAgg, PlainAgg, and WindowAgg (which supports only **partition by**, and does not support **order by**) - - Stream: Local Redistribute and Local Broadcast - - Others: Result, Subqueryscan, Unique, Material, Setop, Append, VectoRow, and RowToVec - -- SMP-specific operators: To execute queries in parallel, Stream operators are added for data exchange of the SMP feature. These new operators can be considered as the subtypes of Stream operators. - - - Local Gather aggregates data of parallel threads within an instance. - - Local Redistribute redistributes data based on the distributed key across threads within an instance. - - Local Broadcast broadcasts data to each thread within an instance. - - Local RoundRobin distributes data in polling mode across threads within an instance. - -- The following uses the **TPCH Q1** parallel plan as an example. - - ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/configuring-smp-1.png) - - In this plan, the Scan and HashAgg operators are processed in parallel, and the Local Gather operator is added for data exchange. Operator 3 is a Local Gather operator. "dop: 1⁄4" indicates that the degree of parallelism of the sender thread is 4 and the degree of parallelism of the receiver thread is 1. That is, the lower-layer HashAggregate operator 4 is executed based on the degree of parallelism 4, the upper-layer operators 1 and 2 are executed in serial mode, and operator 3 aggregates data of parallel threads within the instance. - - You can view the parallelism situation of each operator in the dop information. - -### Non-applicable Scenarios - -1. Index scanning cannot be executed in parallel. -2. MergeJoin cannot be executed in parallel. -3. WindowAgg order by cannot be executed in parallel. -4. The cursor cannot be executed in parallel. -5. Queries in stored procedures and functions cannot be executed in parallel. -6. Subplans and initplans cannot be queried in parallel, and operators that contain subqueries cannot be executed in parallel, either. -7. Query statements that contain the median operation cannot be executed in parallel. -8. Queries with global temporary tables cannot be executed in parallel. -9. Updating materialized views cannot be executed in parallel. - -## Resource Impact on SMP Performance - -The SMP architecture uses abundant resources to obtain time. After the plan parallelism is executed, the resource consumption is added, including the CPU, memory, and I/O resources. As the parallelism degree is expanded, the resource consumption increases. If these resources become a bottleneck, the SMP cannot improve the performance and the overall cluster performance may be deteriorated. The following information describes the situations that the SMP affects theses resources: - -- CPU resources - - In a general customer scenario, the system CPU usage is not high. Using the SMP parallelism architecture will fully use the CPU resources to improve the system performance. 
If the number of CPU kernels of the database server is too small and the CPU usage is already high, enabling the SMP parallelism may deteriorate the system performance due to resource competition between multiple threads. - -- Memory resources - - Query parallel causes memory usage growth, but the memory usage of each operator is still restricted by **work_mem** and other parameters. Assuming that **work_mem** is 4 GB and the degree of parallelism is 2, the memory usage of each thread in parallel is limited to 2 GB. When **work_mem** is small or the system memory is not sufficient, using SMP may flush data to disks. As a result, the query performance deteriorates. - -- I/O resources - - A parallel scan increases I/O resource consumption. It can improve scan performance only when I/O resources are sufficient. - -## Other Factors Affecting SMP Performance - -Besides resource factors, there are other factors that impact the SMP performance, such as uneven data distribution in a partitioned table and system parallelism degree. - -- Impact of data skew on SMP performance - - Severe data skew deteriorates SMP performance. For example, if the data volume of a value in the join column is much more than that of other values, the data volume of a parallel thread will be much more than that of others after Hash-based data redistribution, resulting in the long-tail issue and poor SMP performance. - -- Impact of system parallelism degree on the SMP performance - - The SMP feature uses more resources, and remaining resources are insufficient in a high concurrency scenario. Therefore, enabling the SMP function will result in severe resource competition among queries. Once resource competition occurs, no matter the CPU, I/O, or memory resources, all of them will result in entire performance deterioration. In the high concurrency scenario, enabling the SMP function will not improve the performance and even may cause performance deterioration. - -## Suggestions for Using SMP - -### Limitations - -To use the SMP feature to improve the performance, ensure that the following conditions are met: - -The CPU, memory, and I/O resources are sufficient. SMP is a solution that uses abundant resources to exchange time. After plan parallel is executed, resource consumption is increased. When these resources become a bottleneck, the SMP feature cannot improve the performance and even may deteriorate the performance. In the case of a resource bottleneck, you are advised to disable the SMP feature. - -### Procedure - -1. Observe the current system load situation. If resources are sufficient (the resource usage is smaller than 50%), perform step 2. Otherwise, exit this system. - -2. Set **query_dop** to **1** (default value). Use **explain** to generate an execution plan and check whether the plan can be used in scenarios in [SMP Application Scenarios and Restrictions](#smp-application-scenarios-and-restrictions). If yes, go to step 3. - -3. Set **query_dop** to **value**. The parallelism degree is 1 or **value** regardless of the resource usage and plan characteristics. - -4. Before the query statement is executed, set **query_dop** to an appropriate value. After the statement is executed, set **query_dop** to disable the query. The following provides an example: - - ``` - mogdb=# SET query_dop = 4; - mogdb=# SELECT COUNT(*) FROM t1 GROUP BY a; - ...... 
- mogdb=# SET query_dop = 1; - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - If resources are sufficient, the higher the parallelism degree is, the better the performance improvement effect is. - > - The SMP parallelism degree supports session level settings. You are advised to enable the SMP feature before executing a query that meets the requirements. After the execution is complete, disable the SMP feature. Otherwise, SMP may affect services in peak hours. +--- +title: Configuring SMP +summary: Configuring SMP +author: Guo Huan +date: 2021-07-26 +--- + +# Configuring SMP + +This section describes the usage restrictions, application scenarios, and configuration guide of symmetric multiprocessing (SMP). + +## SMP Application Scenarios and Restrictions + +### Context + +The SMP feature improves the performance through operator parallelism and occupies more system resources, including CPU, memory, and I/O. Actually, SMP is a method consuming resources to save time. It improves system performance in appropriate scenarios and when resources are sufficient, but may deteriorate performance otherwise. SMP applies to analytical query scenarios where a single query takes a long time and the service concurrency is low. The SMP parallel technology can reduce the query delay and improve the system throughput performance. However, in a high transactional concurrency scenario, a single query has a short delay. In this case, using a multi-thread parallel technology increases the query delay and reduces the system throughput performance. + +### Application Scenarios + +- Operators that support parallelism. The plan contains the following operators that support parallelism. + + - Scan: Row-store ordinary tables and row-store partitioned tables, and column-store ordinary tables and column-store partitioned tables can be sequentially scanned. + - Join: HashJoin and NestLoop + - Agg: HashAgg, SortAgg, PlainAgg, and WindowAgg (which supports only **partition by**, and does not support **order by**) + - Stream: Local Redistribute and Local Broadcast + - Others: Result, Subqueryscan, Unique, Material, Setop, Append, VectoRow, and RowToVec + +- SMP-specific operators: To execute queries in parallel, Stream operators are added for data exchange of the SMP feature. These new operators can be considered as the subtypes of Stream operators. + + - Local Gather aggregates data of parallel threads within an instance. + - Local Redistribute redistributes data based on the distributed key across threads within an instance. + - Local Broadcast broadcasts data to each thread within an instance. + - Local RoundRobin distributes data in polling mode across threads within an instance. + +- The following uses the **TPCH Q1** parallel plan as an example. + + ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/performance-tuning/configuring-smp-1.png) + + In this plan, the Scan and HashAgg operators are processed in parallel, and the Local Gather operator is added for data exchange. Operator 3 is a Local Gather operator. "dop: 1⁄4" indicates that the degree of parallelism of the sender thread is 4 and the degree of parallelism of the receiver thread is 1. That is, the lower-layer HashAggregate operator 4 is executed based on the degree of parallelism 4, the upper-layer operators 1 and 2 are executed in serial mode, and operator 3 aggregates data of parallel threads within the instance. + + You can view the parallelism situation of each operator in the dop information. 
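+
+For example, you can raise the degree of parallelism for the current session and check the dop information in the resulting plan (using the same hypothetical table **t1** as in the procedure example later in this section):
+
+```
+mogdb=# SET query_dop = 4;
+mogdb=# EXPLAIN SELECT COUNT(*) FROM t1 GROUP BY a;
+```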
+
+### Non-applicable Scenarios
+
+1. Index scanning cannot be executed in parallel.
+2. MergeJoin cannot be executed in parallel.
+3. WindowAgg order by cannot be executed in parallel.
+4. The cursor cannot be executed in parallel.
+5. Queries in stored procedures and functions cannot be executed in parallel.
+6. Subplans and initplans cannot be queried in parallel, and operators that contain subqueries cannot be executed in parallel, either.
+7. Query statements that contain the median operation cannot be executed in parallel.
+8. Queries with global temporary tables cannot be executed in parallel.
+9. Updating materialized views cannot be executed in parallel.
+
+## Resource Impact on SMP Performance
+
+The SMP architecture uses abundant resources to obtain time. After a plan is parallelized, resource consumption increases, including the CPU, memory, and I/O resources. As the degree of parallelism grows, the resource consumption increases. If these resources become a bottleneck, SMP cannot improve the performance, and the overall cluster performance may even deteriorate. The following describes how SMP affects these resources:
+
+- CPU resources
+
+  In a general customer scenario, the system CPU usage is not high. Using the SMP parallelism architecture will fully use the CPU resources to improve the system performance. If the number of CPU kernels of the database server is too small and the CPU usage is already high, enabling the SMP parallelism may deteriorate the system performance due to resource competition between multiple threads.
+
+- Memory resources
+
+  Parallel queries increase memory usage, but the memory usage of each operator is still restricted by **work_mem** and other parameters. Assuming that **work_mem** is 4 GB and the degree of parallelism is 2, the memory usage of each thread in parallel is limited to 2 GB. When **work_mem** is small or the system memory is not sufficient, using SMP may flush data to disks. As a result, the query performance deteriorates.
+
+- I/O resources
+
+  A parallel scan increases I/O resource consumption. It can improve scan performance only when I/O resources are sufficient.
+
+## Other Factors Affecting SMP Performance
+
+Besides resource factors, there are other factors that impact the SMP performance, such as uneven data distribution in a partitioned table and the system parallelism degree.
+
+- Impact of data skew on SMP performance
+
+  Severe data skew deteriorates SMP performance. For example, if the data volume of a value in the join column is much more than that of other values, the data volume of a parallel thread will be much more than that of others after Hash-based data redistribution, resulting in the long-tail issue and poor SMP performance.
+
+- Impact of system parallelism degree on the SMP performance
+
+  The SMP feature uses more resources, and remaining resources are insufficient in a high concurrency scenario. Therefore, enabling the SMP function will result in severe resource competition among queries. Once resource competition occurs, whether for CPU, I/O, or memory, overall performance deteriorates. In the high concurrency scenario, enabling the SMP function will not improve the performance and even may cause performance deterioration.
+
+## Suggestions for Using SMP
+
+### Limitations
+
+To use the SMP feature to improve the performance, ensure that the following conditions are met:
+
+The CPU, memory, and I/O resources are sufficient. SMP is a solution that uses abundant resources to exchange time. After a plan is parallelized, resource consumption increases. When these resources become a bottleneck, the SMP feature cannot improve the performance and may even deteriorate it. In the case of a resource bottleneck, you are advised to disable the SMP feature.
+
+### Procedure
+
+1. Observe the current system load situation. If resources are sufficient (the resource usage is smaller than 50%), perform step 2. Otherwise, exit this procedure.
+
+2. Set **query_dop** to **1** (default value). Use **explain** to generate an execution plan and check whether the plan can be used in scenarios in [SMP Application Scenarios and Restrictions](#smp-application-scenarios-and-restrictions). If yes, go to step 3.
+
+3. Set **query_dop** to a specific value (**value**). The degree of parallelism is then fixed to 1 or **value**, regardless of the resource usage and plan characteristics.
+
+4. Before the query statement is executed, set **query_dop** to an appropriate value. After the statement is executed, set **query_dop** to **1** to disable parallel execution. The following provides an example:
+
+   ```
+   mogdb=# SET query_dop = 4;
+   mogdb=# SELECT COUNT(*) FROM t1 GROUP BY a;
+   ......
+   mogdb=# SET query_dop = 1;
+   ```
+
+   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+   >
+   > - If resources are sufficient, the higher the parallelism degree is, the better the performance improvement effect is.
+   > - The SMP parallelism degree supports session level settings. You are advised to enable the SMP feature before executing a query that meets the requirements. After the execution is complete, disable the SMP feature. Otherwise, SMP may affect services in peak hours.
diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-vector-engine.md b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-vector-engine.md
index d8faba9d..fa54a6de 100644
--- a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-vector-engine.md
+++ b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/configuring-vector-engine.md
@@ -1,59 +1,59 @@
----
-title: Configuring Vectorized Engine
-summary: Configuring Vectorized Engine
-author: Guo Huan
-date: 2022-04-23
----
-
-# Configuring Vectorized Engine
-
-The openGauss database supports the row executor and vectorized executor for processing row-store tables and column-store tables, respectively.
-
-- More data is read in one batch at a time, saving I/O resources.
-- There are a large number of records in a batch, and the CPU cache hit rate increases.
-- In pipeline mode, the number of function calls is small.
-- A batch of data is processed at a time, which is efficient.
-
-Therefore, the openGauss database can achieve better query performance for complex analytical queries. However, column-store tables do not perform well in data insertion and update. Therefore, column-store tables cannot be used for services with frequent data insertion and update.
-
-To improve the query performance of row-store tables in complex analytical queries, the openGauss database provides the vectorized executor for processing row-store tables.
You can set [try_vector_engine_strategy](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#try_vector_engine_strategy) to convert query statements containing row-store tables into vectorized execution plans for execution. - -This conversion is not applicable to all query scenarios. If a query statement contains operations such as expression calculation, multi-table join, and aggregation, the performance can be improved by converting the statement to a vectorized execution plan. Theoretically, converting a row-store table to a vectorized execution plan causes conversion overheads and performance deterioration. After the foregoing expression calculation, join operation, and aggregation operations are converted into vectorized execution plans, performance can be improved. The performance improvement must be higher than the overheads generated by the conversion. This determines whether the conversion is required. - -Take TPCH Q1 as an example. When the row executor is used, the execution time of the scan operator is 405210 ms, and the execution time of the aggregation operation is 2618964 ms. After the vectorized executor is used, the execution time of the scan operator (SeqScan and VectorAdapter) is 470840 ms, and the execution time of the aggregation operation is 212384 ms. So the query performance can be improved. - -Execution plan of the TPCH Q1 row executor: - -```sql - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------- - Sort (cost=43539570.49..43539570.50 rows=6 width=260) (actual time=3024174.439..3024174.439 rows=4 loops=1) - Sort Key: l_returnflag, l_linestatus - Sort Method: quicksort Memory: 25kB - -> HashAggregate (cost=43539570.30..43539570.41 rows=6 width=260) (actual time=3024174.396..3024174.403 rows=4 loops=1) - Group By Key: l_returnflag, l_linestatus - -> Seq Scan on lineitem (cost=0.00..19904554.46 rows=590875396 width=28) (actual time=0.016..405210.038 rows=596140342 loops=1) - Filter: (l_shipdate <= '1998-10-01 00:00:00'::timestamp without time zone) - Rows Removed by Filter: 3897560 - Total runtime: 3024174.578 ms -(9 rows) -``` - -Execution plan of the TPCH Q1 vectorized executor: - -```sql - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Row Adapter (cost=43825808.18..43825808.18 rows=6 width=298) (actual time=683224.925..683224.927 rows=4 loops=1) - -> Vector Sort (cost=43825808.16..43825808.18 rows=6 width=298) (actual time=683224.919..683224.919 rows=4 loops=1) - Sort Key: l_returnflag, l_linestatus - Sort Method: quicksort Memory: 3kB - -> Vector Sonic Hash Aggregate (cost=43825807.98..43825808.08 rows=6 width=298) (actual time=683224.837..683224.837 rows=4 loops=1) - Group By Key: l_returnflag, l_linestatus - -> Vector Adapter(type: BATCH MODE) (cost=19966853.54..19966853.54 rows=596473861 width=66) (actual time=0.982..470840.274 rows=596140342 loops=1) - Filter: (l_shipdate <= '1998-10-01 00:00:00'::timestamp without time zone) - Rows Removed by Filter: 3897560 - -> Seq Scan on lineitem (cost=0.00..19966853.54 rows=596473861 width=66) (actual time=0.364..199301.737 rows=600037902 loops=1) - Total runtime: 683225.564 ms -(11 rows) +--- +title: Configuring Vectorized Engine +summary: Configuring Vectorized Engine +author: Guo Huan +date: 2022-04-23 +--- + +# Configuring Vectorized Engine + +The 
MogDB database supports the row executor and the vectorized executor, which process row-store tables and column-store tables, respectively. Compared with the row executor, the vectorized executor has the following advantages:
+
+- More data is read in one batch, saving I/O resources.
+- A batch contains a large number of records, which increases the CPU cache hit rate.
+- In pipeline mode, fewer function calls are required.
+- A whole batch of data is processed at a time, which is efficient.
+
+Therefore, MogDB can achieve better query performance for complex analytical queries. However, column-store tables perform poorly for data insertion and update, so they are not suitable for services with frequent inserts and updates.
+
+To improve the query performance of row-store tables in complex analytical queries, MogDB also provides the vectorized executor for row-store tables. You can set [try_vector_engine_strategy](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#try_vector_engine_strategy) to convert query statements containing row-store tables into vectorized execution plans.
+
+This conversion does not benefit every query. Converting a row-store scan into a vectorized plan incurs conversion overhead, which by itself degrades performance, while operations such as expression evaluation, multi-table joins, and aggregation run faster once vectorized. The conversion is therefore worthwhile only when the vectorization gains outweigh the conversion overhead.
+
+Take TPCH Q1 as an example. With the row executor, the scan operator takes 405210 ms and the aggregation takes 2618964 ms. With the vectorized executor, the scan operators (SeqScan and VectorAdapter) take 470840 ms, while the aggregation takes only 212384 ms, so the overall query performance improves significantly, as the plans below show.
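+
+The following minimal sketch shows how such a comparison might be reproduced. It assumes a TPCH **lineitem** table is loaded and uses a simplified Q1-style aggregation; see the GUC reference linked above for the exact set of values that **try_vector_engine_strategy** accepts (this sketch assumes **force** enables the conversion and **off**, the default, disables it):
+
+```sql
+mogdb=# SET try_vector_engine_strategy = 'force';  -- convert row-store plans to vectorized plans
+mogdb=# EXPLAIN ANALYZE
+mogdb-# SELECT l_returnflag, l_linestatus, count(*)
+mogdb-# FROM lineitem
+mogdb-# WHERE l_shipdate <= '1998-10-01'
+mogdb-# GROUP BY l_returnflag, l_linestatus;
+mogdb=# SET try_vector_engine_strategy = 'off';    -- restore the default
+```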
+ +Execution plan of the TPCH Q1 row executor: + +```sql + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------- + Sort (cost=43539570.49..43539570.50 rows=6 width=260) (actual time=3024174.439..3024174.439 rows=4 loops=1) + Sort Key: l_returnflag, l_linestatus + Sort Method: quicksort Memory: 25kB + -> HashAggregate (cost=43539570.30..43539570.41 rows=6 width=260) (actual time=3024174.396..3024174.403 rows=4 loops=1) + Group By Key: l_returnflag, l_linestatus + -> Seq Scan on lineitem (cost=0.00..19904554.46 rows=590875396 width=28) (actual time=0.016..405210.038 rows=596140342 loops=1) + Filter: (l_shipdate <= '1998-10-01 00:00:00'::timestamp without time zone) + Rows Removed by Filter: 3897560 + Total runtime: 3024174.578 ms +(9 rows) +``` + +Execution plan of the TPCH Q1 vectorized executor: + +```sql + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Row Adapter (cost=43825808.18..43825808.18 rows=6 width=298) (actual time=683224.925..683224.927 rows=4 loops=1) + -> Vector Sort (cost=43825808.16..43825808.18 rows=6 width=298) (actual time=683224.919..683224.919 rows=4 loops=1) + Sort Key: l_returnflag, l_linestatus + Sort Method: quicksort Memory: 3kB + -> Vector Sonic Hash Aggregate (cost=43825807.98..43825808.08 rows=6 width=298) (actual time=683224.837..683224.837 rows=4 loops=1) + Group By Key: l_returnflag, l_linestatus + -> Vector Adapter(type: BATCH MODE) (cost=19966853.54..19966853.54 rows=596473861 width=66) (actual time=0.982..470840.274 rows=596140342 loops=1) + Filter: (l_shipdate <= '1998-10-01 00:00:00'::timestamp without time zone) + Rows Removed by Filter: 3897560 + -> Seq Scan on lineitem (cost=0.00..19966853.54 rows=596473861 width=66) (actual time=0.364..199301.737 rows=600037902 loops=1) + Total runtime: 683225.564 ms +(11 rows) ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/optimizing-os-parameters.md b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/optimizing-os-parameters.md index 7e0a3e53..1c573997 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/optimizing-os-parameters.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/optimizing-os-parameters.md @@ -1,117 +1,117 @@ ---- -title: Optimizing OS Parameters -summary: Optimizing OS Parameters -author: Liu Xu -date: 2021-03-04 ---- - -# Optimizing OS Parameters - -You can improve the MogDB performance by modifying key parameters of the OS based on actual service requirements during the performance optimization. - -## Prerequisites - -You have checked whether the OS parameters are set to the suggested values using **gs_check**. If not, modify them as required. - -## Memory Parameters - -In the **sysctl.conf** file, change the value of **vm.extfrag_threshold** to **1000** (reference value). If the file does not contain memory parameters, add them manually. - -```bash -vim /etc/sysctl.conf -``` - -Run the following command to make the modification take effect: - -```sql -sysctl -p -``` - -## Network Parameters - -- In the **sysctl.conf** file, modify network parameters. If the file does not contain such parameters, add them manually. For details, see [Table 1](#table 1). 
- - ``` - vim /etc/sysctl.conf - ``` - - Run the following command to make the modification take effect: - - ``` - sysctl -p - ``` - - **Table 1** Network parameters - - | Parameter | Reference Value | Description | -| :--------------------------- | :--------------------------- | :----------------------------------------------------------- | - | net.ipv4.tcp_timestamps | 1 | Specifies whether to enable quick reclamation of the sockets in TIME-WAIT state during TCP connection establishment. The default value **0** indicates that quick reclamation is disabled, and the value **1** indicates that quick reclamation is enabled. | - | net.ipv4.tcp_mem | 94500000 915000000 927000000 | **94500000**: If less than 94,500,000 pages are used by the TCP, the kernel is not affected.
**915000000**: If more than 915,000,000 pages are used by the TCP, the kernel enters the **memory pressure** mode.
**927000000**: If more than 927,000,000 pages are used by the TCP, the "Out of socket memory." message is displayed. | - | net.ipv4.tcp_max_orphans | 3276800 | Maximum number of the orphan sockets | - | net.ipv4.tcp_fin_timeout | 60 | Default timeout period | - | net.ipv4.ip_local_port_range | 26000 65535 | Port range that can be used by TCP or UDP | - -- Use the **ifconfig** command to set the maximum transmission unit (MTU) of 10 GE NICs. The value **8192** is recommended because this setting improves the network bandwidth usage. - - Example: - -```bash - #ifconfig ethx mtu 8192 -#ifconfig ethx - ethx Link encap:Ethernet HWaddr XX:XX:XX:XX:XX:XX - inet addr:xxx.xxx.xxx.xxx Bcast:xxx.xxx.xxx.xxx Mask:xxx.xxx.xxx.0 - inet6 addr: fxxx::9xxx:bxxx:xxxa:1d18/64 Scope:Link - UP BROADCAST RUNNING MULTICAST MTU:8192 Metric:1 - RX packets:179849803 errors:0 dropped:0 overruns:0 frame:0 - TX packets:40492292 errors:0 dropped:0 overruns:0 carrier:0 - collisions:0 txqueuelen:1000 - RX bytes:17952090386 (17120.4 Mb) TX bytes:171359670290 (163421.3 Mb) - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > -> - **ethx** indicates the 10 GE service NIC used in the database. - > - The first command is used to set the MTU. The second command is used to verify that the MTU has been successfully set. The texts in bold indicate the value of the MTU. - > - Set the MTU as user **root**. - -- Use **ethtool** to set the length of the receiving (**RX**) queue and that of the sending (**TX**) queue for 10 GE NICs. The value **4096** is recommended because this setting improves the network bandwidth usage. - - Example: - - ``` - # ethtool -G ethx rx 4096 tx 4096 - # ethtool -g ethx - Ring parameters for ethx: - Pre-set maximums: - RX: 4096 - RX Mini: 0 - RX Jumbo: 0 - TX: 4096 - Current hardware settings: - RX: 4096 - RX Mini: 0 - RX Jumbo: 0 - TX: 4096 - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - **ethx** indicates the 10 GE service NIC used in the database. - > - The first command is used to set the lengths of the receiving and sending queues. The second command is to verify whether the lengths have been successfully set. If information similar to the example is displayed, the setting is successful. - > - Set the lengths of the receiving and sending queues as user **root**. - -## I/O Parameters - -Set the **hugepage** attribute. Run the following commands to disable the transparent huge page function: - -```bash -echo never > -/sys/kernel/mm/transparent_hugepage/enabled -echo never > /sys/kernel/mm/transparent_hugepage/defrag -``` - -Run the following command to make the modification take effect: - -``` - reboot -``` +--- +title: Optimizing OS Parameters +summary: Optimizing OS Parameters +author: Liu Xu +date: 2021-03-04 +--- + +# Optimizing OS Parameters + +You can improve the MogDB performance by modifying key parameters of the OS based on actual service requirements during the performance optimization. + +## Prerequisites + +You have checked whether the OS parameters are set to the suggested values using **gs_check**. If not, modify them as required. + +## Memory Parameters + +In the **sysctl.conf** file, change the value of **vm.extfrag_threshold** to **1000** (reference value). If the file does not contain memory parameters, add them manually. 
+
+```bash
+vim /etc/sysctl.conf
+```
+
+Run the following command to make the modification take effect:
+
+```bash
+sysctl -p
+```
+
+## Network Parameters
+
+- In the **sysctl.conf** file, modify network parameters. If the file does not contain such parameters, add them manually. For details, see Table 1 below.
+
+  ```
+  vim /etc/sysctl.conf
+  ```
+
+  Run the following command to make the modification take effect:
+
+  ```
+  sysctl -p
+  ```
+
+  **Table 1** Network parameters
+
+  | Parameter | Reference Value | Description |
+  | :--------------------------- | :--------------------------- | :----------------------------------------------------------- |
+  | net.ipv4.tcp_timestamps | 1 | Specifies whether to enable TCP timestamps, which are required for quick reclamation of sockets in the TIME-WAIT state. The value **0** disables TCP timestamps, and the value **1** enables them. |
+  | net.ipv4.tcp_mem | 94500000 915000000 927000000 | **94500000**: If less than 94,500,000 pages are used by the TCP, the kernel is not affected.<br>**915000000**: If more than 915,000,000 pages are used by the TCP, the kernel enters the **memory pressure** mode.<br>**927000000**: If more than 927,000,000 pages are used by the TCP, the "Out of socket memory." message is displayed. |
+  | net.ipv4.tcp_max_orphans | 3276800 | Maximum number of orphan sockets. |
+  | net.ipv4.tcp_fin_timeout | 60 | Timeout period (in seconds) for sockets held in the FIN-WAIT-2 state. |
+  | net.ipv4.ip_local_port_range | 26000 65535 | Port range that can be used by TCP or UDP. |
+
+- Use the **ifconfig** command to set the maximum transmission unit (MTU) of 10 GE NICs. The value **8192** is recommended because this setting improves the network bandwidth usage.
+
+  Example:
+
+  ```bash
+  # ifconfig ethx mtu 8192
+  # ifconfig ethx
+  ethx      Link encap:Ethernet  HWaddr XX:XX:XX:XX:XX:XX
+            inet addr:xxx.xxx.xxx.xxx  Bcast:xxx.xxx.xxx.xxx  Mask:xxx.xxx.xxx.0
+            inet6 addr: fxxx::9xxx:bxxx:xxxa:1d18/64 Scope:Link
+            UP BROADCAST RUNNING MULTICAST  MTU:8192  Metric:1
+            RX packets:179849803 errors:0 dropped:0 overruns:0 frame:0
+            TX packets:40492292 errors:0 dropped:0 overruns:0 carrier:0
+            collisions:0 txqueuelen:1000
+            RX bytes:17952090386 (17120.4 Mb)  TX bytes:171359670290 (163421.3 Mb)
+  ```
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+  >
+  > - **ethx** indicates the 10 GE service NIC used in the database.
+  > - The first command sets the MTU, and the second command verifies that the MTU has been set; the value appears as **MTU:8192** in the output.
+  > - Set the MTU as user **root**.
+
+- Use **ethtool** to set the length of the receiving (**RX**) queue and that of the sending (**TX**) queue for 10 GE NICs. The value **4096** is recommended because this setting improves the network bandwidth usage.
+
+  Example:
+
+  ```
+  # ethtool -G ethx rx 4096 tx 4096
+  # ethtool -g ethx
+  Ring parameters for ethx:
+  Pre-set maximums:
+  RX:             4096
+  RX Mini:        0
+  RX Jumbo:       0
+  TX:             4096
+  Current hardware settings:
+  RX:             4096
+  RX Mini:        0
+  RX Jumbo:       0
+  TX:             4096
+  ```
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+  >
+  > - **ethx** indicates the 10 GE service NIC used in the database.
+  > - The first command sets the lengths of the receiving and sending queues, and the second command verifies whether the lengths have been set successfully. If information similar to the example is displayed, the setting is successful.
+  > - Set the lengths of the receiving and sending queues as user **root**.
+
+## I/O Parameters
+
+Set the **hugepage** attribute. Run the following commands to disable the transparent huge page function:
+
+```bash
+echo never > /sys/kernel/mm/transparent_hugepage/enabled
+echo never > /sys/kernel/mm/transparent_hugepage/defrag
+```
+
+Run the following command to make the modification take effect:
+
+```
+reboot
+```
diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-load-management-overview.md b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-load-management-overview.md
index f1526f64..ca024301 100644
--- a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-load-management-overview.md
+++ b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-load-management-overview.md
@@ -1,26 +1,26 @@
----
-title: Overview
-summary: Overview
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Overview
-
-## Function
-
-MogDB manages resource load to balance system resource usage for jobs.
-
-## Related Concepts
-
-**Resource management**
-
-MogDB manages system resources, including CPU, memory, I/O, and storage resources. It allocates system resources in a proper way to prevent system efficiency deterioration or system running problems.
-
-**Cgroup**
-
-Control groups (Cgroups) are a mechanism provided by the Linux kernel to restrict, record, and isolate physical resources (such as CPU, memory, and I/O resources) used by process groups. Cgroups have strict restrictions on Linux system resources. If a process is added to a Cgroup, it can use only restricted resources. For details about Cgroup principles, see the product manual corresponding to your OS.
-
-**Resource pool**
-
+---
+title: Overview
+summary: Overview
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Overview
+
+## Function
+
+MogDB manages resource load to balance system resource usage among jobs.
+
+## Related Concepts
+
+**Resource management**
+
+MogDB manages system resources, including CPU, memory, I/O, and storage resources. It allocates them properly to prevent efficiency degradation and system faults.
+
+**Cgroup**
+
+Control groups (Cgroups) are a mechanism provided by the Linux kernel to restrict, record, and isolate the physical resources (such as CPU, memory, and I/O resources) used by process groups. Cgroups strictly restrict Linux system resources: a process added to a Cgroup can use only the resources allotted to it. For details about Cgroup principles, see the product manual corresponding to your OS.
+
+**Resource pool**
+
+
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-load-management.md b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-load-management.md index b88ec4fb..5503b4c7 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-load-management.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-load-management.md @@ -1,11 +1,11 @@ ---- -title: Resource Load Management -summary: Resource Load Management -author: zhang cuiping -date: 2023-04-07 ---- - -# Resource Load Management - -- **[Overview](resource-load-management-overview.md)** +--- +title: Resource Load Management +summary: Resource Load Management +author: zhang cuiping +date: 2023-04-07 +--- + +# Resource Load Management + +- **[Overview](resource-load-management-overview.md)** - **[Resource Management Preparation](./resource-management-preparations/resource-management-preparations.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/enabling-resource-load-management.md b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/enabling-resource-load-management.md index 3c4eb621..cd3a12af 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/enabling-resource-load-management.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/enabling-resource-load-management.md @@ -1,55 +1,55 @@ ---- -title: Enabling Resource Load Management -summary: Enabling Resource Load Management -author: zhang cuiping -date: 2023-04-07 ---- - -# Enabling Resource Load Management - -## Background - -This section describes how to configure parameters for resource load management. - -## Prerequisites - -- In MogDB, you can manage system resources only as a database administrator. Run the following statement to query user permissions: - - ```sql - MogDB=# SELECT rolname FROM pg_roles WHERE rolsystemadmin = 't'; - rolname - --------- - omm - Jack - (2 rows) - ``` - -- Resource load management can be applied only to users with the login permission. Run the following statement to query user permissions: - - ```sql - MogDB=# SELECT rolname FROM pg_roles WHERE rolcanlogin = 't'; - rolname - --------- - omm - (1 row) - ``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **Notice**: If a user's login permission is revoked, the user's resource pool will be changed to **default_pool**. For details about **default_pool**, see[Table 2](creating-resource-pool.md)#table2). - -## Procedure - -You can perform the following steps only as a database administrator to enable load management based on the resource pool. The following uses user **omm** as an example. - -1. Log in as the OS user **omm** to the primary node of MogDB. - -2. Enable resource pool–based load management. - - ``` - gs_guc set -N all -I all -c "use_workload_manager=on" - ``` - -3. Restart the database for the parameter settings to take effect. 
-
-   ```
-   gs_om -t stop && gs_om -t start
+---
+title: Enabling Resource Load Management
+summary: Enabling Resource Load Management
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Enabling Resource Load Management
+
+## Background
+
+This section describes how to configure parameters for resource load management.
+
+## Prerequisites
+
+- In MogDB, you can manage system resources only as a database administrator. Run the following statement to query user permissions:
+
+  ```sql
+  MogDB=# SELECT rolname FROM pg_roles WHERE rolsystemadmin = 't';
+   rolname
+  ---------
+   omm
+   Jack
+  (2 rows)
+  ```
+
+- Resource load management can be applied only to users with the login permission. Run the following statement to query user permissions:
+
+  ```sql
+  MogDB=# SELECT rolname FROM pg_roles WHERE rolcanlogin = 't';
+   rolname
+  ---------
+   omm
+  (1 row)
+  ```
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **Notice**: If a user's login permission is revoked, the user's resource pool will be changed to **default_pool**. For details about **default_pool**, see [Table 2](creating-resource-pool.md#table2).
+
+## Procedure
+
+You can perform the following steps only as a database administrator to enable load management based on the resource pool. The following uses user **omm** as an example.
+
+1. Log in as the OS user **omm** to the primary node of MogDB.
+
+2. Enable resource pool–based load management.
+
+   ```
+   gs_guc set -N all -I all -c "use_workload_manager=on"
+   ```
+
+3. Restart the database for the parameter settings to take effect.
+
+   ```
+   gs_om -t stop && gs_om -t start
 ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-management-preparations.md b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-management-preparations.md
index d64e48eb..0e3bb4b6 100644
--- a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-management-preparations.md
+++ b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-management-preparations.md
@@ -1,13 +1,13 @@
----
-title: Resource Management Preparation
-summary: Resource Management Preparation
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Resource Management Preparation
-
-- **[Resource Planning](resource-planning.md)**
-- **[Enabling Resource Load Management](enabling-resource-load-management.md)**
-- **[Setting a Cgroup](setting-control-group.md)**
+---
+title: Resource Management Preparation
+summary: Resource Management Preparation
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Resource Management Preparation
+
+- **[Resource Planning](resource-planning.md)**
+- **[Enabling Resource Load Management](enabling-resource-load-management.md)**
+- **[Setting a Cgroup](setting-control-group.md)**
 - **[Creating a Resource Pool](creating-resource-pool.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-planning.md b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-planning.md
index 1e20f877..465a7fee 100644
--- 
a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-planning.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-planning.md @@ -1,34 +1,34 @@ ---- -title: Planning Resources -summary: Planning Resources -author: zhang cuiping -date: 2023-04-07 ---- - -# Planning Resources - -Before configuring resource load management, plan tenant resources based on service models. After services run for a period of time, you can adjust the configurations based on resource usage. - -Assume that two departments in a large enterprise use the same cluster. MogDB puts system resources used by the same department in a group to isolate resources for different departments. The following tables describe the resource plan. - -**Table 1** Tenant A resource plan - -| Parameter | **Example Value** | -| --------------------- | ------------------------------------------ | -| Sub-Class Cgroup | class_a | -| Workload Cgroup | - workload_a1
- workload_a2 | -| Group resource pool | resource_pool_a | -| Service resource pool | - resource_pool_a1
- resource_pool_a2 | -| Group user | tenant_a | -| Service user | - tenant_a1
- tenant_a2 | - -**Table 2** Tenant B resource plan - -| Parameter | Example Value | -| --------------------- | ------------------------------------------ | -| Sub-Class Cgroup | class_b | -| Workload Cgroup | - workload_b1
- workload_b2 | -| Group resource pool | resource_pool_b | -| Service resource pool | - resource_pool_b1
- resource_pool_b2 | -| Group user | tenant_b | +--- +title: Planning Resources +summary: Planning Resources +author: zhang cuiping +date: 2023-04-07 +--- + +# Planning Resources + +Before configuring resource load management, plan tenant resources based on service models. After services run for a period of time, you can adjust the configurations based on resource usage. + +Assume that two departments in a large enterprise use the same cluster. MogDB puts system resources used by the same department in a group to isolate resources for different departments. The following tables describe the resource plan. + +**Table 1** Tenant A resource plan + +| Parameter | **Example Value** | +| --------------------- | ------------------------------------------ | +| Sub-Class Cgroup | class_a | +| Workload Cgroup | - workload_a1
- workload_a2 | +| Group resource pool | resource_pool_a | +| Service resource pool | - resource_pool_a1
- resource_pool_a2 | +| Group user | tenant_a | +| Service user | - tenant_a1
- tenant_a2 | + +**Table 2** Tenant B resource plan + +| Parameter | Example Value | +| --------------------- | ------------------------------------------ | +| Sub-Class Cgroup | class_b | +| Workload Cgroup | - workload_b1
- workload_b2 | +| Group resource pool | resource_pool_b | +| Service resource pool | - resource_pool_b1
- resource_pool_b2 | +| Group user | tenant_b | | Service user | - tenant_b1
- tenant_b2 | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/setting-control-group.md b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/setting-control-group.md index d61e6ce6..b6b11e4c 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/setting-control-group.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/setting-control-group.md @@ -1,209 +1,209 @@ ---- -title: Setting a Cgroup -summary: Setting a Cgroup -author: zhang cuiping -date: 2023-04-07 ---- - -# Setting a Cgroup - -## Background - -The core of MogDB resource load management is resource pools. The first step of configuring a resource pool is to configure Cgroups in the environment. For details about Cgroup principles, see the product manual corresponding to your OS. For details about MogDB Cgroups, see [Viewing Cgroup Information](#Viewing Cgroup Information)。 - -The Class Cgroup is a top-layer Cgroup for database service running. **DefaultClass** is a sub-category of the Class Cgroup and is automatically created when a cluster is deployed. The **Medium** Cgroup under **DefaultClass** contains running jobs that are triggered by the system. Resource configurations of **Medium** cannot be modified, and the jobs running on it are not controlled by resource management. Therefore, you are advised to create sub-Class and Workload Cgroups to control resource allocation. - -## Prerequisites - -You are familiar with “Server Tools > gs_cgroup” and “Server Tools > gs_ssh” in *Tool Reference*. - -## Procedure - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: -> -> - To control all the resources in MogDB, you need to create, update, and delete Cgroups on each node. Use **gs_ssh** (see “Server Tools > gs_ssh” in *Tool Reference*) to run commands in the steps below. -> - A Cgroup must be named as follows: -> - The names of sub-Class Cgroups and Workload Cgroups cannot contain columns (:). -> - Cgroups having the same name cannot be created. - -**Creating sub-Class and Workload Cgroups** - -1. Log in as the OS user **omm** to the primary node of MogDB. - -2. Create sub-Class Cgroups **class_a** and **class_b**, and allocate 40% and 20% of Class CPU resources to them, respectively. - - ``` - gs_ssh -c "gs_cgroup -c -S class_a -s 40" - ``` - - ``` - gs_ssh -c "gs_cgroup -c -S class_b -s 20" - ``` - -3. Create Workload Cgroups **workload_a1** and **workload_a2** under **class_a**, and allocate 20% and 60% of **class_a** CPU resources to them, respectively. - - ``` - gs_ssh -c "gs_cgroup -c -S class_a -G workload_a1 -g 20 " - ``` - - ``` - gs_ssh -c "gs_cgroup -c -S class_a -G workload_a2 -g 60 " - ``` - -4. Create Workload Cgroups **workload_b1** and **workload_b2** under **class_b**, and allocate 50% and 40% of **class_b** CPU resources to them, respectively. - - ``` - gs_ssh -c "gs_cgroup -c -S class_b -G workload_b1 -g 50 " - ``` - - ``` - gs_ssh -c "gs_cgroup -c -S class_b -G workload_b2 -g 40 " - ``` - -**Adjusting resource quotas for Cgroups** - -1. Change the CPU resource quota for **class_a** to 30%. - - ``` - gs_ssh -c "gs_cgroup -u -S class_a -s 30" - ``` - -2. Change the CPU resource quota for **workload_a1** under **class_a** to 30% of **class_a** CPU resources. 
- - ``` - gs_ssh -c "gs_cgroup -u -S class_a -G workload_a1 -g 30" - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **Notice**: After adjustment, CPU resources allocated to **workload_a1** should not be greater than those allocated to **class_a**. The name of a Workload Cgroup cannot be a default name of the Timeshare Cgroup, that is, **Low**, **Medium**, **High**, or **Rush**. - -**Deleting a Cgroup** - -1. Delete the Cgroup **class_a**. - - ``` - gs_ssh -c "gs_cgroup -d -S class_a" - ``` - - After the command is executed, the Cgroup **class_a** is deleted. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **Notice**: User **root** or a user with the **root** permission can delete the default Cgroups that can be accessed by a common user *username* by specifying **-d** and **-U** *username*. A common user can delete existing Class Cgroups by specifying **-d** and **-S** *classname*. - -## Viewing Cgroup Information - -1. View Cgroup information in configuration files. - - ``` - gs_cgroup -p - ``` - - Cgroup configuration - - ``` - gs_cgroup -p - - Top Group information is listed: - GID: 0 Type: Top Percent(%): 1000( 50) Name: Root Cores: 0-47 - GID: 1 Type: Top Percent(%): 833( 83) Name: mogdb:omm Cores: 0-20 - GID: 2 Type: Top Percent(%): 333( 40) Name: Backend Cores: 0-20 - GID: 3 Type: Top Percent(%): 499( 60) Name: Class Cores: 0-20 - - Backend Group information is listed: - GID: 4 Type: BAKWD Name: DefaultBackend TopGID: 2 Percent(%): 266(80) Cores: 0-20 - GID: 5 Type: BAKWD Name: Vacuum TopGID: 2 Percent(%): 66(20) Cores: 0-20 - - Class Group information is listed: - GID: 20 Type: CLASS Name: DefaultClass TopGID: 3 Percent(%): 166(20) MaxLevel: 1 RemPCT: 100 Cores: 0-20 - GID: 21 Type: CLASS Name: class1 TopGID: 3 Percent(%): 332(40) MaxLevel: 2 RemPCT: 70 Cores: 0-20 - - Workload Group information is listed: - GID: 86 Type: DEFWD Name: grp1:2 ClsGID: 21 Percent(%): 99(30) WDLevel: 2 Quota(%): 30 Cores: 0-5 - - Timeshare Group information is listed: - GID: 724 Type: TSWD Name: Low Rate: 1 - GID: 725 Type: TSWD Name: Medium Rate: 2 - GID: 726 Type: TSWD Name: High Rate: 4 - GID: 727 Type: TSWD Name: Rush Rate: 8 - - Group Exception information is listed: - GID: 20 Type: EXCEPTION Class: DefaultClass - PENALTY: QualificationTime=1800 CPUSkewPercent=30 - - GID: 21 Type: EXCEPTION Class: class1 - PENALTY: AllCpuTime=100 QualificationTime=2400 CPUSkewPercent=90 - - GID: 86 Type: EXCEPTION Group: class1:grp1:2 - ABORT: BlockTime=1200 ElapsedTime=2400 - ``` - - The following table lists the Cgroup configuration shown in the above example. - - **Table 1** Cgroup configuration - - | GID | Type | Name | Percentage (%) | Remarks | - | :--- | :--------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | - | 0 | Top Cgroup | Root | The number **1000** indicates that the total system resources are divided into 1000 pieces.
The number **50** in the parentheses indicates 50% of I/O resources.
MogDB does not control I/O resources through Cgroups. Therefore, the following Cgroup information is only about CPU. | - | - | 1 | Top Cgroup | mogdb:omm | Only one database program runs in a cluster. The default quota of the **Gaussdb:omm** Cgroup is 833. That is, the ratio of database programs to non-database programs is 5:1 (833:167). | - | - | 2 | Top Cgroup | Backend | The number **40** in the parentheses indicates that the Backend Cgroup takes up 40% of the resources of the **Gaussdb:dbuser** Cgroup. The number **60** in the parentheses indicates that the Class Cgroup takes up 60% of the resources of the **Gaussdb:dbuser** Cgroup. | - | - | 3 | Top Cgroup | Class | The number **40** in the parentheses indicates that the Backend Cgroup takes up 40% of the resources of the **Gaussdb:dbuser** Cgroup. The number **60** in the parentheses indicates that the Class Cgroup takes up 60% of the resources of the **Gaussdb:dbuser** Cgroup. | - | - | 4 | Backend Cgroup | DefaultBackend | The numbers **80** and **20** in the parentheses indicate the percentages of Backend Cgroup resources taken by the **DefaultBackend** and **Vacuum** groups, respectively. | **TopGID**: GID (2) of the Backend Cgroup in a Top Cgroup | - | 5 | Backend Cgroup | Vacuum | The numbers **80** and **20** in the parentheses indicate the percentages of Backend Cgroup resources taken by the **DefaultBackend** and **Vacuum** groups, respectively. | **TopGID**: GID (2) of the Backend Cgroup in a Top Cgroup | - | 20 | Class Cgroup | DefaultClass | The number **20** in the parentheses indicates that the **DefaultClass** Cgroup takes up 20% of the Class Cgroup resources. The number **40** in the parentheses indicates that the **class1** Cgroup takes up 40% of the Class Cgroup resources. There are only two Class Cgroups currently. Therefore, the system resource quotas for the Class Cgroups (499) are allocated in the ratio of 20:40 (166:332). | 1. **TopGID**: GID (3) of the Class Cgroups in a Top Cgroup to which the **DefaultClass** and **class1** Cgroups belong.
2. **MaxLevel**:
maximum number of levels for Workload Cgroups in a Class Cgroup. This parameter is set to **1** for **DefaultClass** because it has no Workload Cgroups.
3. **RemPCT**:
percentage of remaining resources in a Class Cgroup after its resources are allocated to Workload Cgroups. For example, the percentage of remaining resources in the **class1** Cgroup is 70%. | - | 21 | Class Cgroup | class1 | The number **20** in the parentheses indicates that the **DefaultClass** Cgroup takes up 20% of the Class Cgroup resources. The number **40** in the parentheses indicates that the **class1** Cgroup takes up 40% of the Class Cgroup resources. There are only two Class Cgroups currently. Therefore, the system resource quotas for the Class Cgroups (499) are allocated in the ratio of 20:40 (166:332). | 1. **TopGID**: GID (3) of the Class Cgroups in a Top Cgroup to which the **DefaultClass** and **class1** Cgroups belong.
2. **MaxLevel**:
maximum number of levels for Workload Cgroups in a Class Cgroup. This parameter is set to **1** for **DefaultClass** because it has no Workload Cgroups.
3. **RemPCT**:
percentage of remaining resources in a Class Cgroup after its resources are allocated to Workload Cgroups. For example, the percentage of remaining resources in the **class1** Cgroup is 70%. | - | 86 | Workload Cgroup | grp1:2
(This name is composed of the name of a Workload Cgroup and its level in the Class Cgroup. This **grp1:2** Cgroup is the first Workload Cgroup under the **class1** Cgroup, and its level is 2. Each Class Cgroup contains a maximum of 10 levels of Workload Cgroups.) | In this example, this Workload Cgroup takes up 30% of **class1** Cgroup resources (332 x 30% = 99). | 1. **ClsGID**: GID of the **class1** Cgroup to which the Workload Cgroup belongs.
2. **WDLevel**: level of the Workload Cgroup in the corresponding Class Cgroup. | - | 724 | Timeshare Cgroup | Low | - | **Rate**: rate of resources allocated to a Timeshare Cgroup. The **Low** Cgroup has the minimum rate 1 and the **Rush** Cgroup has the maximum rate 8. The resource rate for **Rush**:**High**:**Medium**:**Low** is **8**:**4**:**2**:**1** under a Timeshare Cgroup. | - | 725 | Timeshare Cgroup | Medium | - | **Rate**: rate of resources allocated to a Timeshare Cgroup. The **Low** Cgroup has the minimum rate 1 and the **Rush** Cgroup has the maximum rate 8. The resource rate for **Rush**:**High**:**Medium**:**Low** is **8**:**4**:**2**:**1** under a Timeshare Cgroup. | - | 726 | Timeshare Cgroup | High | - | **Rate**: rate of resources allocated to a Timeshare Cgroup. The **Low** Cgroup has the minimum rate 1 and the **Rush** Cgroup has the maximum rate 8. The resource rate for **Rush**:**High**:**Medium**:**Low** is **8**:**4**:**2**:**1** under a Timeshare Cgroup. | - | 727 | Timeshare Cgroup | Rush | - | **Rate**: rate of resources allocated to a Timeshare Cgroup. The **Low** Cgroup has the minimum rate 1 and the **Rush** Cgroup has the maximum rate 8. The resource rate for **Rush**:**High**:**Medium**:**Low** is **8**:**4**:**2**:**1** under a Timeshare Cgroup. | - -2. View the Cgroup tree in the OS. - - Run the following command to query the structure of the Cgroup tree: - - ``` - gs_cgroup -P - ``` - - In the command output, **shares** indicates the value of **cpu.shares**, which specifies the dynamic quota of CPU resources in the OS, and **cpus** indicates the value of **cpuset.cpus**, which specifies the dynamic quota of CPUSET resources in the OS (number of cores that a Cgroup can use). - - ```bash - Mount Information: - cpu:/dev/cgroup/cpu - blkio:/dev/cgroup/blkio - cpuset:/dev/cgroup/cpuset - cpuacct:/dev/cgroup/cpuacct - - Group Tree Information: - - Gaussdb:wangrui (shares: 5120, cpus: 0-20, weight: 1000) - - Backend (shares: 4096, cpus: 0-20, weight: 400) - - Vacuum (shares: 2048, cpus: 0-20, weight: 200) - - DefaultBackend (shares: 8192, cpus: 0-20, weight: 800) - - Class (shares: 6144, cpus: 0-20, weight: 600) - - class1 (shares: 4096, cpus: 0-20, weight: 400) - - RemainWD:1 (shares: 1000, cpus: 0-20, weight: 100) - - RemainWD:2 (shares: 7000, cpus: 0-20, weight: 700) - - Timeshare (shares: 1024, cpus: 0-20, weight: 500) - - Rush (shares: 8192, cpus: 0-20, weight: 800) - - High (shares: 4096, cpus: 0-20, weight: 400) - - Medium (shares: 2048, cpus: 0-20, weight: 200) - - Low (shares: 1024, cpus: 0-20, weight: 100) - - grp1:2 (shares: 3000, cpus: 0-5, weight: 300) - - TopWD:1 (shares: 9000, cpus: 0-20, weight: 900) - - DefaultClass (shares: 2048, cpus: 0-20, weight: 200) - - RemainWD:1 (shares: 1000, cpus: 0-20, weight: 100) - - Timeshare (shares: 1024, cpus: 0-20, weight: 500) - - Rush (shares: 8192, cpus: 0-20, weight: 800) - - High (shares: 4096, cpus: 0-20, weight: 400) - - Medium (shares: 2048, cpus: 0-20, weight: 200) - - Low (shares: 1024, cpus: 0-20, weight: 100) - - TopWD:1 (shares: 9000, cpus: 0-20, weight: 900) - ``` - -3. Obtain the Cgroup configuration in a system view. - - a. Use gsql to access a MogDB database. - - b. Obtain the configuration about all Cgroups in the system. - - ```sql - MogDB=# SELECT * FROM gs_all_control_group_info; +--- +title: Setting a Cgroup +summary: Setting a Cgroup +author: zhang cuiping +date: 2023-04-07 +--- + +# Setting a Cgroup + +## Background + +The core of MogDB resource load management is resource pools. 
The first step of configuring a resource pool is to configure Cgroups in the environment. For details about Cgroup principles, see the product manual corresponding to your OS. For details about MogDB Cgroups, see [Viewing Cgroup Information](#viewing-cgroup-information).
+
+The Class Cgroup is a top-layer Cgroup for database service running. **DefaultClass** is a sub-category of the Class Cgroup and is automatically created when a cluster is deployed. The **Medium** Cgroup under **DefaultClass** contains running jobs that are triggered by the system. Resource configurations of **Medium** cannot be modified, and the jobs running on it are not controlled by resource management. Therefore, you are advised to create sub-Class and Workload Cgroups to control resource allocation.
+
+## Prerequisites
+
+You are familiar with “Server Tools > gs_cgroup” and “Server Tools > gs_ssh” in *Tool Reference*.
+
+## Procedure
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
+>
+> - To control all the resources in MogDB, you need to create, update, and delete Cgroups on each node. Use **gs_ssh** (see “Server Tools > gs_ssh” in *Tool Reference*) to run commands in the steps below.
+> - A Cgroup name must meet the following requirements:
+>   - The names of sub-Class Cgroups and Workload Cgroups cannot contain colons (:).
+>   - Cgroups having the same name cannot be created.
+
+**Creating sub-Class and Workload Cgroups**
+
+1. Log in as the OS user **omm** to the primary node of MogDB.
+
+2. Create sub-Class Cgroups **class_a** and **class_b**, and allocate 40% and 20% of Class CPU resources to them, respectively.
+
+   ```
+   gs_ssh -c "gs_cgroup -c -S class_a -s 40"
+   ```
+
+   ```
+   gs_ssh -c "gs_cgroup -c -S class_b -s 20"
+   ```
+
+3. Create Workload Cgroups **workload_a1** and **workload_a2** under **class_a**, and allocate 20% and 60% of **class_a** CPU resources to them, respectively.
+
+   ```
+   gs_ssh -c "gs_cgroup -c -S class_a -G workload_a1 -g 20 "
+   ```
+
+   ```
+   gs_ssh -c "gs_cgroup -c -S class_a -G workload_a2 -g 60 "
+   ```
+
+4. Create Workload Cgroups **workload_b1** and **workload_b2** under **class_b**, and allocate 50% and 40% of **class_b** CPU resources to them, respectively.
+
+   ```
+   gs_ssh -c "gs_cgroup -c -S class_b -G workload_b1 -g 50 "
+   ```
+
+   ```
+   gs_ssh -c "gs_cgroup -c -S class_b -G workload_b2 -g 40 "
+   ```
+
+**Adjusting resource quotas for Cgroups**
+
+1. Change the CPU resource quota for **class_a** to 30%.
+
+   ```
+   gs_ssh -c "gs_cgroup -u -S class_a -s 30"
+   ```
+
+2. Change the CPU resource quota for **workload_a1** under **class_a** to 30% of **class_a** CPU resources.
+
+   ```
+   gs_ssh -c "gs_cgroup -u -S class_a -G workload_a1 -g 30"
+   ```
+
+   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **Notice**: After adjustment, the CPU resources allocated to **workload_a1** should not be greater than those allocated to **class_a**. The name of a Workload Cgroup cannot be a default name of a Timeshare Cgroup, that is, **Low**, **Medium**, **High**, or **Rush**.
+
+**Deleting a Cgroup**
+
+1. Delete the Cgroup **class_a**.
+
+   ```
+   gs_ssh -c "gs_cgroup -d -S class_a"
+   ```
+
+   After the command is executed, the Cgroup **class_a** is deleted.
+
+   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **Notice**: User **root** or a user with the **root** permission can delete the default Cgroups that can be accessed by a common user *username* by specifying **-d** and **-U** *username*. 
A common user can delete existing Class Cgroups by specifying **-d** and **-S** *classname*. + +## Viewing Cgroup Information + +1. View Cgroup information in configuration files. + + ``` + gs_cgroup -p + ``` + + Cgroup configuration + + ``` + gs_cgroup -p + + Top Group information is listed: + GID: 0 Type: Top Percent(%): 1000( 50) Name: Root Cores: 0-47 + GID: 1 Type: Top Percent(%): 833( 83) Name: mogdb:omm Cores: 0-20 + GID: 2 Type: Top Percent(%): 333( 40) Name: Backend Cores: 0-20 + GID: 3 Type: Top Percent(%): 499( 60) Name: Class Cores: 0-20 + + Backend Group information is listed: + GID: 4 Type: BAKWD Name: DefaultBackend TopGID: 2 Percent(%): 266(80) Cores: 0-20 + GID: 5 Type: BAKWD Name: Vacuum TopGID: 2 Percent(%): 66(20) Cores: 0-20 + + Class Group information is listed: + GID: 20 Type: CLASS Name: DefaultClass TopGID: 3 Percent(%): 166(20) MaxLevel: 1 RemPCT: 100 Cores: 0-20 + GID: 21 Type: CLASS Name: class1 TopGID: 3 Percent(%): 332(40) MaxLevel: 2 RemPCT: 70 Cores: 0-20 + + Workload Group information is listed: + GID: 86 Type: DEFWD Name: grp1:2 ClsGID: 21 Percent(%): 99(30) WDLevel: 2 Quota(%): 30 Cores: 0-5 + + Timeshare Group information is listed: + GID: 724 Type: TSWD Name: Low Rate: 1 + GID: 725 Type: TSWD Name: Medium Rate: 2 + GID: 726 Type: TSWD Name: High Rate: 4 + GID: 727 Type: TSWD Name: Rush Rate: 8 + + Group Exception information is listed: + GID: 20 Type: EXCEPTION Class: DefaultClass + PENALTY: QualificationTime=1800 CPUSkewPercent=30 + + GID: 21 Type: EXCEPTION Class: class1 + PENALTY: AllCpuTime=100 QualificationTime=2400 CPUSkewPercent=90 + + GID: 86 Type: EXCEPTION Group: class1:grp1:2 + ABORT: BlockTime=1200 ElapsedTime=2400 + ``` + + The following table lists the Cgroup configuration shown in the above example. + + **Table 1** Cgroup configuration + + | GID | Type | Name | Percentage (%) | Remarks | + | :--- | :--------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | + | 0 | Top Cgroup | Root | The number **1000** indicates that the total system resources are divided into 1000 pieces.
The number **50** in the parentheses indicates 50% of I/O resources.
MogDB does not control I/O resources through Cgroups. Therefore, the following Cgroup information is only about CPU. | - | + | 1 | Top Cgroup | mogdb:omm | Only one database program runs in a cluster. The default quota of the **Gaussdb:omm** Cgroup is 833. That is, the ratio of database programs to non-database programs is 5:1 (833:167). | - | + | 2 | Top Cgroup | Backend | The number **40** in the parentheses indicates that the Backend Cgroup takes up 40% of the resources of the **Gaussdb:dbuser** Cgroup. The number **60** in the parentheses indicates that the Class Cgroup takes up 60% of the resources of the **Gaussdb:dbuser** Cgroup. | - | + | 3 | Top Cgroup | Class | The number **40** in the parentheses indicates that the Backend Cgroup takes up 40% of the resources of the **Gaussdb:dbuser** Cgroup. The number **60** in the parentheses indicates that the Class Cgroup takes up 60% of the resources of the **Gaussdb:dbuser** Cgroup. | - | + | 4 | Backend Cgroup | DefaultBackend | The numbers **80** and **20** in the parentheses indicate the percentages of Backend Cgroup resources taken by the **DefaultBackend** and **Vacuum** groups, respectively. | **TopGID**: GID (2) of the Backend Cgroup in a Top Cgroup | + | 5 | Backend Cgroup | Vacuum | The numbers **80** and **20** in the parentheses indicate the percentages of Backend Cgroup resources taken by the **DefaultBackend** and **Vacuum** groups, respectively. | **TopGID**: GID (2) of the Backend Cgroup in a Top Cgroup | + | 20 | Class Cgroup | DefaultClass | The number **20** in the parentheses indicates that the **DefaultClass** Cgroup takes up 20% of the Class Cgroup resources. The number **40** in the parentheses indicates that the **class1** Cgroup takes up 40% of the Class Cgroup resources. There are only two Class Cgroups currently. Therefore, the system resource quotas for the Class Cgroups (499) are allocated in the ratio of 20:40 (166:332). | 1. **TopGID**: GID (3) of the Class Cgroups in a Top Cgroup to which the **DefaultClass** and **class1** Cgroups belong.
2. **MaxLevel**:
maximum number of levels for Workload Cgroups in a Class Cgroup. This parameter is set to **1** for **DefaultClass** because it has no Workload Cgroups.
3. **RemPCT**:
percentage of remaining resources in a Class Cgroup after its resources are allocated to Workload Cgroups. For example, the percentage of remaining resources in the **class1** Cgroup is 70%. | + | 21 | Class Cgroup | class1 | The number **20** in the parentheses indicates that the **DefaultClass** Cgroup takes up 20% of the Class Cgroup resources. The number **40** in the parentheses indicates that the **class1** Cgroup takes up 40% of the Class Cgroup resources. There are only two Class Cgroups currently. Therefore, the system resource quotas for the Class Cgroups (499) are allocated in the ratio of 20:40 (166:332). | 1. **TopGID**: GID (3) of the Class Cgroups in a Top Cgroup to which the **DefaultClass** and **class1** Cgroups belong.
2. **MaxLevel**:
maximum number of levels for Workload Cgroups in a Class Cgroup. This parameter is set to **1** for **DefaultClass** because it has no Workload Cgroups.
3. **RemPCT**:
percentage of remaining resources in a Class Cgroup after its resources are allocated to Workload Cgroups. For example, the percentage of remaining resources in the **class1** Cgroup is 70%. | + | 86 | Workload Cgroup | grp1:2
(This name is composed of the name of a Workload Cgroup and its level in the Class Cgroup. This **grp1:2** Cgroup is the first Workload Cgroup under the **class1** Cgroup, and its level is 2. Each Class Cgroup contains a maximum of 10 levels of Workload Cgroups.) | In this example, this Workload Cgroup takes up 30% of **class1** Cgroup resources (332 x 30% = 99). | 1. **ClsGID**: GID of the **class1** Cgroup to which the Workload Cgroup belongs.
2. **WDLevel**: level of the Workload Cgroup in the corresponding Class Cgroup. | + | 724 | Timeshare Cgroup | Low | - | **Rate**: rate of resources allocated to a Timeshare Cgroup. The **Low** Cgroup has the minimum rate 1 and the **Rush** Cgroup has the maximum rate 8. The resource rate for **Rush**:**High**:**Medium**:**Low** is **8**:**4**:**2**:**1** under a Timeshare Cgroup. | + | 725 | Timeshare Cgroup | Medium | - | **Rate**: rate of resources allocated to a Timeshare Cgroup. The **Low** Cgroup has the minimum rate 1 and the **Rush** Cgroup has the maximum rate 8. The resource rate for **Rush**:**High**:**Medium**:**Low** is **8**:**4**:**2**:**1** under a Timeshare Cgroup. | + | 726 | Timeshare Cgroup | High | - | **Rate**: rate of resources allocated to a Timeshare Cgroup. The **Low** Cgroup has the minimum rate 1 and the **Rush** Cgroup has the maximum rate 8. The resource rate for **Rush**:**High**:**Medium**:**Low** is **8**:**4**:**2**:**1** under a Timeshare Cgroup. | + | 727 | Timeshare Cgroup | Rush | - | **Rate**: rate of resources allocated to a Timeshare Cgroup. The **Low** Cgroup has the minimum rate 1 and the **Rush** Cgroup has the maximum rate 8. The resource rate for **Rush**:**High**:**Medium**:**Low** is **8**:**4**:**2**:**1** under a Timeshare Cgroup. | + +2. View the Cgroup tree in the OS. + + Run the following command to query the structure of the Cgroup tree: + + ``` + gs_cgroup -P + ``` + + In the command output, **shares** indicates the value of **cpu.shares**, which specifies the dynamic quota of CPU resources in the OS, and **cpus** indicates the value of **cpuset.cpus**, which specifies the dynamic quota of CPUSET resources in the OS (number of cores that a Cgroup can use). + + ```bash + Mount Information: + cpu:/dev/cgroup/cpu + blkio:/dev/cgroup/blkio + cpuset:/dev/cgroup/cpuset + cpuacct:/dev/cgroup/cpuacct + + Group Tree Information: + - Gaussdb:wangrui (shares: 5120, cpus: 0-20, weight: 1000) + - Backend (shares: 4096, cpus: 0-20, weight: 400) + - Vacuum (shares: 2048, cpus: 0-20, weight: 200) + - DefaultBackend (shares: 8192, cpus: 0-20, weight: 800) + - Class (shares: 6144, cpus: 0-20, weight: 600) + - class1 (shares: 4096, cpus: 0-20, weight: 400) + - RemainWD:1 (shares: 1000, cpus: 0-20, weight: 100) + - RemainWD:2 (shares: 7000, cpus: 0-20, weight: 700) + - Timeshare (shares: 1024, cpus: 0-20, weight: 500) + - Rush (shares: 8192, cpus: 0-20, weight: 800) + - High (shares: 4096, cpus: 0-20, weight: 400) + - Medium (shares: 2048, cpus: 0-20, weight: 200) + - Low (shares: 1024, cpus: 0-20, weight: 100) + - grp1:2 (shares: 3000, cpus: 0-5, weight: 300) + - TopWD:1 (shares: 9000, cpus: 0-20, weight: 900) + - DefaultClass (shares: 2048, cpus: 0-20, weight: 200) + - RemainWD:1 (shares: 1000, cpus: 0-20, weight: 100) + - Timeshare (shares: 1024, cpus: 0-20, weight: 500) + - Rush (shares: 8192, cpus: 0-20, weight: 800) + - High (shares: 4096, cpus: 0-20, weight: 400) + - Medium (shares: 2048, cpus: 0-20, weight: 200) + - Low (shares: 1024, cpus: 0-20, weight: 100) + - TopWD:1 (shares: 9000, cpus: 0-20, weight: 900) + ``` + +3. Obtain the Cgroup configuration in a system view. + + a. Use gsql to access a MogDB database. + + b. Obtain the configuration about all Cgroups in the system. 
+ + ```sql + MogDB=# SELECT * FROM gs_all_control_group_info; ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr-report.md b/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr-report.md index afbcda0f..ce9b0ed8 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr-report.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr-report.md @@ -1,385 +1,385 @@ ---- -title: Viewing WDRs -summary: Viewing WDRs -author: Guo Huan -date: 2022-04-23 ---- - -# Viewing WDRs - -## Database Stat - -The following table describes columns in the Database Stat report. - -**Table 1** Columns in the Database Stat report - -| Column | Description | -| :------------- | :----------------------------------------------------------- | -| DB Name | Database name. | -| Backends | Number of backends connected to this database. | -| Xact Commit | Number of transactions in this database that have been committed. | -| Xact Rollback | Number of transactions in this database that have been rolled back. | -| Blks Read | Number of disk blocks read in this database. | -| Blks Hit | Number of times that disk blocks have been found in the cache. | -| Tuple Returned | Number of rows sequentially scanned. | -| Tuple Fetched | Number of rows randomly scanned. | -| Tuple Inserted | Number of rows inserted by queries in this database. | -| Tuple Updated | Number of rows updated by queries in this database. | -| Tup Deleted | Number of rows deleted by queries in this database. | -| Conflicts | Number of queries canceled due to conflicts. | -| Temp Files | Number of temporary files created by queries in this database. | -| Temp Bytes | Total amount of data written to temporary files by queries in this database. | -| Deadlocks | Number of deadlocks detected in this database. | -| Blk Read Time | Time spent reading data file blocks by backends in this database (unit: ms). | -| Blk Write Time | Time spent writing into data file blocks by backends in this database (unit: ms). | -| Stats Reset | Time at which the current statistics were reset. | - -## Load Profile - -The following table lists metrics in the Load Profile report. - -**Table 2** Metrics in the Load Profile report - -| Metric | Description | -| :------------------------ | :--------------------------------------------------------- | -| DB Time(us) | Total elapsed time of a job. | -| CPU Time(us) | Total CPU time used for job running. | -| Redo size(blocks) | Size of the generated WAL (blocks). | -| Logical read (blocks) | Number of logical reads (blocks) on a table or an index. | -| Physical read (blocks) | Number of physical reads (blocks) on a table or an index. | -| Physical write (blocks) | Number of physical writes (blocks) on a table or an index. | -| Read IO requests | Number of times that a table or an index is read. | -| Write IO requests | Number of times that a table or an index is written. | -| Read IO (MB) | Table or index read size (MB). | -| Write IO (MB) | Table or index write size (MB). | -| Logons | Number of logins. | -| Executes (SQL) | Number of times the SQL statement is executed. | -| Rollbacks | Number of rolled-back transactions. | -| Transactions | Number of transactions. | -| SQL response time P95(us) | Response time of 95% SQL statements. | -| SQL response time P80(us) | Response time of 80% SQL statements. | - -## Instance Efficiency Percentages - -The following table lists metrics in the Instance Efficiency Percentages report. 
- -**Table 3** Metrics in the Instance Efficiency Percentages report - -| Metric | Description | -| :---------------- | :----------------------------------------------------------- | -| Buffer Hit % | Hit ratio of the buffer pool. | -| Effective CPU % | Ratio of the CPU time to the DB time. | -| WalWrite NoWait % | Ratio of the number of events that access the WAL buffer to the total number of wait events. | -| Soft Parse % | Ratio of the number of soft parsing times to the total number of parsing times. | -| Non-Parse CPU % | Ratio of the non-parse time to the total execution time. | - -## Top 10 Events by Total Wait Time - -The following table lists columns in the Top 10 Events by Total Wait Time report. - -**Table 4** Columns in the Top 10 Events by Total Wait Time report - -| Column | Description | -| :------------------ | :---------------------------------- | -| Event | Name of a wait event. | -| Waits | Number of wait times. | -| Total Wait Time(us) | Total wait time, in microseconds. | -| Avg Wait Time(us) | Average wait time, in microseconds. | -| Type | Wait event type. | - -## Wait Classes by Total Wait Time - -The following table lists columns in the Wait Classes by Total Wait Time report. - -**Table 5** Columns in the Wait Classes by Total Wait Time report - -| Column | Description | -| :------------------ | :----------------------------------------------------------- | -| Type | Wait events are classified as follows:
- STATUS。
- LWLOCK_EVENT。
- LOCK_EVENT。
- IO_EVENT。 | -| Waits | Number of wait times. | -| Total Wait Time(us) | Total wait time, in microseconds. | -| Avg Wait Time(us) | Average wait time, in microseconds. | - -## Host CPU - -The following table describes columns in the Host CPU report. - -**Table 6** Columns in the Host CPU report - -| Column | Description | -| :----------------- | :----------------------------------------------- | -| Cpus | Number of processors. | -| Cores | Number of CPU cores. | -| Sockets | Number of CPU sockets. | -| Load Average Begin | Load average value of the start snapshot. | -| Load Average End | Load average value of the end snapshot. | -| %User | Percentage of CPU time spent in the user mode. | -| %System | Percentage of CPU time spent in the kernel mode. | -| %WIO | Percentage of CPU time spent in wait I/O. | -| %Idle | Percentage of CPU idle time. | - -## IO Profile - -The following table lists metrics in the IO Profile report. - -**Table 7** Metrics in the IO Profile report - -| Metric | Description | -| :---------------- | :---------------------------------- | -| Database requests | Number of database I/O times. | -| Database (MB) | Database I/O data volume. | -| Database (blocks) | Number of database I/O data blocks. | -| Redo requests | Number of redo I/O times. | -| Redo (MB) | Redo I/O data volume. | - -## Memory Statistics - -The following table lists metrics in the Memory Statistics report. - -**Table 8** Columns in the Memory Statistics report - -| Metric | Description | -| :------------------ | :------------------------------------------------------ | -| shared_used_memory | Size of used shared memory (MB). | -| max_shared_memory | Maximum shared memory (MB). | -| process_used_memory | Memory used by processes (MB). | -| max_process_memory | Maximum memory that can be allocated to a process (MB). | - -## Time Model - -The following table describes metrics in the Time Model report. - -**Table 9** Metrics in the Time Model report - -| Metric | Description | -| :------------------ | :----------------------------------------------------------- | -| DB_TIME | Total end-to-end wall time consumed by all threads (unit: μs). | -| EXECUTION_TIME | Total time consumed on the executor (unit: μs). | -| PL_EXECUTION_TIME | Total time consumed for executing PL/SQL statements (unit: μs). | -| CPU_TIME | Total CPU time consumed by all threads (unit: μs). | -| PLAN_TIME | Total time consumed for generating an execution plan (unit: μs). | -| REWRITE_TIME | Total time consumed on query rewriting (unit: μs). | -| PL_COMPILATION_TIME | Total time consumed for SQL compilation (unit: μs). | -| PARSE_TIME | Total time consumed for parsing SQL statements (unit: μs). | -| NET_SEND_TIME | Total time consumed for sending data over network (unit: μs). | -| DATA_IO_TIME | Total time consumed for data read and write (unit: μs). | - -## SQL Statistics - -The following table describes columns in the SQL Statistics report. - -**Table 10** Columns in the SQL Statistics report - -| Column | Description | -| :-------------------- | :----------------------------------------------------------- | -| Unique SQL Id | ID of the normalized SQL statement. | -| Node Name | Node name. | -| User Name | Username. | -| Tuples Read | Number of tuples that are read. | -| Calls | Number of calls. | -| Min Elapse Time(us) | Minimum execution time (unit: us). | -| Max Elapse Time(us) | Maximum execution time (unit: us). | -| Total Elapse Time(us) | Total execution time (unit: us). 
| -| Avg Elapse Time(us) | Average execution time (unit: us). | -| Returned Rows | Number of rows returned by SELECT. | -| Tuples Affected | Number of rows affected by INSERT, UPDATE, and DELETE. | -| Logical Read | Number of logical reads on the buffer. | -| Physical Read | Number of physical reads on the buffer. | -| CPU Time(us) | CPU time (unit: us). | -| Data IO Time(us) | Time spent on I/O (unit: us). | -| Sort Count | Number of sorting execution times. | -| Sort Time(us) | Sorting execution time (unit: us). | -| Sort Mem Used(KB) | Size of work memory used during sorting (unit: KB). | -| Sort Spill Count | Number of file writes when data is flushed to disks during sorting. | -| Sort Spill Size(KB) | File size used when data is flushed to disks during sorting (unit: KB). | -| Hash Count | Number of hashing execution times. | -| Hash Time(us) | Hashing execution time (unit: us). | -| Hash Mem Used(KB) | Size of work memory used during hashing (unit: KB). | -| Hash Spill Count | Number of file writes when data is flushed to disks during hashing. | -| Hash Spill Size(KB) | File size used when data is flushed to disks during hashing (unit: KB). | -| SQL Text | Normalized SQL character string. | - -## Wait Events - -The following table describes columns in the Wait Events report. - -**Table 11** Columns in the Wait Events report - -| Column | Description | -| :------------------- | :----------------------------------------------------------- | -| Type | Wait events are classified as follows:
- STATUS
- LWLOCK_EVENT
- LOCK_EVENT
- IO_EVENT | -| Event | Name of a wait event. | -| Total Wait Time (us) | Total wait time (unit: us). | -| Waits | Total number of wait times. | -| Failed Waits | Number of wait failures. | -| Avg Wait Time (us) | Average wait time (unit: us). | -| Max Wait Time (us) | Maximum wait time (unit: us). | - -## Cache IO Stats - -Cache IO Stats contains two tables: User table and User index. The columns in the tables are described as follows. - -### User table IO activity ordered by heap blks hit ratio - -**Table 12** Columns in the User table IO activity ordered by heap blks hit ratio report - -| Column | Description | -| :------------------- | :----------------------------------------------------------- | -| DB Name | Database name. | -| Schema Name | Schema name. | -| Table Name | Table name. | -| %Heap Blks Hit Ratio | Buffer pool hit ratio of the table. | -| Heap Blks Read | Number of disk blocks read from the table. | -| Heap Blks Hit | Number of cache hits in the table. | -| Idx Blks Read | Number of disk blocks read from all indexes on the table. | -| Idx Blks Hit | Number of cache hits in the table . | -| Toast Blks Read | Number of disk blocks read from the TOAST table (if any) in the table. | -| Toast Blks Hit | Number of buffer hits in the TOAST table (if any) in the table. | -| Tidx Blks Read | Number of disk blocks read from the TOAST table index (if any) in the table. | -| Tidx Blks Hit | Number of buffer hits in the TOAST table index (if any) in the table. | - -### User index IO activity ordered by idx blks hit ratio - -**Table 13** Columns in the User index IO activity ordered by idx blks hit ratio report - -| Column | Description | -| :------------------ | :-------------------------------------------------------- | -| DB Name | Database name. | -| Schema Name | Schema name. | -| Table Name | Table name. | -| Index Name | Index name. | -| %Idx Blks Hit Ratio | Index hit ratio. | -| Idx Blks Read | Number of disk blocks read from all indexes on the table. | -| Idx Blks Hit | Number of cache hits in the table. | - -## Utility status - -**Utility status** contains two tables: **Replication slot** and **Replication stat**. Columns in the tables are described as follows: - -### Replication slot - -**Table 14** Columns in the Replication slot report - -| Column | Description | -| :------------ | :--------------------------------------------- | -| Slot Name | Replication node name. | -| Slot Type | Type of the replication node. | -| DB Name | Name of the database on the replication node. | -| Active | Replication node status. | -| Xmin | Transaction ID of the replication node. | -| Restart Lsn | Xlog file information on the replication node. | -| Dummy Standby | Replication node as a dummy standby. | - -### Replication stat - -**Table 15** Columns in the Replication stat report - -| Column | Description | -| :----------------------- | :---------------------------------------- | -| Thread Id | PID of the thread. | -| Usesys Id | User system ID. | -| Username | Username. | -| Application Name | Application name. | -| Client Addr | Client address. | -| Client Hostname | Client host name. | -| Client Port | Port of the client. | -| Backend Start | Start time of an application. | -| State | Log replication status. | -| Sender Sent Location | Location where the sender sends logs. | -| Receiver Write Location | Location where the receiver writes logs. | -| Receiver Flush Location | Location where the receiver flushes logs. 
| -| Receiver Replay Location | Location where the receiver replays logs. | -| Sync Priority | Synchronization priority. | -| Sync State | Synchronization status. | - -## Object stats - -Object stats contains three tables: User Tables stats, User index stats, and Bad lock stats. Columns in the tables are described as follows: - -### User Tables stats - -**Table 16** Columns in the User Tables stats report - -| Column | Description | -| :---------------- | :----------------------------------------------------------- | -| DB Name | Database name. | -| Schema | Schema name. | -| Relname | Relation name. | -| Seq Scan | Number of sequential scans initiated on this table. | -| Seq Tup Read | Number of live rows fetched by sequential scans. | -| Index Scan | Number of index scans initiated on the table. | -| Index Tup Fetch | Number of live rows fetched by index scans. | -| Tuple Insert | Number of rows inserted. | -| Tuple Update | Number of rows updated. | -| Tuple Delete | Number of rows deleted. | -| Tuple Hot Update | Number of rows HOT updated (with no separate index updated). | -| Live Tuple | Estimated number of live rows. | -| Dead Tuple | Estimated number of dead rows. | -| Last Vacuum | Last time at which this table was manually vacuumed (not counting **VACUUM FULL**). | -| Last Autovacuum | Last time at which this table was vacuumed by the autovacuum daemon. | -| Last Analyze | Last time at which this table was manually analyzed. | -| Last Autoanalyze | Last time at which this table was analyzed by the autovacuum daemon. | -| Vacuum Count | Number of times the table has been manually vacuumed (not counting **VACUUM FULL**). | -| Autovacuum Count | Number of times the table has been vacuumed by the autovacuum daemon. | -| Analyze Count | Number of times the table has been manually analyzed. | -| Autoanalyze Count | Number of times the table has been analyzed by the autovacuum daemon. | - -### User index stats - -**Table 17** Columns in the User index stats report - -| Column | Description | -| :---------------- | :----------------------------------------------------------- | -| DB Name | Database name. | -| Schema | Schema name. | -| Relname | Relation name. | -| Index Relname | Index name. | -| Index Scan | Number of index scans initiated on the index. | -| Index Tuple Read | Number of index entries returned by scans on the index. | -| Index Tuple Fetch | Number of live table rows fetched by simple index scans using the index. | - -### Bad lock stats - -**Table 18** Columns in the Bad lock stats report - -| Column | Description | -| :------------ | :--------------------- | -| DB Id | OID of the database. | -| Tablespace Id | Tablespace OID. | -| Relfilenode | File object ID. | -| Fork Number | File type. | -| Error Count | Number of failures. | -| First Time | First occurrence time. | -| Last Time | Last occurrence time. | - -## Configuration settings - -The following table describes columns in the Configuration settings report. - -**Table 19** Columns in the Configuration settings report - -| Column | Description | -| :------------ | :------------------------------------------------------ | -| Name | GUC name. | -| Abstract | GUC description. | -| Type | Data type. | -| Curent Value | Current value. | -| Min Value | Valid minimum value. | -| Max Value | Valid maximum value. | -| Category | GUC type. | -| Enum Values | All enumerated values. | -| Default Value | Default parameter value used upon the database startup. 
| -| Reset Value | Default parameter value used upon the database reset. | - -## SQL Detail - -The following table describes columns in the SQL Detail report. - -**Table 20** Columns in the SQL Detail report - -| Column | Description | -| :------------ | :---------------------------------------------------- | -| Unique SQL Id | ID of the normalized SQL statement. | -| User Name | Username. | -| Node Name | Node name. This column is not displayed in node mode. | -| SQL Text | Normalized SQL text. | +--- +title: Viewing WDRs +summary: Viewing WDRs +author: Guo Huan +date: 2022-04-23 +--- + +# Viewing WDRs + +## Database Stat + +The following table describes columns in the Database Stat report. + +**Table 1** Columns in the Database Stat report + +| Column | Description | +| :------------- | :----------------------------------------------------------- | +| DB Name | Database name. | +| Backends | Number of backends connected to this database. | +| Xact Commit | Number of transactions in this database that have been committed. | +| Xact Rollback | Number of transactions in this database that have been rolled back. | +| Blks Read | Number of disk blocks read in this database. | +| Blks Hit | Number of times that disk blocks have been found in the cache. | +| Tuple Returned | Number of rows sequentially scanned. | +| Tuple Fetched | Number of rows randomly scanned. | +| Tuple Inserted | Number of rows inserted by queries in this database. | +| Tuple Updated | Number of rows updated by queries in this database. | +| Tup Deleted | Number of rows deleted by queries in this database. | +| Conflicts | Number of queries canceled due to conflicts. | +| Temp Files | Number of temporary files created by queries in this database. | +| Temp Bytes | Total amount of data written to temporary files by queries in this database. | +| Deadlocks | Number of deadlocks detected in this database. | +| Blk Read Time | Time spent reading data file blocks by backends in this database (unit: ms). | +| Blk Write Time | Time spent writing into data file blocks by backends in this database (unit: ms). | +| Stats Reset | Time at which the current statistics were reset. | + +## Load Profile + +The following table lists metrics in the Load Profile report. + +**Table 2** Metrics in the Load Profile report + +| Metric | Description | +| :------------------------ | :--------------------------------------------------------- | +| DB Time(us) | Total elapsed time of a job. | +| CPU Time(us) | Total CPU time used for job running. | +| Redo size(blocks) | Size of the generated WAL (blocks). | +| Logical read (blocks) | Number of logical reads (blocks) on a table or an index. | +| Physical read (blocks) | Number of physical reads (blocks) on a table or an index. | +| Physical write (blocks) | Number of physical writes (blocks) on a table or an index. | +| Read IO requests | Number of times that a table or an index is read. | +| Write IO requests | Number of times that a table or an index is written. | +| Read IO (MB) | Table or index read size (MB). | +| Write IO (MB) | Table or index write size (MB). | +| Logons | Number of logins. | +| Executes (SQL) | Number of times the SQL statement is executed. | +| Rollbacks | Number of rolled-back transactions. | +| Transactions | Number of transactions. | +| SQL response time P95(us) | Response time of 95% SQL statements. | +| SQL response time P80(us) | Response time of 80% SQL statements. 
| + +## Instance Efficiency Percentages + +The following table lists metrics in the Instance Efficiency Percentages report. + +**Table 3** Metrics in the Instance Efficiency Percentages report + +| Metric | Description | +| :---------------- | :----------------------------------------------------------- | +| Buffer Hit % | Hit ratio of the buffer pool. | +| Effective CPU % | Ratio of the CPU time to the DB time. | +| WalWrite NoWait % | Ratio of the number of events that access the WAL buffer to the total number of wait events. | +| Soft Parse % | Ratio of the number of soft parsing times to the total number of parsing times. | +| Non-Parse CPU % | Ratio of the non-parse time to the total execution time. | + +## Top 10 Events by Total Wait Time + +The following table lists columns in the Top 10 Events by Total Wait Time report. + +**Table 4** Columns in the Top 10 Events by Total Wait Time report + +| Column | Description | +| :------------------ | :---------------------------------- | +| Event | Name of a wait event. | +| Waits | Number of wait times. | +| Total Wait Time(us) | Total wait time, in microseconds. | +| Avg Wait Time(us) | Average wait time, in microseconds. | +| Type | Wait event type. | + +## Wait Classes by Total Wait Time + +The following table lists columns in the Wait Classes by Total Wait Time report. + +**Table 5** Columns in the Wait Classes by Total Wait Time report + +| Column | Description | +| :------------------ | :----------------------------------------------------------- | +| Type | Wait events are classified as follows:
- STATUS
- LWLOCK_EVENT
- LOCK_EVENT
- IO_EVENT | +| Waits | Number of wait times. | +| Total Wait Time(us) | Total wait time, in microseconds. | +| Avg Wait Time(us) | Average wait time, in microseconds. | + +## Host CPU + +The following table describes columns in the Host CPU report. + +**Table 6** Columns in the Host CPU report + +| Column | Description | +| :----------------- | :----------------------------------------------- | +| Cpus | Number of processors. | +| Cores | Number of CPU cores. | +| Sockets | Number of CPU sockets. | +| Load Average Begin | Load average value of the start snapshot. | +| Load Average End | Load average value of the end snapshot. | +| %User | Percentage of CPU time spent in the user mode. | +| %System | Percentage of CPU time spent in the kernel mode. | +| %WIO | Percentage of CPU time spent in wait I/O. | +| %Idle | Percentage of CPU idle time. | + +## IO Profile + +The following table lists metrics in the IO Profile report. + +**Table 7** Metrics in the IO Profile report + +| Metric | Description | +| :---------------- | :---------------------------------- | +| Database requests | Number of database I/O times. | +| Database (MB) | Database I/O data volume. | +| Database (blocks) | Number of database I/O data blocks. | +| Redo requests | Number of redo I/O times. | +| Redo (MB) | Redo I/O data volume. | + +## Memory Statistics + +The following table lists metrics in the Memory Statistics report. + +**Table 8** Columns in the Memory Statistics report + +| Metric | Description | +| :------------------ | :------------------------------------------------------ | +| shared_used_memory | Size of used shared memory (MB). | +| max_shared_memory | Maximum shared memory (MB). | +| process_used_memory | Memory used by processes (MB). | +| max_process_memory | Maximum memory that can be allocated to a process (MB). | + +## Time Model + +The following table describes metrics in the Time Model report. + +**Table 9** Metrics in the Time Model report + +| Metric | Description | +| :------------------ | :----------------------------------------------------------- | +| DB_TIME | Total end-to-end wall time consumed by all threads (unit: μs). | +| EXECUTION_TIME | Total time consumed on the executor (unit: μs). | +| PL_EXECUTION_TIME | Total time consumed for executing PL/SQL statements (unit: μs). | +| CPU_TIME | Total CPU time consumed by all threads (unit: μs). | +| PLAN_TIME | Total time consumed for generating an execution plan (unit: μs). | +| REWRITE_TIME | Total time consumed on query rewriting (unit: μs). | +| PL_COMPILATION_TIME | Total time consumed for SQL compilation (unit: μs). | +| PARSE_TIME | Total time consumed for parsing SQL statements (unit: μs). | +| NET_SEND_TIME | Total time consumed for sending data over network (unit: μs). | +| DATA_IO_TIME | Total time consumed for data read and write (unit: μs). | + +## SQL Statistics + +The following table describes columns in the SQL Statistics report. + +**Table 10** Columns in the SQL Statistics report + +| Column | Description | +| :-------------------- | :----------------------------------------------------------- | +| Unique SQL Id | ID of the normalized SQL statement. | +| Node Name | Node name. | +| User Name | Username. | +| Tuples Read | Number of tuples that are read. | +| Calls | Number of calls. | +| Min Elapse Time(us) | Minimum execution time (unit: us). | +| Max Elapse Time(us) | Maximum execution time (unit: us). | +| Total Elapse Time(us) | Total execution time (unit: us). 
| +| Avg Elapse Time(us) | Average execution time (unit: us). | +| Returned Rows | Number of rows returned by SELECT. | +| Tuples Affected | Number of rows affected by INSERT, UPDATE, and DELETE. | +| Logical Read | Number of logical reads on the buffer. | +| Physical Read | Number of physical reads on the buffer. | +| CPU Time(us) | CPU time (unit: us). | +| Data IO Time(us) | Time spent on I/O (unit: us). | +| Sort Count | Number of sorting execution times. | +| Sort Time(us) | Sorting execution time (unit: us). | +| Sort Mem Used(KB) | Size of work memory used during sorting (unit: KB). | +| Sort Spill Count | Number of file writes when data is flushed to disks during sorting. | +| Sort Spill Size(KB) | File size used when data is flushed to disks during sorting (unit: KB). | +| Hash Count | Number of hashing execution times. | +| Hash Time(us) | Hashing execution time (unit: us). | +| Hash Mem Used(KB) | Size of work memory used during hashing (unit: KB). | +| Hash Spill Count | Number of file writes when data is flushed to disks during hashing. | +| Hash Spill Size(KB) | File size used when data is flushed to disks during hashing (unit: KB). | +| SQL Text | Normalized SQL character string. | + +## Wait Events + +The following table describes columns in the Wait Events report. + +**Table 11** Columns in the Wait Events report + +| Column | Description | +| :------------------- | :----------------------------------------------------------- | +| Type | Wait events are classified as follows:
- STATUS
- LWLOCK_EVENT
- LOCK_EVENT
- IO_EVENT | +| Event | Name of a wait event. | +| Total Wait Time (us) | Total wait time (unit: us). | +| Waits | Total number of wait times. | +| Failed Waits | Number of wait failures. | +| Avg Wait Time (us) | Average wait time (unit: us). | +| Max Wait Time (us) | Maximum wait time (unit: us). | + +## Cache IO Stats + +Cache IO Stats contains two tables: User table and User index. The columns in the tables are described as follows. + +### User table IO activity ordered by heap blks hit ratio + +**Table 12** Columns in the User table IO activity ordered by heap blks hit ratio report + +| Column | Description | +| :------------------- | :----------------------------------------------------------- | +| DB Name | Database name. | +| Schema Name | Schema name. | +| Table Name | Table name. | +| %Heap Blks Hit Ratio | Buffer pool hit ratio of the table. | +| Heap Blks Read | Number of disk blocks read from the table. | +| Heap Blks Hit | Number of cache hits in the table. | +| Idx Blks Read | Number of disk blocks read from all indexes on the table. | +| Idx Blks Hit | Number of cache hits in all indexes on the table. | +| Toast Blks Read | Number of disk blocks read from the TOAST table (if any) in the table. | +| Toast Blks Hit | Number of buffer hits in the TOAST table (if any) in the table. | +| Tidx Blks Read | Number of disk blocks read from the TOAST table index (if any) in the table. | +| Tidx Blks Hit | Number of buffer hits in the TOAST table index (if any) in the table. | + +### User index IO activity ordered by idx blks hit ratio + +**Table 13** Columns in the User index IO activity ordered by idx blks hit ratio report + +| Column | Description | +| :------------------ | :-------------------------------------------------------- | +| DB Name | Database name. | +| Schema Name | Schema name. | +| Table Name | Table name. | +| Index Name | Index name. | +| %Idx Blks Hit Ratio | Index hit ratio. | +| Idx Blks Read | Number of disk blocks read from the index. | +| Idx Blks Hit | Number of cache hits in the index. | + +## Utility status + +**Utility status** contains two tables: **Replication slot** and **Replication stat**. Columns in the tables are described as follows: + +### Replication slot + +**Table 14** Columns in the Replication slot report + +| Column | Description | +| :------------ | :--------------------------------------------- | +| Slot Name | Replication node name. | +| Slot Type | Type of the replication node. | +| DB Name | Name of the database on the replication node. | +| Active | Replication node status. | +| Xmin | Transaction ID of the replication node. | +| Restart Lsn | Xlog file information on the replication node. | +| Dummy Standby | Replication node as a dummy standby. | + +### Replication stat + +**Table 15** Columns in the Replication stat report + +| Column | Description | +| :----------------------- | :---------------------------------------- | +| Thread Id | PID of the thread. | +| Usesys Id | User system ID. | +| Username | Username. | +| Application Name | Application name. | +| Client Addr | Client address. | +| Client Hostname | Client host name. | +| Client Port | Port of the client. | +| Backend Start | Start time of an application. | +| State | Log replication status. | +| Sender Sent Location | Location where the sender sends logs. | +| Receiver Write Location | Location where the receiver writes logs. | +| Receiver Flush Location | Location where the receiver flushes logs. 
| +| Receiver Replay Location | Location where the receiver replays logs. | +| Sync Priority | Synchronization priority. | +| Sync State | Synchronization status. | + +## Object stats + +Object stats contains three tables: User Tables stats, User index stats, and Bad lock stats. Columns in the tables are described as follows: + +### User Tables stats + +**Table 16** Columns in the User Tables stats report + +| Column | Description | +| :---------------- | :----------------------------------------------------------- | +| DB Name | Database name. | +| Schema | Schema name. | +| Relname | Relation name. | +| Seq Scan | Number of sequential scans initiated on this table. | +| Seq Tup Read | Number of live rows fetched by sequential scans. | +| Index Scan | Number of index scans initiated on the table. | +| Index Tup Fetch | Number of live rows fetched by index scans. | +| Tuple Insert | Number of rows inserted. | +| Tuple Update | Number of rows updated. | +| Tuple Delete | Number of rows deleted. | +| Tuple Hot Update | Number of rows HOT updated (with no separate index updated). | +| Live Tuple | Estimated number of live rows. | +| Dead Tuple | Estimated number of dead rows. | +| Last Vacuum | Last time at which this table was manually vacuumed (not counting **VACUUM FULL**). | +| Last Autovacuum | Last time at which this table was vacuumed by the autovacuum daemon. | +| Last Analyze | Last time at which this table was manually analyzed. | +| Last Autoanalyze | Last time at which this table was analyzed by the autovacuum daemon. | +| Vacuum Count | Number of times the table has been manually vacuumed (not counting **VACUUM FULL**). | +| Autovacuum Count | Number of times the table has been vacuumed by the autovacuum daemon. | +| Analyze Count | Number of times the table has been manually analyzed. | +| Autoanalyze Count | Number of times the table has been analyzed by the autovacuum daemon. | + +### User index stats + +**Table 17** Columns in the User index stats report + +| Column | Description | +| :---------------- | :----------------------------------------------------------- | +| DB Name | Database name. | +| Schema | Schema name. | +| Relname | Relation name. | +| Index Relname | Index name. | +| Index Scan | Number of index scans initiated on the index. | +| Index Tuple Read | Number of index entries returned by scans on the index. | +| Index Tuple Fetch | Number of live table rows fetched by simple index scans using the index. | + +### Bad lock stats + +**Table 18** Columns in the Bad lock stats report + +| Column | Description | +| :------------ | :--------------------- | +| DB Id | OID of the database. | +| Tablespace Id | Tablespace OID. | +| Relfilenode | File object ID. | +| Fork Number | File type. | +| Error Count | Number of failures. | +| First Time | First occurrence time. | +| Last Time | Last occurrence time. | + +## Configuration settings + +The following table describes columns in the Configuration settings report. + +**Table 19** Columns in the Configuration settings report + +| Column | Description | +| :------------ | :------------------------------------------------------ | +| Name | GUC name. | +| Abstract | GUC description. | +| Type | Data type. | +| Current Value | Current value. | +| Min Value | Valid minimum value. | +| Max Value | Valid maximum value. | +| Category | GUC type. | +| Enum Values | All enumerated values. | +| Default Value | Default parameter value used upon the database startup. 
| +| Reset Value | Default parameter value used upon the database reset. | + +## SQL Detail + +The following table describes columns in the SQL Detail report. + +**Table 20** Columns in the SQL Detail report + +| Column | Description | +| :------------ | :---------------------------------------------------- | +| Unique SQL Id | ID of the normalized SQL statement. | +| User Name | Username. | +| Node Name | Node name. This column is not displayed in node mode. | +| SQL Text | Normalized SQL text. | diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr-snapshot-schema.md b/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr-snapshot-schema.md index b8679403..1a494118 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr-snapshot-schema.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr-snapshot-schema.md @@ -1,273 +1,273 @@ ---- -title: WDR Snapshot Schema -summary: WDR Snapshot Schema -author: Zhang Cuiping -date: 2021-03-11 ---- - -# WDR Snapshot Schema - -After the WDR snapshot function is enabled (**enable_wdr_snapshot** is set to **on**), schema **snapshot** is created in user tablespace **pg_default** in database **postgres** to flush WDR snapshot data. By default, the initial user or the **monadmin** user can access the snapshot schema. - -You can set the parameter **wdr_snapshot_retention_days** to automatically manage the snapshot lifecycle. - -
- -## Original Information Table of WDR Snapshots - -### SNAPSHOT.SNAPSHOT - -**SNAPSHOT** records the index information, start time, and end time of WDR snapshots stored in the current system. The results can only be queried in the system library, but not in the user library. - -**Table 1** SNAPSHOT attributes - -| Name | Type | Description | Example | -| :---------- | :-------- | :--------------------------- | :---------------------------- | -| snapshot_id | bigint | WDR snapshot ID | 1 | -| start_ts | timestamp | Start time of a WDR snapshot | 2019-12-28 17:11:27.423742+08 | -| end_ts | timestamp | End time of a WDR snapshot | 2019-12-28 17:11:43.67726+08 | - -
- -### SNAPSHOT.TABLES_SNAP_TIMESTAMP - -**TABLES_SNAP_TIMESTAMP** records the start time and end time of data collection, as well as corresponding databases, and table objects for all stored WDR snapshots. - -**Table 2** TABLES_SNAP_TIMESTAMP attributes - -| Name | Type | Description | Example | -| :---------- | :-------- | :--------------------------------------- | :---------------------------- | -| snapshot_id | bigint | WDR snapshot ID | 1 | -| db_name | text | Database corresponding to a WDR snapshot | tpcc1000 | -| tablename | text | Table corresponding to a WDR snapshot | snap_xc_statio_all_indexes | -| start_ts | timestamp | Start time of a WDR snapshot | 2019-12-28 17:11:27.425849+08 | -| end_ts | timestamp | End time of a WDR snapshot | 2019-12-28 17:11:27.707398+08 | - -
- -### SNAP_SEQ - -**SNAP_SEQ** is an ascending sequence, which provides IDs for WDR snapshots. - -
- -## WDR Snapshot Data Table - -The naming rule of a WDR snapshot data table is **snap_{Source data table}**. - -WDR snapshot data tables come from all views in **DBE_PERF** Schema. - -All WDR Snapshot data tables can be queried by running the following command. - -```sql -select * from pg_catalog.pg_tables where schemaname='snapshot'; -``` - -The following table lists all WDR Snapshot data tables and related introduction pages for your reference. - -| schemaname | tablename | -| ---------- | ------------------------------------------------------------ | -| snapshot | [tables_snap_timestamp](#tables_snap_timestamp) | -| snapshot | [snapshot](#SNAPSHOT) | -| snapshot | [snap_global_os_runtime](../../reference-guide/schema/DBE_PERF/os/GLOBAL_OS_RUNTIME.md) | -| snapshot | [snap_global_os_threads](../../reference-guide/schema/DBE_PERF/os/GLOBAL_OS_THREADS.md) | -| snapshot | [snap_global_instance_time](../../reference-guide/schema/DBE_PERF/instance/GLOBAL_INSTANCE_TIME.md) | -| snapshot | [snap_summary_workload_sql_count](../../reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_SQL_COUNT.md) | -| snapshot | [snap_summary_workload_sql_elapse_time](../../reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_SQL_ELAPSE_TIME.md) | -| snapshot | [snap_global_workload_transaction](../../reference-guide/schema/DBE_PERF/workload/GLOBAL_WORKLOAD_TRANSACTION.md) | -| snapshot | [snap_summary_workload_transaction](../../reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_TRANSACTION.md) | -| snapshot | [snap_global_thread_wait_status](../../reference-guide/schema/DBE_PERF/session-thread/GLOBAL_THREAD_WAIT_STATUS.md) | -| snapshot | [snap_global_memory_node_detail](../../reference-guide/schema/DBE_PERF/memory/GLOBAL_MEMORY_NODE_DETAIL.md) | -| snapshot | [snap_global_shared_memory_detail](../../reference-guide/schema/DBE_PERF/memory/GLOBAL_SHARED_MEMORY_DETAIL.md) | -| snapshot | [snap_global_stat_db_cu](../../reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STAT_DB_CU.md) | -| snapshot | [snap_global_stat_database](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_DATABASE.md) | -| snapshot | [snap_summary_stat_database](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_DATABASE.md) | -| snapshot | [snap_global_stat_database_conflicts](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_DATABASE_CONFLICTS.md) | -| snapshot | [snap_summary_stat_database_conflicts](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_DATABASE_CONFLICTS.md) | -| snapshot | [snap_global_stat_bad_block](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_BAD_BLOCK.md) | -| snapshot | [snap_summary_stat_bad_block](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_BAD_BLOCK.md) | -| snapshot | [snap_global_file_redo_iostat](../../reference-guide/schema/DBE_PERF/file/GLOBAL_FILE_REDO_IOSTAT.md) | -| snapshot | [snap_summary_file_redo_iostat](../../reference-guide/schema/DBE_PERF/file/SUMMARY_FILE_REDO_IOSTAT.md) | -| snapshot | [snap_global_rel_iostat](../../reference-guide/schema/DBE_PERF/file/GLOBAL_REL_IOSTAT.md) | -| snapshot | [snap_summary_rel_iostat](../../reference-guide/schema/DBE_PERF/file/SUMMARY_REL_IOSTAT.md) | -| snapshot | [snap_global_file_iostat](../../reference-guide/schema/DBE_PERF/file/GLOBAL_FILE_IOSTAT.md) | -| snapshot | [snap_summary_file_iostat](../../reference-guide/schema/DBE_PERF/file/SUMMARY_FILE_IOSTAT.md) | -| snapshot | [snap_global_replication_slots](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_REPLICATION_SLOTS.md) | -| snapshot | 
[snap_global_bgwriter_stat](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_BGWRITER_STAT.md) | -| snapshot | [snap_global_replication_stat](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_REPLICATION_STAT.md) | -| snapshot | [snap_global_transactions_running_xacts](../../reference-guide/schema/DBE_PERF/transaction/GLOBAL_TRANSACTIONS_RUNNING_XACTS.md) | -| snapshot | [snap_summary_transactions_running_xacts](../../reference-guide/schema/DBE_PERF/transaction/SUMMARY_TRANSACTIONS_RUNNING_XACTS.md) | -| snapshot | [snap_global_transactions_prepared_xacts](../../reference-guide/schema/DBE_PERF/transaction/GLOBAL_TRANSACTIONS_PREPARED_XACTS.md) | -| snapshot | [snap_summary_transactions_prepared_xacts](../../reference-guide/schema/DBE_PERF/transaction/SUMMARY_TRANSACTIONS_PREPARED_XACTS.md) | -| snapshot | [snap_summary_statement](../../reference-guide/schema/DBE_PERF/query/SUMMARY_STATEMENT.md) | -| snapshot | [snap_global_statement_count](../../reference-guide/schema/DBE_PERF/query/GLOBAL_STATEMENT_COUNT.md) | -| snapshot | [snap_summary_statement_count](../../reference-guide/schema/DBE_PERF/query/SUMMARY_STATEMENT_COUNT.md) | -| snapshot | [snap_global_config_settings](../../reference-guide/schema/DBE_PERF/configuration/GLOBAL_CONFIG_SETTINGS.md) | -| snapshot | [snap_global_wait_events](../../reference-guide/schema/DBE_PERF/wait-events/GLOBAL_WAIT_EVENTS.md) | -| snapshot | [snap_summary_user_login](../../reference-guide/schema/DBE_PERF/utility/SUMMARY_USER_LOGIN.md) | -| snapshot | [snap_global_ckpt_status](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_CKPT_STATUS.md) | -| snapshot | [snap_global_double_write_status](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_DOUBLE_WRITE_STATUS.md) | -| snapshot | [snap_global_pagewriter_status](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_PAGEWRITER_STATUS.md) | -| snapshot | [snap_global_redo_status](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_REDO_STATUS.md) | -| snapshot | [snap_global_rto_status](../../reference-guide/schema/DBE_PERF/rto/global_rto_status.md) | -| snapshot | [snap_global_recovery_status](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_RECOVERY_STATUS.md) | -| snapshot | [snap_global_threadpool_status](../../reference-guide/schema/DBE_PERF/session-thread/GLOBAL_THREADPOOL_STATUS.md) | -| snapshot | [snap_statement_responsetime_percentile](../../reference-guide/schema/DBE_PERF/query/STATEMENT_RESPONSETIME_PERCENTILE.md) | -| snapshot | [snap_global_statio_all_indexes](../../reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_INDEXES.md) | -| snapshot | [snap_summary_statio_all_indexes](../../reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_INDEXES.md) | -| snapshot | [snap_global_statio_all_sequences](../../reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_SEQUENCES.md) | -| snapshot | [snap_summary_statio_all_sequences](../../reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_SEQUENCES.md) | -| snapshot | [snap_global_statio_all_tables](../../reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_TABLES.md) | -| snapshot | [snap_summary_statio_all_tables](../../reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_TABLES.md) | -| snapshot | [snap_global_stat_all_indexes](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_ALL_INDEXES.md) | -| snapshot | [snap_summary_stat_all_indexes](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_ALL_INDEXES.md) | -| snapshot | 
[snap_summary_stat_user_functions](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_USER_FUNCTIONS.md) | -| snapshot | [snap_global_stat_user_functions](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_USER_FUNCTIONS.md) | -| snapshot | [snap_global_stat_all_tables](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_ALL_TABLES.md) | -| snapshot | [snap_summary_stat_all_tables](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_ALL_TABLES.md) | -| snapshot | [snap_class_vital_info](../../reference-guide/schema/DBE_PERF/utility/CLASS_VITAL_INFO.md) | -| snapshot | [snap_global_record_reset_time](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_RECORD_RESET_TIME.md) | - -
- -## Performance Report Generated Based on WDR Snapshot - -A performance report is generated by summarizing and collecting statistics based on WDR snapshot data tables. - -**Prerequisites** - -A report can be generated after the WDR snapshot function is enabled (that is, **enable_wdr_snapshot** is set to **on**) and the number of snapshots is greater than or equal to 2. - -**Procedure** - -1. Run the following command to create a report file: - - ``` - touch /home/om/wdrTestNode.html - ``` - -2. Run the following command to connect the postgres database. - - ```bash - gsql -d postgres -p -r - ``` - -3. Run the following command to query the generated snapshot and obtain **snapshot_id**: - - ```sql - select * from snapshot.snapshot; - ``` - -4. (Optional) Run the following command on the CCN to manually create a snapshot. If only one snapshot exists in the database or you want to view the monitoring data of the database in the current period, manually create a snapshot. This command is only available to the **sysadmin** user. - - ```sql - select create_wdr_snapshot(); - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note:** - > Run the **cm_ctl query -Cdvi** command. The command output returned in the **Central Coordinator State** part is the CCN information. - -5. Run the following commands to generate a WDR in HTML format on the local PC: - - a. Run the following commands to set the report format. **\a** indicates that table row and column symbols are not displayed. **\t** indicates that column names are not displayed. **\o** specifies an output file. - - ```bash - gsql> \a - gsql> \t - gsql> \o /home/om/wdrTestNode.html - ``` - - b. Run the following command to generate a WDR in HTML format: - - ```sql - gsql> select generate_wdr_report(begin_snap_id Oid, end_snap_id Oid, int report_type, int report_scope, int node_name ); - ``` - - Example 1: Generate a cluster-level report. - - ``` - select generate_wdr_report(1, 2, 'all', 'cluster',null); - ``` - - Example 2: Generate a report for a node. - - ``` - select generate_wdr_report(1, 2, 'all', 'node', pgxc_node_str()::cstring); - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Currently, the name of the MogDB node is fixed to **dn_6001_6002_6003**. You can also replace it with the actual node name. - - **Table 3** Parameters of the generate_wdr_report function - - | Parameter | Description | Value Range | - | :----------------------------- | :--------------------------- | :--------------------------------- | - | begin_snap_id | ID of a snapshot when a query starts, which is specified by **snapshot_id** in the **snapshot.snaoshot** table. | - | - | end_snap_id | ID of a snapshot when a query ends. By default, the value of **end_snap_id** is greater than that of **begin_snap_id table** (**snapshot_id** in the **snapshot.snapshot** table). | - | - | report_type | Type of the generated report. The value can be **summary**, **detail**, or **all**. | **summary**: Summary data
**detail**: Detailed data
**all**: summary data and detailed data | - | report_scope | Range of the generated report. The value can be **cluster** or **node**. | **cluster**: database-level information
**node**: node-level information | - | node_name | When **report\_scope** is set to **node**, set this parameter to the name of the corresponding node. (You can run the `select * from pg_node_env;` command to query the node name.)
If **report\_scope** is set to **cluster**, this parameter can be omitted, left blank, empty or set to **NULL**. | **node**: a node name in MogDB
**cluster**: This value is omitted, left blank,empty or set to **NULL**. | - - c. Run the following command to disable the output options and format the output: - - ```bash - \o \a \t - ``` - -6. View the WDR in **/home/om/** as required. - -**Table 4** WDR report - -| Item | Description | -| :---------------------------------------------------- | :----------------------------------------------------------- | -| Database Stat (database scope) | Performance statistics information in database dimensions, including transaction, write/read, row activity, write conflict, deadlock, and so on | -| Load Profile (database scope) | Performance statistics information in database dimensions, including CPU time, DB time, logical read/physical read, IO performance, login/logout, workload intensity, and workload performance, and so on | -| Instance Efficiency Percentages (database/node scope) | Buffer Hit (buffer hit rate), Effective CPU (CPU usage), WalWrite NoWait (success rate of obtaining Wal Buffer), Soft Parse (soft parsing rate), and Non-parse CPU (percentage of CPU time spent in non-parsing activities) at the database or node level | -| Top 10 Events by Total Wait Time (node scope) | Event that consumes the most time | -| Wait Classes by Total Wait Time (node scope) | Class of wait events that consume the most time | -| Host CPU (node scope) | CPU usage of the host | -| Memory Statistics (node scope) | memory statistics in the kernel | -| Object stats (node scope) | Performance statistics information in the table and index dimensions | -| Database Configuration (node scope) | Node configuration | -| SQL Statistics (node scope) | Performance statistics of a SQL statement in all dimensions, including end-to-end time, row activity, cache hit rate, CPU usage, time consumption segmentation | -| SQL Detail (node scope) | SQL statement details | - -**Examples** - -```sql ---Create a report file. -touch /home/om/wdrTestNode.html - ---Connect to the database. -gsql -d postgres -p [*Port number*] -r - ---Query the snapshots that have been generated. -MogDB=# select * from snapshot.snapshot; - snapshot_id | start_ts | end_ts --------------+-------------------------------+------------------------------- - 1 | 2020-09-07 10:20:36.763244+08 | 2020-09-07 10:20:42.166511+08 - 2 | 2020-09-07 10:21:13.416352+08 | 2020-09-07 10:21:19.470911+08 -(2 rows) - - ---Generate the formatted performance report **wdrTestNode.html**. -MogDB=# \a \t \o /home/om/wdrTestNode.html -Output format is unaligned. -Showing only tuples. - ---Write data into the performance report **wdrTestNode.html**. -MogDB=# select generate_wdr_report(1, 2, 'all', 'node', 'dn_6001_6002_6003'); - ---Close the performance report **wdrTestNode.html**. -MogDB=# \o - ---Generate the formatted performance report **wdrTestCluster.html**. -MogDB=# \o /home/om/wdrTestCluster.html - ---Write data into the performance report **wdrTestCluster.html**. -MogDB=# select generate_wdr_report(1, 2, 'all', 'cluster'); - ---Close the performance report **wdrTestCluster.html**. -MogDB=# \o \a \t -Output format is aligned. -Tuples only is off. +--- +title: WDR Snapshot Schema +summary: WDR Snapshot Schema +author: Zhang Cuiping +date: 2021-03-11 +--- + +# WDR Snapshot Schema + +After the WDR snapshot function is enabled (**enable_wdr_snapshot** is set to **on**), schema **snapshot** is created in user tablespace **pg_default** in database **postgres** to flush WDR snapshot data. By default, the initial user or the **monadmin** user can access the snapshot schema. 
+ +You can set the parameter **wdr_snapshot_retention_days** to automatically manage the snapshot lifecycle. + +
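+The snapshot parameters above can be adjusted online. The following is a minimal sketch, assuming the standard **gs_guc** tool from the OM installation is available; the values are illustrative:
+
+```bash
+# enable WDR snapshots and keep them for 8 days; reload applies the change without a restart
+gs_guc reload -N all -I all -c "enable_wdr_snapshot = on"
+gs_guc reload -N all -I all -c "wdr_snapshot_retention_days = 8"
+```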
+ +## Original Information Table of WDR Snapshots + +### SNAPSHOT.SNAPSHOT + +**SNAPSHOT** records the index information, start time, and end time of WDR snapshots stored in the current system. The results can only be queried in the system library, but not in the user library. + +**Table 1** SNAPSHOT attributes + +| Name | Type | Description | Example | +| :---------- | :-------- | :--------------------------- | :---------------------------- | +| snapshot_id | bigint | WDR snapshot ID | 1 | +| start_ts | timestamp | Start time of a WDR snapshot | 2019-12-28 17:11:27.423742+08 | +| end_ts | timestamp | End time of a WDR snapshot | 2019-12-28 17:11:43.67726+08 | + +
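+For example, the **SNAPSHOT.SNAPSHOT** table above can be queried directly while connected to the **postgres** database; a minimal sketch that lists each snapshot with its collection duration, using only the columns from Table 1:
+
+```sql
+-- list available WDR snapshots and how long each collection took
+SELECT snapshot_id,
+       start_ts,
+       end_ts,
+       end_ts - start_ts AS collection_duration
+FROM snapshot.snapshot
+ORDER BY snapshot_id;
+```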
+ +### SNAPSHOT.TABLES_SNAP_TIMESTAMP + +**TABLES_SNAP_TIMESTAMP** records the start time and end time of data collection, as well as corresponding databases, and table objects for all stored WDR snapshots. + +**Table 2** TABLES_SNAP_TIMESTAMP attributes + +| Name | Type | Description | Example | +| :---------- | :-------- | :--------------------------------------- | :---------------------------- | +| snapshot_id | bigint | WDR snapshot ID | 1 | +| db_name | text | Database corresponding to a WDR snapshot | tpcc1000 | +| tablename | text | Table corresponding to a WDR snapshot | snap_xc_statio_all_indexes | +| start_ts | timestamp | Start time of a WDR snapshot | 2019-12-28 17:11:27.425849+08 | +| end_ts | timestamp | End time of a WDR snapshot | 2019-12-28 17:11:27.707398+08 | + +
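+For example, to check which source tables were collected for one snapshot in **TABLES_SNAP_TIMESTAMP** above and how long each collection took (a minimal sketch; snapshot ID 1 is illustrative):
+
+```sql
+-- per-table collection windows for snapshot 1, slowest first
+SELECT db_name, tablename, end_ts - start_ts AS elapsed
+FROM snapshot.tables_snap_timestamp
+WHERE snapshot_id = 1
+ORDER BY elapsed DESC;
+```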
+ +### SNAP_SEQ + +**SNAP_SEQ** is an ascending sequence, which provides IDs for WDR snapshots. + +
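+If needed, the most recently issued snapshot ID can be read from **SNAP_SEQ**; a minimal sketch, assuming the sequence lives in the **snapshot** schema like the objects above:
+
+```sql
+-- last_value reflects the latest ID handed out by the sequence
+SELECT last_value FROM snapshot.snap_seq;
+```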
+ +## WDR Snapshot Data Table + +The naming rule of a WDR snapshot data table is **snap_{Source data table}**. + +WDR snapshot data tables come from all views in **DBE_PERF** Schema. + +All WDR Snapshot data tables can be queried by running the following command. + +```sql +select * from pg_catalog.pg_tables where schemaname='snapshot'; +``` + +The following table lists all WDR Snapshot data tables and related introduction pages for your reference. + +| schemaname | tablename | +| ---------- | ------------------------------------------------------------ | +| snapshot | [tables_snap_timestamp](#tables_snap_timestamp) | +| snapshot | [snapshot](#SNAPSHOT) | +| snapshot | [snap_global_os_runtime](../../reference-guide/schema/DBE_PERF/os/GLOBAL_OS_RUNTIME.md) | +| snapshot | [snap_global_os_threads](../../reference-guide/schema/DBE_PERF/os/GLOBAL_OS_THREADS.md) | +| snapshot | [snap_global_instance_time](../../reference-guide/schema/DBE_PERF/instance/GLOBAL_INSTANCE_TIME.md) | +| snapshot | [snap_summary_workload_sql_count](../../reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_SQL_COUNT.md) | +| snapshot | [snap_summary_workload_sql_elapse_time](../../reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_SQL_ELAPSE_TIME.md) | +| snapshot | [snap_global_workload_transaction](../../reference-guide/schema/DBE_PERF/workload/GLOBAL_WORKLOAD_TRANSACTION.md) | +| snapshot | [snap_summary_workload_transaction](../../reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_TRANSACTION.md) | +| snapshot | [snap_global_thread_wait_status](../../reference-guide/schema/DBE_PERF/session-thread/GLOBAL_THREAD_WAIT_STATUS.md) | +| snapshot | [snap_global_memory_node_detail](../../reference-guide/schema/DBE_PERF/memory/GLOBAL_MEMORY_NODE_DETAIL.md) | +| snapshot | [snap_global_shared_memory_detail](../../reference-guide/schema/DBE_PERF/memory/GLOBAL_SHARED_MEMORY_DETAIL.md) | +| snapshot | [snap_global_stat_db_cu](../../reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STAT_DB_CU.md) | +| snapshot | [snap_global_stat_database](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_DATABASE.md) | +| snapshot | [snap_summary_stat_database](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_DATABASE.md) | +| snapshot | [snap_global_stat_database_conflicts](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_DATABASE_CONFLICTS.md) | +| snapshot | [snap_summary_stat_database_conflicts](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_DATABASE_CONFLICTS.md) | +| snapshot | [snap_global_stat_bad_block](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_BAD_BLOCK.md) | +| snapshot | [snap_summary_stat_bad_block](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_BAD_BLOCK.md) | +| snapshot | [snap_global_file_redo_iostat](../../reference-guide/schema/DBE_PERF/file/GLOBAL_FILE_REDO_IOSTAT.md) | +| snapshot | [snap_summary_file_redo_iostat](../../reference-guide/schema/DBE_PERF/file/SUMMARY_FILE_REDO_IOSTAT.md) | +| snapshot | [snap_global_rel_iostat](../../reference-guide/schema/DBE_PERF/file/GLOBAL_REL_IOSTAT.md) | +| snapshot | [snap_summary_rel_iostat](../../reference-guide/schema/DBE_PERF/file/SUMMARY_REL_IOSTAT.md) | +| snapshot | [snap_global_file_iostat](../../reference-guide/schema/DBE_PERF/file/GLOBAL_FILE_IOSTAT.md) | +| snapshot | [snap_summary_file_iostat](../../reference-guide/schema/DBE_PERF/file/SUMMARY_FILE_IOSTAT.md) | +| snapshot | [snap_global_replication_slots](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_REPLICATION_SLOTS.md) | +| snapshot | 
[snap_global_bgwriter_stat](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_BGWRITER_STAT.md) | +| snapshot | [snap_global_replication_stat](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_REPLICATION_STAT.md) | +| snapshot | [snap_global_transactions_running_xacts](../../reference-guide/schema/DBE_PERF/transaction/GLOBAL_TRANSACTIONS_RUNNING_XACTS.md) | +| snapshot | [snap_summary_transactions_running_xacts](../../reference-guide/schema/DBE_PERF/transaction/SUMMARY_TRANSACTIONS_RUNNING_XACTS.md) | +| snapshot | [snap_global_transactions_prepared_xacts](../../reference-guide/schema/DBE_PERF/transaction/GLOBAL_TRANSACTIONS_PREPARED_XACTS.md) | +| snapshot | [snap_summary_transactions_prepared_xacts](../../reference-guide/schema/DBE_PERF/transaction/SUMMARY_TRANSACTIONS_PREPARED_XACTS.md) | +| snapshot | [snap_summary_statement](../../reference-guide/schema/DBE_PERF/query/SUMMARY_STATEMENT.md) | +| snapshot | [snap_global_statement_count](../../reference-guide/schema/DBE_PERF/query/GLOBAL_STATEMENT_COUNT.md) | +| snapshot | [snap_summary_statement_count](../../reference-guide/schema/DBE_PERF/query/SUMMARY_STATEMENT_COUNT.md) | +| snapshot | [snap_global_config_settings](../../reference-guide/schema/DBE_PERF/configuration/GLOBAL_CONFIG_SETTINGS.md) | +| snapshot | [snap_global_wait_events](../../reference-guide/schema/DBE_PERF/wait-events/GLOBAL_WAIT_EVENTS.md) | +| snapshot | [snap_summary_user_login](../../reference-guide/schema/DBE_PERF/utility/SUMMARY_USER_LOGIN.md) | +| snapshot | [snap_global_ckpt_status](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_CKPT_STATUS.md) | +| snapshot | [snap_global_double_write_status](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_DOUBLE_WRITE_STATUS.md) | +| snapshot | [snap_global_pagewriter_status](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_PAGEWRITER_STATUS.md) | +| snapshot | [snap_global_redo_status](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_REDO_STATUS.md) | +| snapshot | [snap_global_rto_status](../../reference-guide/schema/DBE_PERF/rto/global_rto_status.md) | +| snapshot | [snap_global_recovery_status](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_RECOVERY_STATUS.md) | +| snapshot | [snap_global_threadpool_status](../../reference-guide/schema/DBE_PERF/session-thread/GLOBAL_THREADPOOL_STATUS.md) | +| snapshot | [snap_statement_responsetime_percentile](../../reference-guide/schema/DBE_PERF/query/STATEMENT_RESPONSETIME_PERCENTILE.md) | +| snapshot | [snap_global_statio_all_indexes](../../reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_INDEXES.md) | +| snapshot | [snap_summary_statio_all_indexes](../../reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_INDEXES.md) | +| snapshot | [snap_global_statio_all_sequences](../../reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_SEQUENCES.md) | +| snapshot | [snap_summary_statio_all_sequences](../../reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_SEQUENCES.md) | +| snapshot | [snap_global_statio_all_tables](../../reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_TABLES.md) | +| snapshot | [snap_summary_statio_all_tables](../../reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_TABLES.md) | +| snapshot | [snap_global_stat_all_indexes](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_ALL_INDEXES.md) | +| snapshot | [snap_summary_stat_all_indexes](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_ALL_INDEXES.md) | +| snapshot | 
[snap_summary_stat_user_functions](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_USER_FUNCTIONS.md) | +| snapshot | [snap_global_stat_user_functions](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_USER_FUNCTIONS.md) | +| snapshot | [snap_global_stat_all_tables](../../reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_ALL_TABLES.md) | +| snapshot | [snap_summary_stat_all_tables](../../reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_ALL_TABLES.md) | +| snapshot | [snap_class_vital_info](../../reference-guide/schema/DBE_PERF/utility/CLASS_VITAL_INFO.md) | +| snapshot | [snap_global_record_reset_time](../../reference-guide/schema/DBE_PERF/utility/GLOBAL_RECORD_RESET_TIME.md) | + +
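+
+For example, the rows captured by a single snapshot can be pulled from any of these tables. A minimal sketch, assuming (as is conventional for these tables) that every **snap_** table carries a **snapshot_id** column:
+
+```sql
+-- OS runtime metrics recorded by snapshot 1.
+select * from snapshot.snap_global_os_runtime where snapshot_id = 1;
+```
+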
+
+## Performance Report Generated Based on WDR Snapshot
+
+A performance report is generated by summarizing and collecting statistics based on WDR snapshot data tables.
+
+**Prerequisites**
+
+A report can be generated after the WDR snapshot function is enabled (that is, **enable_wdr_snapshot** is set to **on**) and the number of snapshots is greater than or equal to 2.
+
+**Procedure**
+
+1. Run the following command to create a report file:
+
+   ```
+   touch /home/om/wdrTestNode.html
+   ```
+
+2. Run the following command to connect to the **postgres** database:
+
+   ```bash
+   gsql -d postgres -p [*Port number*] -r
+   ```
+
+3. Run the following command to query the generated snapshots and obtain **snapshot_id**:
+
+   ```sql
+   select * from snapshot.snapshot;
+   ```
+
+4. (Optional) Run the following command on the CCN to manually create a snapshot. If only one snapshot exists in the database or you want to view the monitoring data of the database in the current period, manually create a snapshot. This command is only available to the **sysadmin** user.
+
+   ```sql
+   select create_wdr_snapshot();
+   ```
+
+   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note:**
+   > Run the **cm_ctl query -Cdvi** command. The command output returned in the **Central Coordinator State** part is the CCN information.
+
+5. Run the following commands to generate a WDR in HTML format on the local PC:
+
+   a. Run the following commands to set the report format. **\a** indicates that table row and column symbols are not displayed. **\t** indicates that column names are not displayed. **\o** specifies an output file.
+
+   ```bash
+   gsql> \a
+   gsql> \t
+   gsql> \o /home/om/wdrTestNode.html
+   ```
+
+   b. Run the following command to generate a WDR in HTML format:
+
+   ```sql
+   gsql> select generate_wdr_report(begin_snap_id, end_snap_id, report_type, report_scope, node_name);
+   ```
+
+   Example 1: Generate a cluster-level report.
+
+   ```
+   select generate_wdr_report(1, 2, 'all', 'cluster', null);
+   ```
+
+   Example 2: Generate a report for a node.
+
+   ```
+   select generate_wdr_report(1, 2, 'all', 'node', pgxc_node_str()::cstring);
+   ```
+
+   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Currently, the name of the MogDB node is fixed to **dn_6001_6002_6003**. You can also replace it with the actual node name.
+
+   **Table 3** Parameters of the generate_wdr_report function
+
+   | Parameter | Description | Value Range |
+   | :----------------------------- | :--------------------------- | :--------------------------------- |
+   | begin_snap_id | ID of the snapshot at which the query starts, which is specified by **snapshot_id** in the **snapshot.snapshot** table. | - |
+   | end_snap_id | ID of the snapshot at which the query ends. By default, the value of **end_snap_id** is greater than that of **begin_snap_id** (**snapshot_id** in the **snapshot.snapshot** table). | - |
+   | report_type | Type of the generated report. The value can be **summary**, **detail**, or **all**. | **summary**: Summary data<br/>**detail**: Detailed data<br/>**all**: Summary data and detailed data |
+   | report_scope | Range of the generated report. The value can be **cluster** or **node**. | **cluster**: Database-level information<br/>**node**: Node-level information |
+   | node_name | When **report\_scope** is set to **node**, set this parameter to the name of the corresponding node. (You can run the `select * from pg_node_env;` command to query the node name.)<br/>If **report\_scope** is set to **cluster**, this parameter can be omitted, left blank, or set to **NULL**. | **node**: a node name in MogDB<br/>**cluster**: This value is omitted, left blank, or set to **NULL**. |
+
+   c. Run the following command to disable the output options and format the output:
+
+   ```bash
+   \o \a \t
+   ```
+
+6. View the WDR in **/home/om/** as required.
+
+**Table 4** WDR report
+
+| Item | Description |
+| :---------------------------------------------------- | :----------------------------------------------------------- |
+| Database Stat (database scope) | Performance statistics in database dimensions, including transactions, reads/writes, row activity, write conflicts, deadlocks, and so on |
+| Load Profile (database scope) | Performance statistics in database dimensions, including CPU time, DB time, logical/physical reads, I/O performance, logins/logouts, workload intensity, workload performance, and so on |
+| Instance Efficiency Percentages (database/node scope) | Buffer Hit (buffer hit rate), Effective CPU (CPU usage), WalWrite NoWait (success rate of obtaining Wal Buffer), Soft Parse (soft parsing rate), and Non-parse CPU (percentage of CPU time spent in non-parsing activities) at the database or node level |
+| Top 10 Events by Total Wait Time (node scope) | Events that consume the most time |
+| Wait Classes by Total Wait Time (node scope) | Classes of wait events that consume the most time |
+| Host CPU (node scope) | CPU usage of the host |
+| Memory Statistics (node scope) | Memory statistics in the kernel |
+| Object stats (node scope) | Performance statistics in the table and index dimensions |
+| Database Configuration (node scope) | Node configuration |
+| SQL Statistics (node scope) | Performance statistics of SQL statements in all dimensions, including end-to-end time, row activity, cache hit rate, CPU usage, and time consumption segmentation |
+| SQL Detail (node scope) | SQL statement details |
+
+**Examples**
+
+```sql
+--Create a report file.
+touch /home/om/wdrTestNode.html
+
+--Connect to the database.
+gsql -d postgres -p [*Port number*] -r
+
+--Query the snapshots that have been generated.
+MogDB=# select * from snapshot.snapshot;
+ snapshot_id |           start_ts            |            end_ts
+-------------+-------------------------------+-------------------------------
+           1 | 2020-09-07 10:20:36.763244+08 | 2020-09-07 10:20:42.166511+08
+           2 | 2020-09-07 10:21:13.416352+08 | 2020-09-07 10:21:19.470911+08
+(2 rows)
+
+--Generate the formatted performance report **wdrTestNode.html**.
+MogDB=# \a \t \o /home/om/wdrTestNode.html
+Output format is unaligned.
+Showing only tuples.
+
+--Write data into the performance report **wdrTestNode.html**.
+MogDB=# select generate_wdr_report(1, 2, 'all', 'node', 'dn_6001_6002_6003');
+
+--Close the performance report **wdrTestNode.html**.
+MogDB=# \o
+
+--Generate the formatted performance report **wdrTestCluster.html**.
+MogDB=# \o /home/om/wdrTestCluster.html
+
+--Write data into the performance report **wdrTestCluster.html**.
+MogDB=# select generate_wdr_report(1, 2, 'all', 'cluster');
+
+--Close the performance report **wdrTestCluster.html**.
+MogDB=# \o \a \t
+Output format is aligned.
+Tuples only is off.
``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr.md b/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr.md index 2f873e8c..4cad6094 100644 --- a/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr.md +++ b/product/en/docs-mogdb/v5.0/performance-tuning/wdr/wdr.md @@ -1,11 +1,11 @@ ---- -title: WDR Snapshot -summary: WDR Snapshot -author: zhang cuiping -date: 2023-04-07 ---- - -# WDR Snapshot - -- **[WDR Snapshot Schema](wdr-snapshot-schema.md)** +--- +title: WDR Snapshot +summary: WDR Snapshot +author: zhang cuiping +date: 2023-04-07 +--- + +# WDR Snapshot + +- **[WDR Snapshot Schema](wdr-snapshot-schema.md)** - **[Viewing WDRs](wdr-report.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/ai-feature-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/ai-feature-functions.md index af88be8d..b06dd6a2 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/ai-feature-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/ai-feature-functions.md @@ -1,200 +1,200 @@ ---- -title: AI Feature Functions -summary: AI Feature Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# AI Feature Functions - -- gs_index_advise(text) - - Description: Recommends an index for a single query statement. - - Parameter: SQL statement string - - Return type: record - - Single-query Index Recommendation describes the examples. - -- hypopg_create_index(text) - - Description: Creates a virtual index. - - Parameter: character string of the statement for creating an index - - Return type: record - - Virtual Index describes the examples. - -- hypopg_display_index() - - Description: Displays information about all created virtual indexes. - - Parameter: none - - Return type: record - - Virtual Index describes the examples. - -- hypopg_drop_index(oid) - - Description: Deletes a specified virtual index. - - Parameter: OID of the index - - Return type: Boolean - - Virtual Index describes the examples. - -- hypopg_reset_index() - - Description: Clears all virtual indexes. - - Parameter: none - - Return type: none - - Virtual Index describes the examples. - -- hypopg_estimate_size(oid) - - Description: Estimates the space required for creating a specified index. - - Parameter: OID of the index - - Return type: int8 - - Virtual Index describes the examples. - -- check_engine_status(ip text, port text) - - Description: Tests whether a predictor engine provides services on a specified IP address and port. - - Parameter: IP address and port number of the predictor engine. - - Return type: text - - **User Guide** describes the examples. - -- encode_plan_node(optname text, orientation text, strategy text, options text, dop int8, quals text, projection text) - - Description: Encodes the plan operator information in the input parameters. - - Parameter: plan operator information - - Return type: text. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is an internal function. You are not advised to use it directly. - -- model_train_opt(template text, model text) - - Description: Trains a given query performance prediction model. - - Parameters: template name and model name of the performance prediction model - - Return type: tartup_time_accuracy FLOAT8, total_time_accuracy FLOAT8, rows_accuracy FLOAT8, peak_memory_accuracy FLOAT8 - - **User Guide** describes the examples. 
- -- track_model_train_opt(ip text, port text) - - Description: Returns the training log address of the specified IP address and port predictor engine. - - Parameter: IP address and port number of the predictor engine - - Return type: text - - **User Guide** describes the examples. - -- encode_feature_perf_hist(datname text) - - Description: Encodes historical plan operators collected in the target database. - - Parameter: database name - - Return type: queryid bigint, plan_node_id int, parent_node_id int, left_child_id int, right_child_id int, encode text, startup_time bigint, total_time bigint, rows bigint, and peak_memory int - - **User Guide** describes the examples. - -- gather_encoding_info(datname text) - - Description: Invokes **encode_feature_perf_hist** to save the encoded data persistently. - - Parameter: database name - - Return type: int - - **User Guide** describes the examples. - -- db4ai_predict_by_bool (text, VARIADIC "any") - - Description: Obtains a model whose return value is of the Boolean type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. - - Parameter: model name and input column name of the inference task - - Return type: Boolean - -- db4ai_predict_by_float4(text, VARIADIC "any") - - Description: Obtains a model whose return value is of the float4 type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. - - Parameter: model name and input column name of the inference task - - Return type: float - -- db4ai_predict_by_float8(text, VARIADIC "any") - - Description: Obtains a model whose return value is of the float8 type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. - - Parameter: model name and input column name of the inference task - - Return type: float - -- db4ai_predict_by_int32(text, VARIADIC "any") - - Description: Obtains a model whose return value is of the int32 type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. - - Parameter: model name and input column name of the inference task - - Return type: int - -- db4ai_predict_by_int64(text, VARIADIC "any") - - Description: Obtains a model whose return value is of the int64 type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. - - Parameter: model name and input column name of the inference task - - Return type: int - -- db4ai_predict_by_numeric(text, VARIADIC "any") - - Description: Obtains a model whose return value is of the numeric type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. - - Parameter: model name and input column name of the inference task - - Return type: numeric - -- db4ai_predict_by_text(text, VARIADIC "any") - - Description: Obtains a model whose return value is of the character type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. - - Parameter: model name and input column name of the inference task - - Return type: text - -- db4ai\_predict\_by\_float8\_array\(text, VARIADIC "any"\) - - Description: Obtains a model whose return value is of the character type for model inference. This function is an internal function. 
You are advised to use the **PREDICT BY** syntax for inference.
-
-  Parameter: model name and input column name of the inference task
-
-  Return type: text
-
-- gs\_explain\_model\(text\)
-
-  Description: Obtains the model whose return value is of the character type for text-based model parsing.
-
-  Parameter: model name
-
-  Return type: text
+---
+title: AI Feature Functions
+summary: AI Feature Functions
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# AI Feature Functions
+
+- gs_index_advise(text)
+
+  Description: Recommends an index for a single query statement.
+
+  Parameter: SQL statement string
+
+  Return type: record
+
+  Single-query Index Recommendation describes the examples.
+
+- hypopg_create_index(text)
+
+  Description: Creates a virtual index.
+
+  Parameter: character string of the statement for creating an index
+
+  Return type: record
+
+  Virtual Index describes the examples.
+
+- hypopg_display_index()
+
+  Description: Displays information about all created virtual indexes.
+
+  Parameter: none
+
+  Return type: record
+
+  Virtual Index describes the examples.
+
+- hypopg_drop_index(oid)
+
+  Description: Deletes a specified virtual index.
+
+  Parameter: OID of the index
+
+  Return type: Boolean
+
+  Virtual Index describes the examples.
+
+- hypopg_reset_index()
+
+  Description: Clears all virtual indexes.
+
+  Parameter: none
+
+  Return type: none
+
+  Virtual Index describes the examples.
+
+- hypopg_estimate_size(oid)
+
+  Description: Estimates the space required for creating a specified index.
+
+  Parameter: OID of the index
+
+  Return type: int8
+
+  Virtual Index describes the examples.
+
+- check_engine_status(ip text, port text)
+
+  Description: Tests whether a predictor engine provides services on a specified IP address and port.
+
+  Parameter: IP address and port number of the predictor engine
+
+  Return type: text
+
+  **User Guide** describes the examples.
+
+- encode_plan_node(optname text, orientation text, strategy text, options text, dop int8, quals text, projection text)
+
+  Description: Encodes the plan operator information in the input parameters.
+
+  Parameter: plan operator information
+
+  Return type: text
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is an internal function. You are not advised to use it directly.
+
+- model_train_opt(template text, model text)
+
+  Description: Trains a given query performance prediction model.
+
+  Parameters: template name and model name of the performance prediction model
+
+  Return type: startup_time_accuracy FLOAT8, total_time_accuracy FLOAT8, rows_accuracy FLOAT8, peak_memory_accuracy FLOAT8
+
+  **User Guide** describes the examples.
+
+- track_model_train_opt(ip text, port text)
+
+  Description: Returns the training log address of the predictor engine at the specified IP address and port.
+
+  Parameter: IP address and port number of the predictor engine
+
+  Return type: text
+
+  **User Guide** describes the examples.
+
+- encode_feature_perf_hist(datname text)
+
+  Description: Encodes historical plan operators collected in the target database.
+
+  Parameter: database name
+
+  Return type: queryid bigint, plan_node_id int, parent_node_id int, left_child_id int, right_child_id int, encode text, startup_time bigint, total_time bigint, rows bigint, and peak_memory int
+
+  **User Guide** describes the examples.
+
+- gather_encoding_info(datname text)
+
+  Description: Invokes **encode_feature_perf_hist** to save the encoded data persistently.
+ + Parameter: database name + + Return type: int + + **User Guide** describes the examples. + +- db4ai_predict_by_bool (text, VARIADIC "any") + + Description: Obtains a model whose return value is of the Boolean type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. + + Parameter: model name and input column name of the inference task + + Return type: Boolean + +- db4ai_predict_by_float4(text, VARIADIC "any") + + Description: Obtains a model whose return value is of the float4 type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. + + Parameter: model name and input column name of the inference task + + Return type: float + +- db4ai_predict_by_float8(text, VARIADIC "any") + + Description: Obtains a model whose return value is of the float8 type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. + + Parameter: model name and input column name of the inference task + + Return type: float + +- db4ai_predict_by_int32(text, VARIADIC "any") + + Description: Obtains a model whose return value is of the int32 type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. + + Parameter: model name and input column name of the inference task + + Return type: int + +- db4ai_predict_by_int64(text, VARIADIC "any") + + Description: Obtains a model whose return value is of the int64 type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. + + Parameter: model name and input column name of the inference task + + Return type: int + +- db4ai_predict_by_numeric(text, VARIADIC "any") + + Description: Obtains a model whose return value is of the numeric type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. + + Parameter: model name and input column name of the inference task + + Return type: numeric + +- db4ai_predict_by_text(text, VARIADIC "any") + + Description: Obtains a model whose return value is of the character type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. + + Parameter: model name and input column name of the inference task + + Return type: text + +- db4ai\_predict\_by\_float8\_array\(text, VARIADIC "any"\) + + Description: Obtains a model whose return value is of the character type for model inference. This function is an internal function. You are advised to use the **PREDICT BY** syntax for inference. + + Parameter: model name and input column name of the inference task + + Return type: text + +- gs\_explain\_model\(text\) + + Description: Obtains the model whose return value is of the character type for text-based model parsing. 
+ + Parameter: model name + + Return type: text diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/array-functions-and-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/array-functions-and-operators.md index 0f2b7255..509455a7 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/array-functions-and-operators.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/array-functions-and-operators.md @@ -1,630 +1,630 @@ ---- -title: Array Functions and Operators -summary: Array Functions and Operators -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Array Functions and Operators - -## Array Operators - -- = - - Description: Specifies whether two arrays are equal. - - Example: - - ```sql - MogDB=# SELECT ARRAY[1.1,2.1,3.1]::int[] = ARRAY[1,2,3] AS RESULT ; - result - -------- - t - (1 row) - ``` - -- <> - - Description: Specifies whether two arrays are not equal. - - Example: - - ```sql - MogDB=# SELECT ARRAY[1,2,3] <> ARRAY[1,2,4] AS RESULT; - result - -------- - t - (1 row) - ``` - -- < - - Description: Specifies whether an array is less than another. - - Example: - - ```sql - MogDB=# SELECT ARRAY[1,2,3] < ARRAY[1,2,4] AS RESULT; - result - -------- - t - (1 row) - ``` - -- \> - - Description: Specifies whether an array is greater than another. - - Example: - - ```sql - MogDB=# SELECT ARRAY[1,4,3] > ARRAY[1,2,4] AS RESULT; - result - -------- - t - (1 row) - ``` - -- <= - - Description: Specifies whether an array is less than another. - - Example: - - ```sql - MogDB=# SELECT ARRAY[1,2,3] <= ARRAY[1,2,3] AS RESULT; - result - -------- - t - (1 row) - ``` - -- \>= - - Description: Specifies whether an array is greater than or equal to another. - - Example: - - ```sql - MogDB=# SELECT ARRAY[1,4,3] >= ARRAY[1,4,3] AS RESULT; - result - -------- - t - (1 row) - ``` - -- @> - - Description: Specifies whether an array contains another. - - Example: - - ```sql - MogDB=# SELECT ARRAY[1,4,3] @> ARRAY[3,1] AS RESULT; - result - -------- - t - (1 row) - ``` - -- <@ - - Description: Specifies whether an array is contained in another. - - Example: - - ```sql - MogDB=# SELECT ARRAY[2,7] <@ ARRAY[1,7,4,2,6] AS RESULT; - result - -------- - t - (1 row) - ``` - -- && - - Description: Specifies whether an array overlaps another (have common elements). - - Example: - - ```sql - MogDB=# SELECT ARRAY[1,4,3] && ARRAY[2,1] AS RESULT; - result - -------- - t - (1 row) - ``` - -- || - - Description: Array-to-array concatenation - - Example: - - ```markdown - MogDB=# SELECT ARRAY[1,2,3] || ARRAY[4,5,6] AS RESULT; - result - --------------- - {1,2,3,4,5,6} - (1 row) - ``` - - ```lua - MogDB=# SELECT ARRAY[1,2,3] || ARRAY[[4,5,6],[7,8,9]] AS RESULT; - result - --------------------------- - {{1,2,3},{4,5,6},{7,8,9}} - (1 row) - ``` - -- || - - Description: Element-to-array concatenation - - Example: - - ```markdown - MogDB=# SELECT 3 || ARRAY[4,5,6] AS RESULT; - result - ----------- - {3,4,5,6} - (1 row) - ``` - -- || - - Description: Array-to-element concatenation - - Example: - - ```markdown - MogDB=# SELECT ARRAY[4,5,6] || 7 AS RESULT; - result - ----------- - {4,5,6,7} - (1 row) - ``` - -Array comparisons compare the array contents element-by-element, using the default B-tree comparison function for the element data type. In multidimensional arrays, the elements are accessed in row-major order. 
If the contents of two arrays are equal but the dimensionality is different, the first difference in the dimensionality information determines the sort order. - -## Array Functions - -- array_append(anyarray, anyelement) - - Description: Appends an element to the end of an array, and only supports dimension-1 arrays. - - Return type: anyarray - - Example: - - ```makefile - MogDB=# SELECT array_append(ARRAY[1,2], 3) AS RESULT; - result - --------- - {1,2,3} - (1 row) - ``` - -- array_prepend(anyelement, anyarray) - - Description: Appends an element to the beginning of an array, and only supports dimension-1 arrays. - - Return type: anyarray - - Example: - - ```makefile - MogDB=# SELECT array_prepend(1, ARRAY[2,3]) AS RESULT; - result - --------- - {1,2,3} - (1 row) - ``` - -- array_cat(anyarray, anyarray) - - Description: Concatenates two arrays, and supports multi-dimensional arrays. - - Return type: anyarray - - Example: - - ```lua - MogDB=# SELECT array_cat(ARRAY[1,2,3], ARRAY[4,5]) AS RESULT; - result - ------------- - {1,2,3,4,5} - (1 row) - - MogDB=# SELECT array_cat(ARRAY[[1,2],[4,5]], ARRAY[6,7]) AS RESULT; - result - --------------------- - {{1,2},{4,5},{6,7}} - (1 row) - ``` - -- array_union(anyarray, anyarray) - - Description: Concatenates two arrays, and supports only one-dimensional arrays. - - Return type: anyarray - - Example: - - ```makefile - MogDB=# SELECT array_union(ARRAY[1,2,3], ARRAY[3,4,5]) AS RESULT; - result - ------------- - {1,2,3,3,4,5} - (1 row) - ``` - -- array_union_distinct(anyarray, anyarray) - - Description: Concatenates two arrays and deduplicates them. Only one-dimensional arrays are supported. - - Return type: anyarray - - Example: - - ```markdown - MogDB=# SELECT array_union_distinct(ARRAY[1,2,3], ARRAY[3,4,5]) AS RESULT; - result - ------------- - {1,2,3,4,5} - (1 row) - ``` - -- array_intersect(anyarray, anyarray) - - Description: Intersects two arrays. Only one-dimensional arrays are supported. - - Return type: anyarray - - Example: - - ```makefile - MogDB=# SELECT array_intersect(ARRAY[1,2,3], ARRAY[3,4,5]) AS RESULT; - result - ------------- - {3} - (1 row) - ``` - -- array_intersect_distinct(anyarray, anyarray) - - Description: Intersects two arrays and deduplicates them. Only one-dimensional arrays are supported. - - Return type: anyarray - - Example: - - ```markdown - MogDB=# SELECT array_intersect_distinct(ARRAY[1,2,2], ARRAY[2,2,4,5]) AS RESULT; - result - ------------- - {2} - (1 row) - ``` - -- array_except(anyarray, anyarray) - - Description: Calculates the difference between two arrays. Only one-dimensional arrays are supported. - - Return type: anyarray - - Example: - - ```makefile - MogDB=# SELECT array_except(ARRAY[1,2,3], ARRAY[3,4,5]) AS RESULT; - result - ------------- - {1,2} - (1 row) - ``` - -- array_except_distinct(anyarray, anyarray) - - Description: Calculates the difference between two arrays and deduplicates them. Only one-dimensional arrays are supported. - - Return type: anyarray - - Example: - - ```markdown - MogDB=# SELECT array_except_distinct(ARRAY[1,2,2,3], ARRAY[3,4,5]) AS RESULT; - result - ------------- - {1,2} - (1 row) - ``` - -- array_ndims(anyarray) - - Description: Returns the number of dimensions of an array. - - Return type: int - - Example: - - ```lua - MogDB=# SELECT array_ndims(ARRAY[[1,2,3], [4,5,6]]) AS RESULT; - result - -------- - 2 - (1 row) - ``` - -- array_dims(anyarray) - - Description: Returns the low-order flag bits and high-order flag bits of each dimension in an array. 
- - Return type: text - - Example: - - ```lua - MogDB=# SELECT array_dims(ARRAY[[1,2,3], [4,5,6]]) AS RESULT; - result - ------------ - [1:2][1:3] - (1 row) - ``` - -- array_length(anyarray, int) - - Description: Returns the length of the requested array dimension. **int** is the requested array dimension. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT array_length(array[1,2,3], 1) AS RESULT; - result - -------- - 3 - (1 row) - - MogDB=# SELECT array_length(array[[1,2,3],[4,5,6]], 2) AS RESULT; - result - -------- - 3 - (1 row) - ``` - -- array_lower(anyarray, int) - - Description: Returns lower bound of the requested array dimension. **int** is the requested array dimension. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT array_lower('[0:2]={1,2,3}'::int[], 1) AS RESULT; - result - -------- - 0 - (1 row) - ``` - -- array_upper(anyarray, int) - - Description: Returns upper bound of the requested array dimension. **int** is the requested array dimension. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT array_upper(ARRAY[1,8,3,7], 1) AS RESULT; - result - -------- - 4 - (1 row) - ``` - -- array_upper(anyarray, int) - - Description: Returns upper bound of the requested array dimension. **int** is the requested array dimension. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT array_upper(ARRAY[1,8,3,7], 1) AS RESULT; - result - -------- - 4 - (1 row) - ``` - -- array_remove(anyarray, anyelement) - - Description: Removes all specified elements from an array. Only one-dimensional arrays are supported. - - Return type: anyarray - - Example: - - ```makefile - MogDB=# SELECT array_remove(ARRAY[1,8,8,7], 8) AS RESULT; - result - -------- - {1,7} - (1 row) - ``` - -- array_to_string(anyarray, text [, text]) - - Description: Uses the first **text** as the new delimiter and the second **text** to replace **NULL** values. - - Return type: text - - Example: - - ```sql - MogDB=# SELECT array_to_string(ARRAY[1, 2, 3, NULL, 5], ',', '*') AS RESULT; - result - ----------- - 1,2,3,*,5 - (1 row) - ``` - -- array_delete(anyarray) - - Description: Clears elements in an array and returns an empty array of the same type. - - Return type: anyarray - - Example: - - ```makefile - MogDB=# SELECT array_delete(ARRAY[1,8,3,7]) AS RESULT; - result - -------- - {} - (1 row) - ``` - -- array_deleteidx(anyarray, int) - - Description: Deletes specified subscript elements from an array and returns an array consisting of the remaining elements. - - Return type: anyarray - - Example: - - ```makefile - MogDB=# SELECT array_deleteidx(ARRAY[1,2,3,4,5], 1) AS RESULT; - result - ----------- - {2,3,4,5} - (1 row) - ``` - -- array_extendnull(anyarray, int) - - Description: Adds a specified number of null elements to the end of an array. - - Return type: anyarray - - Example: - - ```csharp - MogDB=# SELECT array_extendnull(ARRAY[1,8,3,7],1) AS RESULT; - result - -------------- - {1,8,3,7,null} - (1 row) - ``` - -- array_trim(anyarray, int) - - Description: Deletes a specified number of elements from the end of an array. - - Return type: anyarray - - Example: - - ```makefile - MogDB=# SELECT array_trim(ARRAY[1,8,3,7],1) AS RESULT; - result - --------- - {1,8,3} - (1 row) - ``` - -- array_exists(anyarray, int) - - Description: Checks whether the second parameter is a valid subscript of an array. 
-
-  Return type: Boolean
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT array_exists(ARRAY[1,8,3,7],1) AS RESULT;
-   result
-  --------
-   t
-  (1 row)
-  ```
-
-- array_next(anyarray, int)
-
-  Description: Returns the subscript of the element following a specified subscript in an array based on the second input parameter.
-
-  Return type: int
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT array_next(ARRAY[1,8,3,7],1) AS RESULT;
-   result
-  --------
-        2
-  (1 row)
-  ```
-
-- array_prior(anyarray, int)
-
-  Description: Returns the subscript of the element followed by a specified subscript in an array based on the second input parameter.
-
-  Return type: int
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT array_prior(ARRAY[1,8,3,7],2) AS RESULT;
-   result
-  --------
-        1
-  (1 row)
-  ```
-
-- string_to_array(text, text [, text])
-
-  Description: Uses the second **text** as the new delimiter and the third **text** as the substring to be replaced by **NULL** values. A substring can be replaced by **NULL** values only when it is the same as the third **text**.
-
-  Return type: text[]
-
-  Example:
-
-  ```lua
-  MogDB=# SELECT string_to_array('xx~^~yy~^~zz', '~^~', 'yy') AS RESULT;
-     result
-  --------------
-   {xx,NULL,zz}
-  (1 row)
-  MogDB=# SELECT string_to_array('xx~^~yy~^~zz', '~^~', 'y') AS RESULT;
-    result
-  ------------
-   {xx,yy,zz}
-  (1 row)
-  ```
-
-- unnest(anyarray)
-
-  Description: Expands an array to a set of rows.
-
-  Return type: setof anyelement
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT unnest(ARRAY[1,2]) AS RESULT;
-   result
-  --------
-        1
-        2
-  (2 rows)
-  ```
-
-In **string_to_array**, if the delimiter parameter is NULL, each character in the input string will become a separate element in the resulting array. If the delimiter is an empty string, then the entire input string is returned as a one-element array. Otherwise the input string is split at each occurrence of the delimiter string.
-
-In **string_to_array**, if the null-string parameter is omitted or NULL, none of the substrings of the input will be replaced by NULL.
-
+---
+title: Array Functions and Operators
+summary: Array Functions and Operators
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Array Functions and Operators
+
+## Array Operators
+
+- =
+
+  Description: Specifies whether two arrays are equal.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT ARRAY[1.1,2.1,3.1]::int[] = ARRAY[1,2,3] AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- <>
+
+  Description: Specifies whether two arrays are not equal.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT ARRAY[1,2,3] <> ARRAY[1,2,4] AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- <
+
+  Description: Specifies whether an array is less than another.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT ARRAY[1,2,3] < ARRAY[1,2,4] AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- \>
+
+  Description: Specifies whether an array is greater than another.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT ARRAY[1,4,3] > ARRAY[1,2,4] AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- <=
+
+  Description: Specifies whether an array is less than or equal to another.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT ARRAY[1,2,3] <= ARRAY[1,2,3] AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- \>=
+
+  Description: Specifies whether an array is greater than or equal to another.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT ARRAY[1,4,3] >= ARRAY[1,4,3] AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- @>
+
+  Description: Specifies whether an array contains another.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT ARRAY[1,4,3] @> ARRAY[3,1] AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- <@
+
+  Description: Specifies whether an array is contained in another.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT ARRAY[2,7] <@ ARRAY[1,7,4,2,6] AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- &&
+
+  Description: Specifies whether an array overlaps another (have common elements).
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT ARRAY[1,4,3] && ARRAY[2,1] AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- ||
+
+  Description: Array-to-array concatenation
+
+  Example:
+
+  ```markdown
+  MogDB=# SELECT ARRAY[1,2,3] || ARRAY[4,5,6] AS RESULT;
+     result
+  ---------------
+   {1,2,3,4,5,6}
+  (1 row)
+  ```
+
+  ```lua
+  MogDB=# SELECT ARRAY[1,2,3] || ARRAY[[4,5,6],[7,8,9]] AS RESULT;
+            result
+  ---------------------------
+   {{1,2,3},{4,5,6},{7,8,9}}
+  (1 row)
+  ```
+
+- ||
+
+  Description: Element-to-array concatenation
+
+  Example:
+
+  ```markdown
+  MogDB=# SELECT 3 || ARRAY[4,5,6] AS RESULT;
+   result
+  -----------
+   {3,4,5,6}
+  (1 row)
+  ```
+
+- ||
+
+  Description: Array-to-element concatenation
+
+  Example:
+
+  ```markdown
+  MogDB=# SELECT ARRAY[4,5,6] || 7 AS RESULT;
+   result
+  -----------
+   {4,5,6,7}
+  (1 row)
+  ```
+
+Array comparisons compare the array contents element-by-element, using the default B-tree comparison function for the element data type. In multidimensional arrays, the elements are accessed in row-major order. If the contents of two arrays are equal but the dimensionality is different, the first difference in the dimensionality information determines the sort order.
+
+## Array Functions
+
+- array_append(anyarray, anyelement)
+
+  Description: Appends an element to the end of an array. Only one-dimensional arrays are supported.
+
+  Return type: anyarray
+
+  Example:
+
+  ```makefile
+  MogDB=# SELECT array_append(ARRAY[1,2], 3) AS RESULT;
+   result
+  ---------
+   {1,2,3}
+  (1 row)
+  ```
+
+- array_prepend(anyelement, anyarray)
+
+  Description: Prepends an element to the beginning of an array. Only one-dimensional arrays are supported.
+
+  Return type: anyarray
+
+  Example:
+
+  ```makefile
+  MogDB=# SELECT array_prepend(1, ARRAY[2,3]) AS RESULT;
+   result
+  ---------
+   {1,2,3}
+  (1 row)
+  ```
+
+- array_cat(anyarray, anyarray)
+
+  Description: Concatenates two arrays, and supports multi-dimensional arrays.
+
+  Return type: anyarray
+
+  Example:
+
+  ```lua
+  MogDB=# SELECT array_cat(ARRAY[1,2,3], ARRAY[4,5]) AS RESULT;
+     result
+  -------------
+   {1,2,3,4,5}
+  (1 row)
+
+  MogDB=# SELECT array_cat(ARRAY[[1,2],[4,5]], ARRAY[6,7]) AS RESULT;
+        result
+  ---------------------
+   {{1,2},{4,5},{6,7}}
+  (1 row)
+  ```
+
+- array_union(anyarray, anyarray)
+
+  Description: Concatenates two arrays. Only one-dimensional arrays are supported.
+
+  Return type: anyarray
+
+  Example:
+
+  ```makefile
+  MogDB=# SELECT array_union(ARRAY[1,2,3], ARRAY[3,4,5]) AS RESULT;
+     result
+  -------------
+   {1,2,3,3,4,5}
+  (1 row)
+  ```
+
+- array_union_distinct(anyarray, anyarray)
+
+  Description: Concatenates two arrays and deduplicates them. Only one-dimensional arrays are supported.
+
+  Return type: anyarray
+
+  Example:
+
+  ```markdown
+  MogDB=# SELECT array_union_distinct(ARRAY[1,2,3], ARRAY[3,4,5]) AS RESULT;
+     result
+  -------------
+   {1,2,3,4,5}
+  (1 row)
+  ```
+
+- array_intersect(anyarray, anyarray)
+
+  Description: Intersects two arrays. Only one-dimensional arrays are supported.
+
+  Return type: anyarray
+
+  Example:
+
+  ```makefile
+  MogDB=# SELECT array_intersect(ARRAY[1,2,3], ARRAY[3,4,5]) AS RESULT;
+     result
+  -------------
+   {3}
+  (1 row)
+  ```
+
+- array_intersect_distinct(anyarray, anyarray)
+
+  Description: Intersects two arrays and deduplicates them. Only one-dimensional arrays are supported.
+
+  Return type: anyarray
+
+  Example:
+
+  ```markdown
+  MogDB=# SELECT array_intersect_distinct(ARRAY[1,2,2], ARRAY[2,2,4,5]) AS RESULT;
+     result
+  -------------
+   {2}
+  (1 row)
+  ```
+
+- array_except(anyarray, anyarray)
+
+  Description: Calculates the difference between two arrays. Only one-dimensional arrays are supported.
+
+  Return type: anyarray
+
+  Example:
+
+  ```makefile
+  MogDB=# SELECT array_except(ARRAY[1,2,3], ARRAY[3,4,5]) AS RESULT;
+     result
+  -------------
+   {1,2}
+  (1 row)
+  ```
+
+- array_except_distinct(anyarray, anyarray)
+
+  Description: Calculates the difference between two arrays and deduplicates them. Only one-dimensional arrays are supported.
+
+  Return type: anyarray
+
+  Example:
+
+  ```markdown
+  MogDB=# SELECT array_except_distinct(ARRAY[1,2,2,3], ARRAY[3,4,5]) AS RESULT;
+     result
+  -------------
+   {1,2}
+  (1 row)
+  ```
+
+- array_ndims(anyarray)
+
+  Description: Returns the number of dimensions of an array.
+
+  Return type: int
+
+  Example:
+
+  ```lua
+  MogDB=# SELECT array_ndims(ARRAY[[1,2,3], [4,5,6]]) AS RESULT;
+   result
+  --------
+        2
+  (1 row)
+  ```
+
+- array_dims(anyarray)
+
+  Description: Returns a text representation of the lower and upper bounds of each dimension in an array.
+
+  Return type: text
+
+  Example:
+
+  ```lua
+  MogDB=# SELECT array_dims(ARRAY[[1,2,3], [4,5,6]]) AS RESULT;
+    result
+  ------------
+   [1:2][1:3]
+  (1 row)
+  ```
+
+- array_length(anyarray, int)
+
+  Description: Returns the length of the requested array dimension. **int** is the requested array dimension.
+
+  Return type: int
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT array_length(array[1,2,3], 1) AS RESULT;
+   result
+  --------
+        3
+  (1 row)
+
+  MogDB=# SELECT array_length(array[[1,2,3],[4,5,6]], 2) AS RESULT;
+   result
+  --------
+        3
+  (1 row)
+  ```
+
+- array_lower(anyarray, int)
+
+  Description: Returns the lower bound of the requested array dimension. **int** is the requested array dimension.
+
+  Return type: int
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT array_lower('[0:2]={1,2,3}'::int[], 1) AS RESULT;
+   result
+  --------
+        0
+  (1 row)
+  ```
+
+- array_upper(anyarray, int)
+
+  Description: Returns the upper bound of the requested array dimension. **int** is the requested array dimension.
+
+  Return type: int
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT array_upper(ARRAY[1,8,3,7], 1) AS RESULT;
+   result
+  --------
+        4
+  (1 row)
+  ```
+
+- array_remove(anyarray, anyelement)
+
+  Description: Removes all specified elements from an array. Only one-dimensional arrays are supported.
+
+  Return type: anyarray
+
+  Example:
+
+  ```makefile
+  MogDB=# SELECT array_remove(ARRAY[1,8,8,7], 8) AS RESULT;
+   result
+  --------
+   {1,7}
+  (1 row)
+  ```
+
+- array_to_string(anyarray, text [, text])
+
+  Description: Uses the first **text** as the new delimiter and the second **text** to replace **NULL** values.
+
+  Return type: text
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT array_to_string(ARRAY[1, 2, 3, NULL, 5], ',', '*') AS RESULT;
+   result
+  -----------
+   1,2,3,*,5
+  (1 row)
+  ```
+
+- array_delete(anyarray)
+
+  Description: Clears elements in an array and returns an empty array of the same type.
+
+  Return type: anyarray
+
+  Example:
+
+  ```makefile
+  MogDB=# SELECT array_delete(ARRAY[1,8,3,7]) AS RESULT;
+   result
+  --------
+   {}
+  (1 row)
+  ```
+
+- array_deleteidx(anyarray, int)
+
+  Description: Deletes the element at the specified subscript from an array and returns an array consisting of the remaining elements.
+
+  Return type: anyarray
+
+  Example:
+
+  ```makefile
+  MogDB=# SELECT array_deleteidx(ARRAY[1,2,3,4,5], 1) AS RESULT;
+    result
+  -----------
+   {2,3,4,5}
+  (1 row)
+  ```
+
+- array_extendnull(anyarray, int)
+
+  Description: Adds a specified number of null elements to the end of an array.
+
+  Return type: anyarray
+
+  Example:
+
+  ```csharp
+  MogDB=# SELECT array_extendnull(ARRAY[1,8,3,7],1) AS RESULT;
+     result
+  --------------
+   {1,8,3,7,null}
+  (1 row)
+  ```
+
+- array_trim(anyarray, int)
+
+  Description: Deletes a specified number of elements from the end of an array.
+
+  Return type: anyarray
+
+  Example:
+
+  ```makefile
+  MogDB=# SELECT array_trim(ARRAY[1,8,3,7],1) AS RESULT;
+   result
+  ---------
+   {1,8,3}
+  (1 row)
+  ```
+
+- array_exists(anyarray, int)
+
+  Description: Checks whether the second parameter is a valid subscript of an array.
+
+  Return type: Boolean
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT array_exists(ARRAY[1,8,3,7],1) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- array_next(anyarray, int)
+
+  Description: Returns the subscript of the element following the subscript given by the second input parameter.
+
+  Return type: int
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT array_next(ARRAY[1,8,3,7],1) AS RESULT;
+   result
+  --------
+        2
+  (1 row)
+  ```
+
+- array_prior(anyarray, int)
+
+  Description: Returns the subscript of the element preceding the subscript given by the second input parameter.
+
+  Return type: int
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT array_prior(ARRAY[1,8,3,7],2) AS RESULT;
+   result
+  --------
+        1
+  (1 row)
+  ```
+
+- string_to_array(text, text [, text])
+
+  Description: Uses the second **text** as the new delimiter and the third **text** as the substring to be replaced by **NULL** values. A substring can be replaced by **NULL** values only when it is the same as the third **text**.
+
+  Return type: text[]
+
+  Example:
+
+  ```lua
+  MogDB=# SELECT string_to_array('xx~^~yy~^~zz', '~^~', 'yy') AS RESULT;
+     result
+  --------------
+   {xx,NULL,zz}
+  (1 row)
+  MogDB=# SELECT string_to_array('xx~^~yy~^~zz', '~^~', 'y') AS RESULT;
+    result
+  ------------
+   {xx,yy,zz}
+  (1 row)
+  ```
+
+- unnest(anyarray)
+
+  Description: Expands an array to a set of rows.
+
+  Return type: setof anyelement
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT unnest(ARRAY[1,2]) AS RESULT;
+   result
+  --------
+        1
+        2
+  (2 rows)
+  ```
+
+In **string_to_array**, if the delimiter parameter is NULL, each character in the input string will become a separate element in the resulting array. If the delimiter is an empty string, then the entire input string is returned as a one-element array. Otherwise the input string is split at each occurrence of the delimiter string.
+
+In **string_to_array**, if the null-string parameter is omitted or NULL, none of the substrings of the input will be replaced by NULL.
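+
+A short sketch of the delimiter rules described above:
+
+```sql
+-- NULL delimiter: the input is split into individual characters.
+MogDB=# SELECT string_to_array('abc', NULL) AS RESULT;
+ result
+---------
+ {a,b,c}
+(1 row)
+
+-- Empty-string delimiter: the whole input becomes a single element.
+MogDB=# SELECT string_to_array('abc', '') AS RESULT;
+ result
+--------
+ {abc}
+(1 row)
+```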
+ In **array_to_string**, if the null-string parameter is omitted or NULL, any null elements in the array are simply skipped and not represented in the output string. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/binary-string-functions-and-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/binary-string-functions-and-operators.md index d435fdf5..3bd06aea 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/binary-string-functions-and-operators.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/binary-string-functions-and-operators.md @@ -1,224 +1,224 @@ ---- -title: Binary String Functions and Operators -summary: Binary String Functions and Operators -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Binary String Functions and Operators - -## String Operators - -SQL defines some string functions that use keywords, rather than commas, to separate arguments. - -- octet_length(string) - - Description: Specifies the number of bytes in a binary string. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT octet_length(E'jo\\000se'::bytea) AS RESULT; - result - -------- - 5 - (1 row) - ``` - -- overlay(string placing string from int [for int]) - - Description: Replaces substrings. - - Return type: bytea - - Example: - - ```sql - MogDB=# SELECT overlay(E'Th\\000omas'::bytea placing E'\\002\\003'::bytea from 2 for 3) AS RESULT; - result - ---------------- - \x5402036d6173 - (1 row) - ``` - -- position(substring in string) - - Description: Specifies the location of a specified substring. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT position(E'\\000om'::bytea in E'Th\\000omas'::bytea) AS RESULT; - result - -------- - 3 - (1 row) - ``` - -- substring(string [from int] [for int]) - - Description: Truncates a substring. - - Return type: bytea - - Example: - - ```sql - MogDB=# SELECT substring(E'Th\\000omas'::bytea from 2 for 3) AS RESULT; - result - ---------- - \x68006f - (1 row) - ``` - -- substr(string, from int [, for int]) - - Description: Truncates a substring. - - Return type: bytea - - Example: - - ```sql - MogDB=# select substr(E'Th\\000omas'::bytea,2, 3) as result; - result - ---------- - \x68006f - (1 row) - ``` - -- trim([both] bytes from string) - - Description: Removes the longest string containing only bytes from **bytes** from the start and end of **string**. - - Return type: bytea - - Example: - - ```sql - MogDB=# SELECT trim(E'\\000'::bytea from E'\\000Tom\\000'::bytea) AS RESULT; - result - ---------- - \x546f6d - (1 row) - ``` - -## Other Binary String Functions - -MogDB provides common syntax used for calling functions. - -- btrim(string bytea,bytes bytea) - - Description: Removes the longest string containing only bytes from **bytes** from the start and end of **string**. - - Return type: bytea - - Example: - - ```sql - MogDB=# SELECT btrim(E'\\000trim\\000'::bytea, E'\\000'::bytea) AS RESULT; - result - ------------ - \x7472696d - (1 row) - ``` - -- decode(string text, format text) - - Description: Decodes binary data from textual representation. Supported formats are: `base64`, `hex`, `escape`. - - Return type: bytea - - Example: - - ```sql - MogDB=# SELECT decode('MTIzAAE=', 'base64'); - decode - -------------- - \x3132330001 - (1 row) - ``` - -- encode(data bytea, format text) - - Description: Encodes binary data into a textual representation. Supported formats are: `base64`, `hex`, `escape`. 
`escape` converts zero bytes and high-bit-set bytes to octal sequences (`\nnn`) and doubles backslashes. - - Return type: text - - Example: - - ```sql - MogDB=# SELECT encode(E'123\\000\\001', 'base64'); - encode - ---------- - MTIzAAE= - (1 row) - ``` - -- get_bit(string, offset) - - Description: Extracts bits from a string. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT get_bit(E'Th\\000omas'::bytea, 45) AS RESULT; - result - -------- - 1 - (1 row) - ``` - -- get_byte(string, offset) - - Description: Extracts bytes from a string. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT get_byte(E'Th\\000omas'::bytea, 4) AS RESULT; - result - -------- - 109 - (1 row) - ``` - -- set_bit(string,offset, newvalue) - - Description: Sets bits in a string. - - Return type: bytea - - Example: - - ```sql - MogDB=# SELECT set_bit(E'Th\\000omas'::bytea, 45, 0) AS RESULT; - result - ------------------ - \x5468006f6d4173 - (1 row) - ``` - -- set_byte(string,offset, newvalue) - - Description: Sets bytes in a string. - - Return type: bytea - - Example: - - ```sql - MogDB=# SELECT set_byte(E'Th\\000omas'::bytea, 4, 64) AS RESULT; - result - ------------------ - \x5468006f406173 - (1 row) - ``` +--- +title: Binary String Functions and Operators +summary: Binary String Functions and Operators +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Binary String Functions and Operators + +## String Operators + +SQL defines some string functions that use keywords, rather than commas, to separate arguments. + +- octet_length(string) + + Description: Specifies the number of bytes in a binary string. + + Return type: int + + Example: + + ```sql + MogDB=# SELECT octet_length(E'jo\\000se'::bytea) AS RESULT; + result + -------- + 5 + (1 row) + ``` + +- overlay(string placing string from int [for int]) + + Description: Replaces substrings. + + Return type: bytea + + Example: + + ```sql + MogDB=# SELECT overlay(E'Th\\000omas'::bytea placing E'\\002\\003'::bytea from 2 for 3) AS RESULT; + result + ---------------- + \x5402036d6173 + (1 row) + ``` + +- position(substring in string) + + Description: Specifies the location of a specified substring. + + Return type: int + + Example: + + ```sql + MogDB=# SELECT position(E'\\000om'::bytea in E'Th\\000omas'::bytea) AS RESULT; + result + -------- + 3 + (1 row) + ``` + +- substring(string [from int] [for int]) + + Description: Truncates a substring. + + Return type: bytea + + Example: + + ```sql + MogDB=# SELECT substring(E'Th\\000omas'::bytea from 2 for 3) AS RESULT; + result + ---------- + \x68006f + (1 row) + ``` + +- substr(string, from int [, for int]) + + Description: Truncates a substring. + + Return type: bytea + + Example: + + ```sql + MogDB=# select substr(E'Th\\000omas'::bytea,2, 3) as result; + result + ---------- + \x68006f + (1 row) + ``` + +- trim([both] bytes from string) + + Description: Removes the longest string containing only bytes from **bytes** from the start and end of **string**. + + Return type: bytea + + Example: + + ```sql + MogDB=# SELECT trim(E'\\000'::bytea from E'\\000Tom\\000'::bytea) AS RESULT; + result + ---------- + \x546f6d + (1 row) + ``` + +## Other Binary String Functions + +MogDB provides common syntax used for calling functions. + +- btrim(string bytea,bytes bytea) + + Description: Removes the longest string containing only bytes from **bytes** from the start and end of **string**. 
+ + Return type: bytea + + Example: + + ```sql + MogDB=# SELECT btrim(E'\\000trim\\000'::bytea, E'\\000'::bytea) AS RESULT; + result + ------------ + \x7472696d + (1 row) + ``` + +- decode(string text, format text) + + Description: Decodes binary data from textual representation. Supported formats are: `base64`, `hex`, `escape`. + + Return type: bytea + + Example: + + ```sql + MogDB=# SELECT decode('MTIzAAE=', 'base64'); + decode + -------------- + \x3132330001 + (1 row) + ``` + +- encode(data bytea, format text) + + Description: Encodes binary data into a textual representation. Supported formats are: `base64`, `hex`, `escape`. `escape` converts zero bytes and high-bit-set bytes to octal sequences (`\nnn`) and doubles backslashes. + + Return type: text + + Example: + + ```sql + MogDB=# SELECT encode(E'123\\000\\001', 'base64'); + encode + ---------- + MTIzAAE= + (1 row) + ``` + +- get_bit(string, offset) + + Description: Extracts bits from a string. + + Return type: int + + Example: + + ```sql + MogDB=# SELECT get_bit(E'Th\\000omas'::bytea, 45) AS RESULT; + result + -------- + 1 + (1 row) + ``` + +- get_byte(string, offset) + + Description: Extracts bytes from a string. + + Return type: int + + Example: + + ```sql + MogDB=# SELECT get_byte(E'Th\\000omas'::bytea, 4) AS RESULT; + result + -------- + 109 + (1 row) + ``` + +- set_bit(string,offset, newvalue) + + Description: Sets bits in a string. + + Return type: bytea + + Example: + + ```sql + MogDB=# SELECT set_bit(E'Th\\000omas'::bytea, 45, 0) AS RESULT; + result + ------------------ + \x5468006f6d4173 + (1 row) + ``` + +- set_byte(string,offset, newvalue) + + Description: Sets bytes in a string. + + Return type: bytea + + Example: + + ```sql + MogDB=# SELECT set_byte(E'Th\\000omas'::bytea, 4, 64) AS RESULT; + result + ------------------ + \x5468006f406173 + (1 row) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/bit-string-functions-and-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/bit-string-functions-and-operators.md index 8ffa9997..5b2a531a 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/bit-string-functions-and-operators.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/bit-string-functions-and-operators.md @@ -1,153 +1,153 @@ ---- -title: Bit String Functions and Operators -summary: Bit String Functions and Operators -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Bit String Functions and Operators - -Aside from the usual comparison operators, the following operators can be used. Bit string operands of **&**, **|**, and **\#** must be of equal length. In case of bit shifting, the original length of the string is preserved by zero padding (if necessary). - -- || - - Description: Connects bit strings. - - Example: - - ```sql - MogDB=# SELECT B'10001' || B'011' AS RESULT; - result - ---------- - 10001011 - (1 row) - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > It is recommended that a column have no more than 180 consecutive internal joins. A column with over 180 joins will be split into joined consecutive strings. - > Example:**str1||str2||str3||str4** is split into **(str1||str2)||(str3||str4)**. - -- & - - Description: Specifies the AND operation between bit strings. - - Example: - - ```sql - MogDB=# SELECT B'10001' & B'01101' AS RESULT; - result - -------- - 00001 - (1 row) - ``` - -- | - - Description: Specifies the OR operation between bit strings. 
- - Example: - - ```sql - MogDB=# SELECT B'10001' | B'01101' AS RESULT; - result - -------- - 11101 - (1 row) - ``` - -- \# - - Description: Specifies the OR operation between bit strings if they are inconsistent. If the same positions in the two bit strings are both 1 or 0, the position returns **0**. - - Example: - - ```sql - MogDB=# SELECT B'10001' # B'01101' AS RESULT; - result - -------- - 11100 - (1 row) - ``` - -- ~ - - Description: Specifies the NOT operation between bit strings. - - Example: - - ```sql - MogDB=# SELECT ~B'10001'AS RESULT; - result - ---------- - 01110 - (1 row) - ``` - -- << - - Description: Shifts left in a bit string. - - Example: - - ```sql - MogDB=# SELECT B'10001' << 3 AS RESULT; - result - ---------- - 01000 - (1 row) - ``` - -- >> - - Description: Shifts right in a bit string. - - Example: - - ```sql - MogDB=# SELECT B'10001' >> 2 AS RESULT; - result - ---------- - 00100 - (1 row) - ``` - -The following SQL-standard functions work on bit strings as well as strings:**length**, **bit_length**, **octet_length**, **position**, **substring**, and **overlay**. - -The following functions work on bit strings as well as binary strings:**get_bit** and **set_bit**. When working with a bit string, these functions number the first (leftmost) bit of the string as bit 0. - -In addition, it is possible to convert between integral values and type **bit**. Example: - -```sql -MogDB=# SELECT 44::bit(10) AS RESULT; - result ------------- - 0000101100 -(1 row) - -MogDB=# SELECT 44::bit(3) AS RESULT; - result --------- - 100 -(1 row) - -MogDB=# SELECT cast(-44 as bit(12)) AS RESULT; - result --------------- - 111111010100 -(1 row) - -MogDB=# SELECT '1110'::bit(4)::integer AS RESULT; - result --------- - 14 -(1 row) - -MogDB=# select substring('10101111'::bit(8), 2); - substring ------------ - 0101111 -(1 row) -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> Casting to just "bit" means casting to bit(1), and so will deliver only the least significant bit of the integer. +--- +title: Bit String Functions and Operators +summary: Bit String Functions and Operators +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Bit String Functions and Operators + +Aside from the usual comparison operators, the following operators can be used. Bit string operands of **&**, **|**, and **\#** must be of equal length. In case of bit shifting, the original length of the string is preserved by zero padding (if necessary). + +- || + + Description: Connects bit strings. + + Example: + + ```sql + MogDB=# SELECT B'10001' || B'011' AS RESULT; + result + ---------- + 10001011 + (1 row) + ``` + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** + > It is recommended that a column have no more than 180 consecutive internal joins. A column with over 180 joins will be split into joined consecutive strings. + > Example:**str1||str2||str3||str4** is split into **(str1||str2)||(str3||str4)**. + +- & + + Description: Specifies the AND operation between bit strings. + + Example: + + ```sql + MogDB=# SELECT B'10001' & B'01101' AS RESULT; + result + -------- + 00001 + (1 row) + ``` + +- | + + Description: Specifies the OR operation between bit strings. + + Example: + + ```sql + MogDB=# SELECT B'10001' | B'01101' AS RESULT; + result + -------- + 11101 + (1 row) + ``` + +- \# + + Description: Specifies the OR operation between bit strings if they are inconsistent. 
That is, this is the XOR operation: each position returns **1** if the corresponding bits in the two strings differ, and **0** if they are the same.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT B'10001' # B'01101' AS RESULT;
+  result
+  --------
+  11100
+  (1 row)
+  ```
+
+- ~
+
+  Description: Specifies the NOT operation between bit strings.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT ~B'10001' AS RESULT;
+  result
+  ----------
+  01110
+  (1 row)
+  ```
+
+- <<
+
+  Description: Shifts left in a bit string.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT B'10001' << 3 AS RESULT;
+  result
+  ----------
+  01000
+  (1 row)
+  ```
+
+- >>
+
+  Description: Shifts right in a bit string.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT B'10001' >> 2 AS RESULT;
+  result
+  ----------
+  00100
+  (1 row)
+  ```
+
+The following SQL-standard functions work on bit strings as well as strings: **length**, **bit_length**, **octet_length**, **position**, **substring**, and **overlay**.
+
+The following functions work on bit strings as well as binary strings: **get_bit** and **set_bit**. When working with a bit string, these functions number the first (leftmost) bit of the string as bit 0.
+
+In addition, it is possible to convert between integral values and type **bit**. Example:
+
+```sql
+MogDB=# SELECT 44::bit(10) AS RESULT;
+ result
+------------
+ 0000101100
+(1 row)
+
+MogDB=# SELECT 44::bit(3) AS RESULT;
+ result
+--------
+ 100
+(1 row)
+
+MogDB=# SELECT cast(-44 as bit(12)) AS RESULT;
+ result
+--------------
+ 111111010100
+(1 row)
+
+MogDB=# SELECT '1110'::bit(4)::integer AS RESULT;
+ result
+--------
+ 14
+(1 row)
+
+MogDB=# select substring('10101111'::bit(8), 2);
+ substring
+-----------
+ 0101111
+(1 row)
+```
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+> Casting to just "bit" means casting to bit(1), and so will deliver only the least significant bit of the integer.
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/comparison-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/comparison-operators.md
index edf5f626..8bb04aa9 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/comparison-operators.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/comparison-operators.md
@@ -1,27 +1,27 @@
----
-title: Comparison Operators
-summary: Comparison Operators
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Comparison Operators
-
-Comparison operators are available for the most data types and return Boolean values.
-
-All comparison operators are binary operators. Only data types that are the same or can be implicitly converted can be compared using comparison operators.
-
-Table 1 describes comparison operators provided by MogDB.
-
-**Table 1** Comparison operators
-
-| Operator | Description |
-| :----------------- | :----------------------- |
-| < | Less than |
-| > | Greater than |
-| <= | Less than or equal to |
-| >= | Greater than or equal to |
-| = | Equal to |
-| <>, != or ^= | Not equal to |
-
-Comparison operators are available for all relevant data types. All comparison operators are binary operators that returned values of Boolean type. The calculation priority of the inequality sign is higher than that of the equality sign. If the entered data is different and cannot be implicitly converted, the comparison fails. For example, an expression such as 1<2<3 is invalid because the less-than sign (<) cannot be used to compare Boolean values and 3.
+---
+title: Comparison Operators
+summary: Comparison Operators
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Comparison Operators
+
+Comparison operators are available for most data types and return Boolean values.
+
+All comparison operators are binary operators. Only data types that are the same or can be implicitly converted can be compared using comparison operators.
+
+Table 1 describes comparison operators provided by MogDB.
+
+**Table 1** Comparison operators
+
+| Operator | Description |
+| :----------------- | :----------------------- |
+| < | Less than |
+| > | Greater than |
+| <= | Less than or equal to |
+| >= | Greater than or equal to |
+| = | Equal to |
+| <>, !=, or ^= | Not equal to |
+
+Comparison operators are available for all relevant data types. All comparison operators are binary operators that return values of Boolean type. Inequality operators have a higher calculation priority than the equality operator. If the operands have different data types and cannot be implicitly converted, the comparison fails. For example, an expression such as 1<2<3 is invalid because the less-than sign (<) cannot be used to compare the Boolean value of 1<2 with 3.
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/data-damage-detection-and-repair-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/data-damage-detection-and-repair-functions.md
index f99f3f82..c617c39e 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/data-damage-detection-and-repair-functions.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/data-damage-detection-and-repair-functions.md
@@ -1,207 +1,207 @@
----
-title: Data Damage Detection and Repair Functions
-summary: Data Damage Detection and Repair Functions
-author: Guo Huan
-date: 2022-05-10
----
-
-# Data Damage Detection and Repair Functions
-
-- gs_verify_data_file(verify_segment bool)
-
-  Description: Checks whether files in the current database of the current instance are lost. The verification only checks whether intermediate segments are lost in the main file of the data table. The default value is **false**, indicating that the segment-page table data file is not verified. If this parameter is set to **true**, only segment-page table files are verified. By default, only initial users, users with the **SYSADMIN** permission, and users with the O&M administrator attribute in the O&M mode can view the information. Other users can use the information only after being granted with permissions.
-
-  The returned result is as follows:
-
-  - Non-segment-page table: **rel_oid** and **rel_name** indicate the table OID and table name of the corresponding file, and **miss_file_path** indicates the relative path of the lost file.
-  - Segment-paged table: All tables are stored in the same file. Therefore, **rel_oid** and **rel_name** cannot display information about a specific table. For a segment-page table, if the first file is damaged, the subsequent files such as .1 and .2 are not checked. For example, if 3, 3.1, and 3.2 are damaged, only 3 damage can be detected. When the number of segment-page files is less than 5, the files that are not generated are also detected during function detection. For example, if there are only files 1 and 2, files 3, 4, and 5 are detected during segment-page file detection. In the following examples, the first is an example of checking a non-segment-page table, and the second is an example of checking a segment-page table.
- - Parameter description: - - - verify_segment - - Specifies the range of files to be checked. **false** indicates that non-segment-page tables are verified. **true** indicates that segment-page tables are verified. - - The value can be **true** or **false** (default value). - - Return type: record - - Example: - - Verify a non-segment-page table. - - ``` - MogDB=# select * from gs_verify_data_file(); - node_name | rel_oid | rel_name | miss_file_path - ------------------+---------+--------------+------------------ - dn_6001_6002_6003 | 16554 | test | base/16552/24745 - ``` - - Verify a segment-page table. - - ``` - MogDB=# select * from gs_verify_data_file(true); - node_name | rel_oid | rel_name | miss_file_path - -------------------+---------+----------+---------------- - dn_6001_6002_6003 | 0 | none | base/16573/2 - ``` - -- gs_repair_file(tableoid Oid,path text, timeout int) - - Description: Repairs the file based on the input parameters. Only the primary DN with normal primary/standby connection is supported. The parameter is set based on the OID and path returned by the **gs_verify_data_file** function. The value of table OID for a segment-page table ranges from 0 to 4294967295. (The internal verification determines whether a file is a segment-page table file based on the file path. The table OID is not used for a segment-page table file.) If the repair is successful, **true** is returned. If the repair fails, the failure cause is displayed. By default, only the initial user, users with the **SYSADMIN** permission, and users with the O&M administrator attribute in O&M mode on the primary DN can view the table. Other users can use the table only after being granted with permissions. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **CAUTION:** 1. If a file on a DN is damaged, a verification error occurs when the DN is promoted to primary. An error message is displayed, indicating that the DN cannot be promoted to primary due to PANIC exit. This is normal. After another DN is promoted to primary, the fault can be rectified through the automatic recovery of the standby DN. 2. If a file exists but its size is 0, the file will not be repaired. To repair the file, you need to delete the file whose size is 0 and then repair it. 3. You can delete a file only after the file FD is automatically closed. You can manually restart the process or perform a primary/standby switchover. - -Parameter description: - -- tableoid - - OID of the table corresponding to the file to be repaired. Set this parameter based on the **rel_oid** column in the list returned by the **gs_verify_data_file** function. - - Value range: OID ranging from 0 to 4294967295. Note: A negative value will be forcibly converted to a non-negative integer. - -- path - - Path of the file to be repaired. Set this parameter based on the **miss_file_path** column in the list returned by the **gs_verify_data_file** function. - - Value range: a string - -- timeout - - Specifies the duration for waiting for the standby DN to replay. The repair file needs to wait for the standby DN to be put back to the corresponding location on the current primary DN. Set this parameter based on the replay duration of the standby DN. - - Value range: 60s to 3600s. - -Return type: Boolean - -Example: - -```` -``` -MogDB=# select * from gs_repair_file(16554,'base/16552/24745',360); -gs_repair_file ----------------- -t -``` -```` - -- local_bad_block_info() - - Description: Displays the page damage of the instance. 
You can read the page from the disk and record the page CRC failure. By default, only initial users, users with the **SYSADMIN** permission, users with the monitoring administrator attribute, users with the O&M administrator attribute in the O&M mode, and monitoring users can view the information. Other users can use the information only after being granted with permissions. - - In the displayed information, **file_path** indicates the relative path of the damaged file. If the table is a segment-page table, the logical information instead of the actual physical file information is displayed. **block_num** indicates the number of the page where the file is damaged. The page number starts from 0. **check_time** indicates the time when the page damage is detected. **repair_time** indicates the time when the page is repaired. - - Return type: record - - Example: - - ``` - MogDB=# select * from local_bad_block_info(); - node_name | spc_node | db_node | rel_node| bucket_node | fork_num | block_num | file_path | check_time | repair_time - -----------------+-------+--------+--------+--------------+----------+-----------+-----------------+--------------------------+------------------------------- - dn_6001_6002_6003| 1663 | 16552 | 24745 | -1 | 0 | 0 | base/16552/24745 | 2022-01-13 20:19:08.385004+08 | 2022-01-13 20:19:08.407314+08 - - ``` - -- local_clear_bad_block_info() - - Description: Deletes data of repaired pages from **local_bad_block_info**, that is, information whose **repair_time** is not empty. By default, only initial users, users with the **SYSADMIN** permission, users with the O&M administrator attribute in the O&M mode, and monitoring users can view the information. Other users can use the information only after being granted with permissions. - - Return type: Boolean - - Example: - - ``` - MogDB=# select * from local_clear_bad_block_info(); - result - -------- - t - ``` - -- gs_verify_and_tryrepair_page (path text, blocknum oid, verify_mem bool, is_segment bool) - - Description: Verifies the page specified by the instance. By default, only the initial user, users with the **SYSADMIN** permission, and users with the O&M administrator attribute in O&M mode on the primary DN can view the table. Other users can use the table only after being granted with permissions. - - In the command output, **disk_page_res** indicates the verification result of the page on the disk, **mem_page_res** indicates the verification result of the page in the memory, and **is_repair** indicates whether the repair function is triggered during the verification. **t** indicates that the page is repaired, and **f** indicates that the page is not repaired. - - Note: If a page on a DN is damaged, a verification error occurs when the DN is promoted to primary. An error message is displayed, indicating that the DN cannot be promoted to primary due to PANIC exit. This is normal. After another DN is promoted to primary, the fault can be rectified through the automatic recovery of the standby DN. - - Parameter description: - - - path - - Path of the damaged file. Set this parameter based on the **file_path** column in the **local_bad_block_info** file. - - Value range: a string - - - blocknum - - Page number of the damaged file. Set this parameter based on the **block_num** column in the **local_bad_block_info** file. - - Value range: OID ranging from 0 to 4294967295. Note: A negative value will be forcibly converted to a non-negative integer. 
- - - verify_mem - - Specifies whether to verify a specified page in the memory. If this parameter is set to **false**, only pages on the disk are verified. If this parameter is set to **true**, pages in the memory and on the disk are verified. If a page on the disk is damaged, the system verifies the basic information of the page in the memory and flushes the page to the disk to restore the page. If a page is not found in the memory during memory page verification, the page on the disk is read through the memory API. During this process, if the disk page is faulty, the remote read automatic repair function is triggered. - - Value range: The value is of a Boolean type and can be **true** or **false**. - - - is_segment - - Determines whether the table is a segment-page table. Set this parameter based on the value of **bucket_node** in the **local_bad_block_info** file. If the value of **bucket_node** is **–1**, the table is not a segment-page table. In this case, set **is_segment** to **false**. If the value of **bucket_node** is not –1, set **is_segment** to **true**. - - Value range: The value is of a Boolean type and can be **true** or **false**. - - Return type: record - - Example: - - ``` - MogDB=# select * from gs_verify_and_tryrepair_page('base/16552/24745',0,false,false); - node_name | path | blocknum | disk_page_res | mem_page_res | is_repair - ------------------+------------------+------------+-----------------------------+---------------+---------- - dn_6001_6002_6003 | base/16552/24745 | 0 | page verification succeeded.| | f - ``` - -- gs_repair_page(path text, blocknum oid, is_segment bool, timeout int) - - Description: Restores the specified page of the instance. This function can be used only by the primary DN that is properly connected to the primary and standby DNs. If the page is successfully restored, **true** is returned. If an error occurs during the restoration, an error message is displayed. By default, only the initial user, users with the **SYSADMIN** permission, and users with the O&M administrator attribute in O&M mode on the primary DN can view the table. Other users can use the table only after being granted with permissions. - - Note: If a page on a DN is damaged, a verification error occurs when the DN is promoted to primary. An error message is displayed, indicating that the DN cannot be promoted to primary due to PANIC exit. This is normal. After another DN is promoted to primary, the fault can be rectified through the automatic recovery of the standby DN. - - Parameter description: - - - path - - Path of the damaged page. Set this parameter based on the **file_path** column in **local_bad_block_info** or the **path** column in the **gs_verify_and_tryrepair_page** function. - - Value range: a string - - - blocknum - - Number of the damaged page. Set this parameter based on the **block_num** column in **local_bad_block_info** or the **blocknum** column in the **gs_verify_and_tryrepair_page** function. - - Value range: OID ranging from 0 to 4294967295. Note: A negative value will be forcibly converted to a non-negative integer. - - - is_segment - - Determines whether the table is a segment-page table. The value of this parameter is determined by the value of **bucket_node** in **local_bad_block_info**. If the value of **bucket_node** is **–1**, the table is not a segment-page table and **is_segment** is set to **false**. If the value of **bucket_node** is not –1, **is_segment** is set to true. 
-
-    Value range: The value is of a Boolean type and can be **true** or **false**.
-
-  - timeout
-
-    Duration of waiting for standby DN replay. The repair page needs to wait for the standby DN to move back to the location of the current primary DN. Set this parameter based on the playback duration of the standby DN.
-
-    Value range: 60s to 3600s.
-
-  Return type: Boolean
-
-  Example:
-
-  ```
-  MogDB=# select * from gs_repair_page('base/16552/24745',0,false,60);
-  result
-  --------
-  t
+---
+title: Data Damage Detection and Repair Functions
+summary: Data Damage Detection and Repair Functions
+author: Guo Huan
+date: 2022-05-10
+---
+
+# Data Damage Detection and Repair Functions
+
+- gs_verify_data_file(verify_segment bool)
+
+  Description: Checks whether files in the current database of the current instance are lost. The verification only checks whether intermediate segments are lost in the main file of the data table. The default value is **false**, indicating that the segment-page table data file is not verified. If this parameter is set to **true**, only segment-page table files are verified. By default, only initial users, users with the **SYSADMIN** permission, and users with the O&M administrator attribute in the O&M mode can view the information. Other users can use the information only after being granted with permissions.
+
+  The returned result is as follows:
+
+  - Non-segment-page table: **rel_oid** and **rel_name** indicate the table OID and table name of the corresponding file, and **miss_file_path** indicates the relative path of the lost file.
+  - Segment-page table: All tables are stored in the same file. Therefore, **rel_oid** and **rel_name** cannot display information about a specific table. For a segment-page table, if the first file is damaged, the subsequent files such as .1 and .2 are not checked. For example, if 3, 3.1, and 3.2 are damaged, only the damage to file 3 can be detected. When a segment-page table has fewer than five files, files that have not been generated yet are also reported during detection. For example, if there are only files 1 and 2, files 3, 4, and 5 are also reported during segment-page file detection. In the following examples, the first checks a non-segment-page table, and the second checks a segment-page table.
+
+  Parameter description:
+
+  - verify_segment
+
+    Specifies the range of files to be checked. **false** indicates that non-segment-page tables are verified. **true** indicates that segment-page tables are verified.
+
+    The value can be **true** or **false** (default value).
+
+  Return type: record
+
+  Example:
+
+  Verify a non-segment-page table.
+
+  ```
+  MogDB=# select * from gs_verify_data_file();
+  node_name | rel_oid | rel_name | miss_file_path
+  ------------------+---------+--------------+------------------
+  dn_6001_6002_6003 | 16554 | test | base/16552/24745
+  ```
+
+  Verify a segment-page table.
+
+  ```
+  MogDB=# select * from gs_verify_data_file(true);
+  node_name | rel_oid | rel_name | miss_file_path
+  -------------------+---------+----------+----------------
+  dn_6001_6002_6003 | 0 | none | base/16573/2
+  ```
+
+- gs_repair_file(tableoid Oid, path text, timeout int)
+
+  Description: Repairs the file based on the input parameters. Only the primary DN with normal primary/standby connection is supported. The parameter is set based on the OID and path returned by the **gs_verify_data_file** function. The value of table OID for a segment-page table ranges from 0 to 4294967295.
(The internal verification determines whether a file is a segment-page table file based on the file path. The table OID is not used for a segment-page table file.) If the repair is successful, **true** is returned. If the repair fails, the failure cause is displayed. By default, only the initial user, users with the **SYSADMIN** permission, and users with the O&M administrator attribute in O&M mode on the primary DN can view the table. Other users can use the table only after being granted with permissions.
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **CAUTION:**
+  > 1. If a file on a DN is damaged, a verification error occurs when the DN is promoted to primary. An error message is displayed, indicating that the DN cannot be promoted to primary due to PANIC exit. This is normal. After another DN is promoted to primary, the fault can be rectified through the automatic recovery of the standby DN.
+  > 2. If a file exists but its size is 0, the file will not be repaired. To repair the file, you need to delete the file whose size is 0 and then repair it.
+  > 3. You can delete a file only after the file FD is automatically closed. You can manually restart the process or perform a primary/standby switchover.
+
+  Parameter description:
+
+  - tableoid
+
+    OID of the table corresponding to the file to be repaired. Set this parameter based on the **rel_oid** column in the list returned by the **gs_verify_data_file** function.
+
+    Value range: OID ranging from 0 to 4294967295. Note: A negative value will be forcibly converted to a non-negative integer.
+
+  - path
+
+    Path of the file to be repaired. Set this parameter based on the **miss_file_path** column in the list returned by the **gs_verify_data_file** function.
+
+    Value range: a string
+
+  - timeout
+
+    Specifies the duration for waiting for the standby DN to replay. The repair file needs to wait for the standby DN to be put back to the corresponding location on the current primary DN. Set this parameter based on the replay duration of the standby DN.
+
+    Value range: 60s to 3600s.
+
+  Return type: Boolean
+
+  Example:
+
+  ```
+  MogDB=# select * from gs_repair_file(16554,'base/16552/24745',360);
+  gs_repair_file
+  ----------------
+  t
+  ```
+
+- local_bad_block_info()
+
+  Description: Displays the page damage of the instance. A page is recorded as damaged when it is read from the disk and fails CRC verification. By default, only initial users, users with the **SYSADMIN** permission, users with the monitoring administrator attribute, users with the O&M administrator attribute in the O&M mode, and monitoring users can view the information. Other users can use the information only after being granted with permissions.
+
+  In the displayed information, **file_path** indicates the relative path of the damaged file. If the table is a segment-page table, the logical information instead of the actual physical file information is displayed. **block_num** indicates the number of the page where the file is damaged. The page number starts from 0. **check_time** indicates the time when the page damage is detected. **repair_time** indicates the time when the page is repaired.
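+
+  For instance, pages that have been detected but not yet repaired can be listed by filtering on an empty **repair_time**. This is an illustrative sketch based on the columns described above, not output captured from a live cluster; it assumes **repair_time** stays NULL until a repair succeeds:
+
+  ```
+  MogDB=# select file_path, block_num, check_time from local_bad_block_info() where repair_time is null;
+  ```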
+ + Return type: record + + Example: + + ``` + MogDB=# select * from local_bad_block_info(); + node_name | spc_node | db_node | rel_node| bucket_node | fork_num | block_num | file_path | check_time | repair_time + -----------------+-------+--------+--------+--------------+----------+-----------+-----------------+--------------------------+------------------------------- + dn_6001_6002_6003| 1663 | 16552 | 24745 | -1 | 0 | 0 | base/16552/24745 | 2022-01-13 20:19:08.385004+08 | 2022-01-13 20:19:08.407314+08 + + ``` + +- local_clear_bad_block_info() + + Description: Deletes data of repaired pages from **local_bad_block_info**, that is, information whose **repair_time** is not empty. By default, only initial users, users with the **SYSADMIN** permission, users with the O&M administrator attribute in the O&M mode, and monitoring users can view the information. Other users can use the information only after being granted with permissions. + + Return type: Boolean + + Example: + + ``` + MogDB=# select * from local_clear_bad_block_info(); + result + -------- + t + ``` + +- gs_verify_and_tryrepair_page (path text, blocknum oid, verify_mem bool, is_segment bool) + + Description: Verifies the page specified by the instance. By default, only the initial user, users with the **SYSADMIN** permission, and users with the O&M administrator attribute in O&M mode on the primary DN can view the table. Other users can use the table only after being granted with permissions. + + In the command output, **disk_page_res** indicates the verification result of the page on the disk, **mem_page_res** indicates the verification result of the page in the memory, and **is_repair** indicates whether the repair function is triggered during the verification. **t** indicates that the page is repaired, and **f** indicates that the page is not repaired. + + Note: If a page on a DN is damaged, a verification error occurs when the DN is promoted to primary. An error message is displayed, indicating that the DN cannot be promoted to primary due to PANIC exit. This is normal. After another DN is promoted to primary, the fault can be rectified through the automatic recovery of the standby DN. + + Parameter description: + + - path + + Path of the damaged file. Set this parameter based on the **file_path** column in the **local_bad_block_info** file. + + Value range: a string + + - blocknum + + Page number of the damaged file. Set this parameter based on the **block_num** column in the **local_bad_block_info** file. + + Value range: OID ranging from 0 to 4294967295. Note: A negative value will be forcibly converted to a non-negative integer. + + - verify_mem + + Specifies whether to verify a specified page in the memory. If this parameter is set to **false**, only pages on the disk are verified. If this parameter is set to **true**, pages in the memory and on the disk are verified. If a page on the disk is damaged, the system verifies the basic information of the page in the memory and flushes the page to the disk to restore the page. If a page is not found in the memory during memory page verification, the page on the disk is read through the memory API. During this process, if the disk page is faulty, the remote read automatic repair function is triggered. + + Value range: The value is of a Boolean type and can be **true** or **false**. + + - is_segment + + Determines whether the table is a segment-page table. Set this parameter based on the value of **bucket_node** in the **local_bad_block_info** file. 
If the value of **bucket_node** is **–1**, the table is not a segment-page table. In this case, set **is_segment** to **false**. If the value of **bucket_node** is not –1, set **is_segment** to **true**. + + Value range: The value is of a Boolean type and can be **true** or **false**. + + Return type: record + + Example: + + ``` + MogDB=# select * from gs_verify_and_tryrepair_page('base/16552/24745',0,false,false); + node_name | path | blocknum | disk_page_res | mem_page_res | is_repair + ------------------+------------------+------------+-----------------------------+---------------+---------- + dn_6001_6002_6003 | base/16552/24745 | 0 | page verification succeeded.| | f + ``` + +- gs_repair_page(path text, blocknum oid, is_segment bool, timeout int) + + Description: Restores the specified page of the instance. This function can be used only by the primary DN that is properly connected to the primary and standby DNs. If the page is successfully restored, **true** is returned. If an error occurs during the restoration, an error message is displayed. By default, only the initial user, users with the **SYSADMIN** permission, and users with the O&M administrator attribute in O&M mode on the primary DN can view the table. Other users can use the table only after being granted with permissions. + + Note: If a page on a DN is damaged, a verification error occurs when the DN is promoted to primary. An error message is displayed, indicating that the DN cannot be promoted to primary due to PANIC exit. This is normal. After another DN is promoted to primary, the fault can be rectified through the automatic recovery of the standby DN. + + Parameter description: + + - path + + Path of the damaged page. Set this parameter based on the **file_path** column in **local_bad_block_info** or the **path** column in the **gs_verify_and_tryrepair_page** function. + + Value range: a string + + - blocknum + + Number of the damaged page. Set this parameter based on the **block_num** column in **local_bad_block_info** or the **blocknum** column in the **gs_verify_and_tryrepair_page** function. + + Value range: OID ranging from 0 to 4294967295. Note: A negative value will be forcibly converted to a non-negative integer. + + - is_segment + + Determines whether the table is a segment-page table. The value of this parameter is determined by the value of **bucket_node** in **local_bad_block_info**. If the value of **bucket_node** is **–1**, the table is not a segment-page table and **is_segment** is set to **false**. If the value of **bucket_node** is not –1, **is_segment** is set to true. + + Value range: The value is of a Boolean type and can be **true** or **false**. + + - timeout + + Duration of waiting for standby DN replay. The repair page needs to wait for the standby DN to move back to the location of the current primary DN. Set this parameter based on the playback duration of the standby DN. + + Value range: 60s to 3600s. 
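+
+  For orientation, the detection and repair functions in this section can be chained into a single inspection-and-repair pass. The sketch below is illustrative only: the path, block number, and timeout reuse the sample values from the examples in this section, and in practice these values should be taken from **local_bad_block_info()** and the output of **gs_verify_and_tryrepair_page**:
+
+  ```
+  MogDB=# select file_path, block_num, bucket_node from local_bad_block_info();
+  MogDB=# select * from gs_verify_and_tryrepair_page('base/16552/24745', 0, false, false);
+  MogDB=# select * from gs_repair_page('base/16552/24745', 0, false, 60);
+  ```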
+ + Return type: Boolean + + Example: + + ``` + MogDB=# select * from gs_repair_page('base/16552/24745',0,false,60); + result + -------- + t ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/date-and-time-processing-functions-and-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/date-and-time-processing-functions-and-operators.md index d3a9fa68..d3eec6e1 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/date-and-time-processing-functions-and-operators.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/date-and-time-processing-functions-and-operators.md @@ -1,1320 +1,1320 @@ ---- -title: Date and Time Processing Functions and Operators -summary: Date and Time Processing Functions and Operators -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Date and Time Processing Functions and Operators - -## Date and Time Operators - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-warning.gif) **WARNING:** -> When the user uses date/time operators, explicit type prefixes are modified for corresponding operands to ensure that the operands parsed by the database are consistent with what the user expects, and no unexpected results occur. -> For example, abnormal mistakes will occur in the following example without an explicit data type. -> -> ```sql -> SELECT date '2001-10-01' - '7' AS RESULT; -> ``` - -**Table 1** Time and date operators - -| Operator | Example | -| -------- | ------------------------------------------------------------ | -| + | MogDB=# SELECT date '2001-09-28' + integer '7' AS RESULT;
result
-----------------
2001-10-05
(1 row) | -| | MogDB=# SELECT date '2001-09-28' + interval '1 hour' AS RESULT;
result
-----------------------------
2001-09-28 01:00:00
(1 row) | -| | MogDB=# SELECT date '2001-09-28' + time '03:00' AS RESULT;
result
-------------------------------
2001-09-28 03:00:00
(1 row) | -| | MogDB=# SELECT interval '1 day' + interval '1 hour' AS RESULT;
result
----------------------
1 day 01:00:00
(1 row) | -| | MogDB=# SELECT timestamp '2001-09-28 01:00' + interval '23 hours' AS RESULT;
result
------------------------------
2001-09-29 00:00:00
(1 row) | -| | MogDB=# SELECT time '01:00' + interval '3 hours' AS RESULT;
result
-------------
04:00:00
(1 row) | -| - | MogDB=# SELECT date '2001-10-01' - date '2001-09-28' AS RESULT;
result
----------
3days
(1 row) | -| | MogDB=# SELECT date '2001-10-01' - integer '7' AS RESULT;
result
-------------------------------
2001-09-24 00:00:00
(1 row) | -| | MogDB=# SELECT date '2001-09-28' - interval '1 hour' AS RESULT;
result
--------------------------------
2001-09-27 23:00:00
(1 row) | -| | MogDB=# SELECT time '05:00' - time '03:00' AS RESULT;
result
-------------
02:00:00
(1 row) | -| | MogDB=# SELECT time '05:00' - interval '2 hours' AS RESULT;
result
-------------
03:00:00
(1 row) | -| | MogDB=# SELECT timestamp '2001-09-28 23:00' - interval '23 hours' AS RESULT;
result
-------------------------------
2001-09-28 00:00:00
(1 row) | -| | MogDB=# SELECT interval '1 day' - interval '1 hour' AS RESULT;
result
-------------
23:00:00
(1 row) | -| | MogDB=# SELECT timestamp '2001-09-29 03:00' - timestamp '2001-09-27 12:00' AS RESULT;
result
---------------------
1 day 15:00:00
(1 row) | -| * | MogDB=# SELECT 900 * interval '1 second' AS RESULT;
result
--------------
00:15:00
(1 row) | -| | MogDB=# SELECT 21 * interval '1 day' AS RESULT;
result
-------------
21 days
(1 row) | -| | MogDB=# SELECT double precision '3.5' * interval '1 hour' AS RESULT;
result
--------------
03:30:00
(1 row) | -| / | MogDB=# SELECT interval '1 hour' / double precision '1.5' AS RESULT;
result
-------------
00:40:00
(1 row) | - -## Time/Date Functions - -- age(timestamp, timestamp) - - Description: Subtracts parameters, producing a result in YYYY-MM-DD format. If the result is negative, the returned result is also negative. The input parameters can contain timezone or not. - - Return type: interval - - Example: - - ```sql - MogDB=# SELECT age(timestamp '2001-04-10', timestamp '1957-06-13'); - age - ------------------------- - 43 years 9 mons 27 days - (1 row) - ``` - -- age(timestamp) - - Description: Minuses the current time with the parameter. The input parameter can contain timezone or not. - - Return type: interval - - Example: - - ```sql - MogDB=# SELECT age(timestamp '1957-06-13'); - age - ------------------------- - 60 years 2 mons 18 days - (1 row) - ``` - -- clock_timestamp() - - Description: Specifies the current timestamp of the real-time clock. - - Return type: timestamp with time zone - - Example: - - ```sql - MogDB=# SELECT clock_timestamp(); - clock_timestamp - ------------------------------- - 2017-09-01 16:57:36.636205+08 - (1 row) - ``` - -- current_date - - Description: Specifies the current date. - - Return type: date - - Example: - - ```sql - MogDB=# SELECT current_date; - date - ------------ - 2017-09-01 - (1 row) - ``` - -- current_time - - Description: Specifies the current time. - - Return type: time with time zone - - Example: - - ```sql - MogDB=# SELECT current_time; - timetz - -------------------- - 16:58:07.086215+08 - (1 row) - ``` - -- current_timestamp - - Description: Specifies the current date and time. - - Return type: timestamp with time zone - - Example: - - ```sql - MogDB=# SELECT current_timestamp; - pg_systimestamp - ------------------------------ - 2017-09-01 16:58:19.22173+08 - (1 row) - ``` - -- systimestamp - - Description: Returns the system date of the system where the database is located, including fractional seconds and time zone. - - Return type: timestamp with time zone - - ```sql - MogDB=# select systimestamp; - pg_systimestamp - ----------------------------- - 2021-12-24 14:34:24.6903+08 - (1 row) - ``` - -- date_part(text, timestamp) - - Description: Obtains the value of a subdomain in date or time, for example, the year or hour. It is equivalent to **extract(field from timestamp)**. - - Timestamp types: abstime, date, interval, reltime, time with time zone, time without time zone, timestamp with time zone, timestamp without time zone - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT date_part('hour', timestamp '2001-02-16 20:38:40'); - date_part - ----------- - 20 - (1 row) - ``` - -- date_part(text, interval) - - Description: Obtains the subdomain value of the date/time value. When obtaining the month value, if the value is greater than 12, obtain the remainder after it is divided by 12. It is equivalent to **extract(field from timestamp)**. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT date_part('month', interval '2 years 3 months'); - date_part - ----------- - 3 - (1 row) - ``` - -- date_trunc(text, timestamp) - - Description: Truncates to the precision specified by **text**. - - Return type: interval, timestamp with time zone, timestamp without time zone - - Example: - - ```sql - MogDB=# SELECT date_trunc('hour', timestamp '2001-02-16 20:38:40'); - date_trunc - --------------------- - 2001-02-16 20:00:00 - (1 row) - ``` - -- trunc(timestamp) - - Description: Truncates to day by default. 
- - Example: - - ```sql - MogDB=# SELECT trunc(timestamp '2001-02-16 20:38:40'); trunc - --------------------- - 2001-02-16 00:00:00 - (1 row) - ``` - -- daterange(arg1, arg2) - - Description: Obtains time boundary information. The type of **arg1** and **arg2** is **date**. - - Return type: daterange - - Example: - - ```sql - MogDB=# select daterange('2000-05-06','2000-08-08'); - daterange - ------------------------- - [2000-05-06,2000-08-08) - (1 row) - ``` - -- daterange(arg1, arg2, text) - - Description: Obtains time boundary information. The type of **arg1** and **arg2** is **date**, and the type of **text** is **text**. - - Return type: daterange - - Example: - - ```sql - MogDB=# select daterange('2000-05-06','2000-08-08','[]'); - daterange - ------------------------- - [2000-05-06,2000-08-09) - (1 row) - ``` - -- extract(field from timestamp) - - Description: Obtains the hour. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT extract(hour from timestamp '2001-02-16 20:38:40'); - date_part - ----------- - 20 - (1 row) - ``` - -- extract(field from interval) - - Description: Obtains the month. If the value is greater than 12, obtain the remainder after it is divided by 12. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT extract(month from interval '2 years 3 months'); - date_part - ----------- - 3 - (1 row) - ``` - -- isfinite(date) - - Description: Tests for a valid date. - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT isfinite(date '2001-02-16'); - isfinite - ---------- - t - (1 row) - ``` - -- isfinite(timestamp) - - Description: Tests for a valid timestamp. - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT isfinite(timestamp '2001-02-16 21:28:30'); - isfinite - ---------- - t - (1 row) - ``` - -- isfinite(interval) - - Description: Tests for a valid interval. - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT isfinite(interval '4 hours'); - isfinite - ---------- - t - (1 row) - ``` - -- justify_days(interval) - - Description: Adjusts intervals to 30-day time periods, which are represented as months. - - Return type: interval - - Example: - - ```sql - MogDB=# SELECT justify_days(interval '35 days'); - justify_days - -------------- - 1 mon 5 days - (1 row) - ``` - -- justify_hours(interval) - - Description: Sets the time interval in days (24 hours is one day). - - Return type: interval - - Example: - - ```sql - MogDB=# SELECT JUSTIFY_HOURS(INTERVAL '27 HOURS'); - justify_hours - ---------------- - 1 day 03:00:00 - (1 row) - ``` - -- justify_interval(interval) - - Description: Adjusts **interval** using **justify_days** and **justify_hours**. - - Return type: interval - - Example: - - ```sql - MogDB=# SELECT JUSTIFY_INTERVAL(INTERVAL '1 MON -1 HOUR'); - justify_interval - ------------------ - 29 days 23:00:00 - (1 row) - ``` - -- localtime - - Description: Specifies the current time. - - Return type: time - - Example: - - ```sql - MogDB=# SELECT localtime AS RESULT; - result - ---------------- - 16:05:55.664681 - (1 row) - ``` - -- localtimestamp - - Description: Specifies the current date and time. - - Return type: timestamp - - Example: - - ```sql - MogDB=# SELECT localtimestamp; - timestamp - ---------------------------- - 2017-09-01 17:03:30.781902 - (1 row) - ``` - -- now() - - Description: Specifies the current date and time. 
- - Return type: timestamp with time zone - - Example: - - ```sql - MogDB=# SELECT now(); - now - ------------------------------- - 2017-09-01 17:03:42.549426+08 - (1 row) - ``` - -- timenow - - Description: Specifies the current date and time. - - Return type: timestamp with time zone - - Example: - - ```sql - MogDB=# select timenow(); - timenow - ------------------------ - 2020-06-23 20:36:56+08 - (1 row) - ``` - -- numtodsinterval(num, interval_unit) - - Description: Converts a number to the interval type. **num** is a numeric-typed number. **interval_unit** is a string in the following format: 'DAY' | 'HOUR' | 'MINUTE' | 'SECOND' - - You can set the IntervalStyle parameter to **a** to be compatible with the interval output format of the function. - - Example: - - ```sql - MogDB=# SELECT numtodsinterval(100, 'HOUR'); - numtodsinterval - ----------------- - 100:00:00 - (1 row) - - MogDB=# SET intervalstyle = a; - SET - MogDB=# SELECT numtodsinterval(100, 'HOUR'); - numtodsinterval - ------------------------------- - +000000004 04:00:00.000000000 - (1 row) - ``` - -- pg_sleep(seconds) - - Description: Specifies the delay time of the server thread in unit of second. - - Return type: void - - Example: - - ```sql - MogDB=# SELECT pg_sleep(10); - pg_sleep - ---------- - - (1 row) - ``` - -- statement_timestamp() - - Description: Specifies the current date and time. - - Return type: timestamp with time zone - - Example: - - ```sql - MogDB=# SELECT statement_timestamp(); - statement_timestamp - ------------------------------- - 2017-09-01 17:04:39.119267+08 - (1 row) - ``` - -- sysdate - - Description: Specifies the current date and time. - - Return type: timestamp - - Example: - - ```sql - MogDB=# SELECT sysdate; - sysdate - --------------------- - 2017-09-01 17:04:49 - (1 row) - ``` - -- timeofday() - - Description: Specifies the current date and time (like **clock_timestamp**, but returned as a **text** string) - - Return type: text - - Example: - - ```sql - MogDB=# SELECT timeofday(); - timeofday - ------------------------------------- - Fri Sep 01 17:05:01.167506 2017 CST - (1 row) - ``` - -- transaction_timestamp() - - Description: Specifies the current date and time (equivalent to **current_timestamp**) - - Return type: timestamp with time zone - - Example: - - ```sql - MogDB=# SELECT transaction_timestamp(); - transaction_timestamp - ------------------------------- - 2017-09-01 17:05:13.534454+08 - (1 row) - ``` - -- add_months(d,n) - - Description: Returns the date *date* plus *integer* months. - - Return type: timestamp - - Example: - - ```sql - MogDB=# SELECT add_months(to_date('2017-5-29', 'yyyy-mm-dd'), 11) FROM dual; - add_months - --------------------- - 2018-04-29 00:00:00 - (1 row) - ``` - -- last_day(d) - - Description: Returns the date of the last day of the month that contains *date*. - - Return type: timestamp - - Example: - - ```sql - MogDB=# select last_day(to_date('2017-01-01', 'YYYY-MM-DD')) AS cal_result; - cal_result - --------------------- - 2017-01-31 00:00:00 - (1 row) - ``` - -- next_day(x,y) - - Description: Calculates the time of the next week y started from x. - - Return type: timestamp - - Example: - - ```sql - MogDB=# select next_day(timestamp '2017-05-25 00:00:00','Sunday')AS cal_result; - cal_result - --------------------- - 2017-05-28 00:00:00 - (1 row) - ``` - -- tinterval(abstime, abstime ) - - Description: Creates a time interval with two pieces of absolute time. 
- - Return type: tinterval - - Example: - - ```sql - MogDB=# call tinterval(abstime 'May 10, 1947 23:59:12', abstime 'Mon May 1 00:30:30 1995'); - tinterval - ----------------------------------------------------- - ["1947-05-10 23:59:12+08" "1995-05-01 00:30:30+08"] - (1 row) - ``` - -- tintervalend(tinterval) - - Description: Returns the end time of **tinterval**. - - Return type: abstime - - Example: - - ```sql - MogDB=# select tintervalend('["Sep 4, 1983 23:59:12" "Oct4, 1983 23:59:12"]'); - tintervalend - ------------------------ - 1983-10-04 23:59:12+08 - (1 row) - ``` - -- tintervalrel(tinterval) - - Description: Calculates and returns the relative time of **tinterval**. - - Return type: reltime - - Example: - - ```sql - MogDB=# select tintervalrel('["Sep 4, 1983 23:59:12" "Oct4, 1983 23:59:12"]'); - tintervalrel - -------------- - 1 mon - (1 row) - ``` - -- smalldatetime_ge - - Description: Checks whether the value of the first parameter is greater than or equal to that of the second parameter. - - Parameter: smalldatetime, smalldatetime - - Return type: Boolean - -- smalldatetime_cmp - - Description: Compares two smalldatetime values to check whether they are the same. - - Parameter: smalldatetime, smalldatetime - - Return type: integer - -- smalldatetime_eq - - Description: Compares two smalldatetime values to check whether they are the same. - - Parameter: smalldatetime, smalldatetime - - Return type: Boolean - -- smalldatetime_gt - - Description: Determines whether the first parameter is greater than the second. - - Parameter: smalldatetime, smalldatetime - - Return type: Boolean - -- smalldatetime_hash - - Description: Calculates the hash value corresponding to a timestamp. - - Parameter: smalldatetime - - Return type: integer - -- smalldatetime_in - - Description: Inputs a timestamp. - - Parameter: cstring, oid, integer - - Return type: smalldatetime - -- smalldatetime_larger - - Description: Returns a larger timestamp. - - Parameter: smalldatetime, smalldatetime - - Return type: smalldatetime - -- smalldatetime_le - - Description: Checks whether the value of the first parameter is less than or equal to that of the second parameter. - - Parameter: smalldatetime, smalldatetime - - Return type: Boolean - -- smalldatetime_lt - - Description: Determines whether the first parameter is less than the second parameter. - - Parameter: smalldatetime, smalldatetime - - Return type: Boolean - -- smalldatetime_ne - - Description: Compares two timestamps to check whether they are different. - - Parameter: smalldatetime, smalldatetime - - Return type: Boolean - -- smalldatetime_out - - Description: Converts a timestamp into the external form. - - Parameter: smalldatetime - - Return type: cstring - -- smalldatetime_send - - Description: Converts a timestamp to the binary format. - - Parameter: smalldatetime - - Return type: bytea - -- smalldatetime_smaller - - Description: Returns a smaller smalldatetime. - - Parameter: smalldatetime, smalldatetime - - Return type: smalldatetime - -- smalldatetime_to_abstime - - Description: Converts smalldatetime to abstime. - - Parameter: smalldatetime - - Return type: abstime - -- smalldatetime_to_time - - Description: Converts smalldatetime to time. - - Parameter: smalldatetime - - Return type: time without time zone - -- smalldatetime_to_timestamp - - Description: Converts smalldatetime to timestamp. 
- - Parameter: smalldatetime - - Return type: timestamp without time zone - -- smalldatetime_to_timestamptz - - Description: Converts smalldatetime to timestamptz. - - Parameter: smalldatetime - - Return type: timestamp with time zone - -- smalldatetime_to_varchar2 - - Description: Converts smalldatetime to varchar2. - - Parameter: smalldatetime - - Return type: character varying - - >![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**NOTE:** - > - > There are multiple methods for obtaining the current time. Select an appropriate API based on the actual service scenario. - > - > 1. The following APIs return values based on the start time of the current transaction: - > - > ```sql - > CURRENT_DATE CURRENT_TIME CURRENT_TIME(precision) CURRENT_TIMESTAMP(precision) LOCALTIME LOCALTIMESTAMP LOCALTIME(precision) LOCALTIMESTAMP(precision) - > ``` - > - > **CURRENT_TIME** and **CURRENT_TIMESTAMP(precision)** transfer values with time zones. The values of **LOCALTIME** and **LOCALTIMESTAMP** do not contain time zone information. **CURRENT_TIME**, **LOCALTIME**, and **LOCALTIMESTAMP** can be optionally attached with a precision parameter, which rounds the second field of the result to the specified decimal place. If there is no precision parameter, the result is given the full precision that can be obtained. - > Because these functions all return results by the start time of the current transaction, their values do not change throughout the transaction. We think this is a feature with the purpose to allow a transaction to have a consistent concept at the "current" time, so that multiple modifications in the same transaction can maintain the same timestamp. - > - > 2. The following APIs return the start time of the current statement: - > - > ```sql - > transaction_timestamp() statement_timestamp() now() - > ``` - > - > **transaction_timestamp()** is equivalent to **CURRENT_TIMESTAMP(precision)**, and its name clearly reflects its return value. **statement_timestamp()** returns the start time of the current statement (more accurately, the time when the last instruction is received from the client). The return values of **statement_timestamp()** and **transaction_timestamp()** are the same during the execution of the first instruction of a transaction, but may be different in subsequent instructions. - > **now()** is equivalent to **transaction_timestamp()**. - > - > 3. The following APIs return the actual current time when the function is called: - > - > ```sql - > clock_timestamp() timeofday() - > ``` - > - > **clock_timestamp()** returns the actual "current" time, and its value changes even in the same SQL instruction. Similar to **clock_timestamp()**, **timeofday()** also returns the actual current time. However, the result of **timeofday()** is a formatted text string instead of a timestamp with time zone information. - -## TIMESTAMPDIFF - -- **TIMESTAMPDIFF(unit , timestamp_expr1, timestamp_expr2)** - -The timestampdiff function returns the result of **timestamp_expr2** - **timestamp_expr1** in the specified unit. **timestamp_expr1** and **timestamp_expr2** must be value expressions of the **timestamp**, **timestamptz**, or **date** type. **unit** indicates the unit of the difference between two dates. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> This function is valid only when MogDB is compatible with the MY type (that is, dbcompatibility = 'B'). - -- year - - Year. 
- - ```sql - MogDB=# SELECT TIMESTAMPDIFF(YEAR, '2018-01-01', '2020-01-01'); - timestamp_diff - ---------------- - 2 - (1 row) - ``` - -- quarter - - Quarter. - - ```sql - MogDB=# SELECT TIMESTAMPDIFF(QUARTER, '2018-01-01', '2020-01-01'); - timestamp_diff - ---------------- - 8 - (1 row) - ``` - -- month - - Month. - - ```sql - MogDB=# SELECT TIMESTAMPDIFF(MONTH, '2018-01-01', '2020-01-01'); - timestamp_diff - ---------------- - 24 - (1 row) - ``` - -- week - - Week. - - ```sql - MogDB=# SELECT TIMESTAMPDIFF(WEEK, '2018-01-01', '2020-01-01'); - timestamp_diff - ---------------- - 104 - (1 row) - ``` - -- day - - Day. - - ```sql - MogDB=# SELECT TIMESTAMPDIFF(DAY, '2018-01-01', '2020-01-01'); - timestamp_diff - ---------------- - 730 - (1 row) - ``` - -- hour - - Hour. - - ```sql - MogDB=# SELECT TIMESTAMPDIFF(HOUR, '2020-01-01 10:10:10', '2020-01-01 11:11:11'); - timestamp_diff - ---------------- - 1 - (1 row) - - ``` - -- minute - - Minute. - - ```sql - MogDB=# SELECT TIMESTAMPDIFF(MINUTE, '2020-01-01 10:10:10', '2020-01-01 11:11:11'); - timestamp_diff - ---------------- - 61 - (1 row) - - ``` - -- second - - Second. - - ```sql - MogDB=# SELECT TIMESTAMPDIFF(SECOND, '2020-01-01 10:10:10', '2020-01-01 11:11:11'); - timestamp_diff - ---------------- - 3661 - (1 row) - - ``` - -- microseconds - - The seconds column, including fractional parts, is multiplied by 1,000,000. - - ```sql - MogDB=# SELECT TIMESTAMPDIFF(MICROSECOND, '2020-01-01 10:10:10.000000', '2020-01-01 10:10:10.111111'); - timestamp_diff - ---------------- - 111111 - (1 row) - - ``` - -- timestamp_expr with the time zone - - ```sql - MogDB=# SELECT TIMESTAMPDIFF(HOUR,'2020-05-01 10:10:10-01','2020-05-01 10:10:10-03'); - timestamp_diff - ---------------- - 2 - (1 row) - ``` - -## EXTRACT - -- **EXTRACT(field FROM source)** - - The **extract** function retrieves subcolumns such as year or hour from date/time values. **source** must be a value expression of type **timestamp**, **time**, or **interval**. (Expressions of type **date** are cast to **timestamp** and can therefore be used as well.) **field** is an identifier or string that selects what column to extract from the source value. The **extract** function returns values of type **double precision**. The following are valid **field** names: - -- century - - The first century starts at 0001-01-01 00:00:00 AD. This definition applies to all Gregorian calendar countries. There is no century number 0. You go from **-1** century to **1** century. 
- - Example: - - ```sql - MogDB=# SELECT EXTRACT(CENTURY FROM TIMESTAMP '2000-12-16 12:21:13'); - date_part - ----------- - 20 - (1 row) - ``` - -- day - - - For **timestamp** values, the day (of the month) column (1-31) - - ```sql - MogDB=# SELECT EXTRACT(DAY FROM TIMESTAMP '2001-02-16 20:38:40'); - date_part - ----------- - 16 - (1 row) - ``` - - - For **interval** values, the number of days - - ```sql - MogDB=# SELECT EXTRACT(DAY FROM INTERVAL '40 days 1 minute'); - date_part - ----------- - 40 - (1 row) - ``` - -- decade - - Year column divided by 10 - - ```sql - MogDB=# SELECT EXTRACT(DECADE FROM TIMESTAMP '2001-02-16 20:38:40'); - date_part - ----------- - 200 - (1 row) - ``` - -- dow - - Day of the week as Sunday(**0**) to Saturday (**6**) - - ```sql - MogDB=# SELECT EXTRACT(DOW FROM TIMESTAMP '2001-02-16 20:38:40'); - date_part - ----------- - 5 - (1 row) - ``` - -- doy - - Day of the year (1-365 or 366) - - ```sql - MogDB=# SELECT EXTRACT(DOY FROM TIMESTAMP '2001-02-16 20:38:40'); - date_part - ----------- - 47 - (1 row) - ``` - -- epoch - - - For **timestamp with time zone** values, the number of seconds since 1970-01-01 00:00:00-00 UTC (can be negative). - - For **date** and **timestamp** values, the number of seconds since 1970-01-01 00:00:00-00 local time. - - For **interval** values, the total number of seconds in the interval. - - ```sql - MogDB=# SELECT EXTRACT(EPOCH FROM TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40.12-08'); - date_part - -------------- - 982384720.12 - (1 row) - ``` - - ```sql - MogDB=# SELECT EXTRACT(EPOCH FROM INTERVAL '5 days 3 hours'); - date_part - ----------- - 442800 - (1 row) - ``` - - - Way to convert an epoch value back to a timestamp - - ```sql - MogDB=# SELECT TIMESTAMP WITH TIME ZONE 'epoch' + 982384720.12 * INTERVAL '1 second' AS RESULT; - result - --------------------------- - 2001-02-17 12:38:40.12+08 - (1 row) - ``` - -- hour - - Hour column (0-23) - - ```sql - MogDB=# SELECT EXTRACT(HOUR FROM TIMESTAMP '2001-02-16 20:38:40'); - date_part - ----------- - 20 - (1 row) - ``` - -- isodow - - Day of the week (1-7) - - Monday is 1 and Sunday is 7. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > This is identical to **dow** except for Sunday. - - ```sql - MogDB=# SELECT EXTRACT(ISODOW FROM TIMESTAMP '2001-02-18 20:38:40'); - date_part - ----------- - 7 - (1 row) - ``` - -- isoyear - - The ISO 8601 year that the date falls in (not applicable to intervals). - - Each ISO year begins with the Monday of the week containing January 4, so in early January or late December the ISO year may be different from the Gregorian year. See the **week** column for more information. - - ```sql - MogDB=# SELECT EXTRACT(ISOYEAR FROM DATE '2006-01-01'); - date_part - ----------- - 2005 - (1 row) - ``` - - ```sql - MogDB=# SELECT EXTRACT(ISOYEAR FROM DATE '2006-01-02'); - date_part - ----------- - 2006 - (1 row) - ``` - -- microseconds - - The seconds column, including fractional parts, is multiplied by 1,000,000. - - ```sql - MogDB=# SELECT EXTRACT(MICROSECONDS FROM TIME '17:12:28.5'); - date_part - ----------- - 28500000 - (1 row) - ``` - -- millennium - - Years in the 1900s are in the second millennium. The third millennium started from January 1, 2001. - - ```sql - MogDB=# SELECT EXTRACT(MILLENNIUM FROM TIMESTAMP '2001-02-16 20:38:40'); - date_part - ----------- - 3 - (1 row) - ``` - -- milliseconds - - Seconds column, including fractional parts, is multiplied by 1000. Note that this includes full seconds. 
- - ```sql - MogDB=# SELECT EXTRACT(MILLISECONDS FROM TIME '17:12:28.5'); - date_part - ----------- - 28500 - (1 row) - ``` - -- minute - - Minutes column (0-59). - - ```sql - MogDB=# SELECT EXTRACT(MINUTE FROM TIMESTAMP '2001-02-16 20:38:40'); - date_part - ----------- - 38 - (1 row) - ``` - -- month - - For **timestamp** values, the specific month in the year (1-12). - - ```sql - MogDB=# SELECT EXTRACT(MONTH FROM TIMESTAMP '2001-02-16 20:38:40'); - date_part - ----------- - 2 - (1 row) - ``` - - For **interval** values, the number of months, modulo 12 (0-11). - - ```sql - MogDB=# SELECT EXTRACT(MONTH FROM INTERVAL '2 years 13 months'); - date_part - ----------- - 1 - (1 row) - ``` - -- quarter - - Quarter of the year (1-4) that the date is in. - - ```sql - MogDB=# SELECT EXTRACT(QUARTER FROM TIMESTAMP '2001-02-16 20:38:40'); - date_part - ----------- - 1 - (1 row) - ``` - -- second - - Seconds column, including fractional parts (0-59). - - ```sql - MogDB=# SELECT EXTRACT(SECOND FROM TIME '17:12:28.5'); - date_part - ----------- - 28.5 - (1 row) - ``` - -- timezone - - Time zone offset from UTC, measured in seconds. Positive values correspond to time zones east of UTC, negative values to zones west of UTC. - -- timezone_hour - - Hour component of the time zone offset. - -- timezone_minute - - Minute component of the time zone offset. - -- week - - Number of the week of the year that the day is in. By definition (ISO 8601), the first week of a year contains January 4 of that year. (The ISO-8601 week starts on Monday.) In other words, the first Thursday of a year is in week 1 of that year. - - Because of this, it is possible for early January dates to be part of the 52nd or 53rd week of the previous year, and late December dates to be part of the 1st week of the next year. For example, **2005-01-01** is part of the 53rd week of year 2004, **2006-01-01** is part of the 52nd week of year 2005, and **2012-12-31** is part of the 1st week of year 2013. You are advised to use the columns **isoyear** and **week** together to ensure consistency. - - ```sql - MogDB=# SELECT EXTRACT(WEEK FROM TIMESTAMP '2001-02-16 20:38:40'); - date_part - ----------- - 7 - (1 row) - ``` - -- year - - Year column. - - ```sql - MogDB=# SELECT EXTRACT(YEAR FROM TIMESTAMP '2001-02-16 20:38:40'); - date_part - ----------- - 2001 - (1 row) - ``` - -## date_part - -The **date_part** function is modeled on the traditional Ingres equivalent to the SQL-standard function **extract**: - -- **date_part('***field***', ***source***)** - -Note that here the **field** parameter needs to be a string value, not a name. The valid field names for **field** are the same as for **extract**. For details, see [EXTRACT](#extract). - -Example: - -```sql -MogDB=# SELECT date_part('day', TIMESTAMP '2001-02-16 20:38:40'); - date_part ------------ - 16 -(1 row) -``` - -```sql -MogDB=# SELECT date_part('hour', INTERVAL '4 hours 3 minutes'); - date_part ------------ - 4 -(1 row) -``` - -Table 2 specifies the schema for formatting date and time values. 
- -**Table 2** Schema for formatting date and time - -| Category | Format | Description | -| --------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| Hour | HH | Number of hours in one day (01-12) | -| HH12 | Number of hours in one day (01-12) | | -| HH24 | Number of hours in one day (00-23) | | -| Minute | MI | Minute (00-59) | -| Second | SS | Second (00-59) | -| FF | Microsecond (000000-999999) | | -| SSSSS | Second after midnight (0-86399) | | -| Morning and afternoon | AM or A.M. | Morning identifier | -| PM or P.M. | Afternoon identifier | | -| Year | Y,YYY | Year with comma (with four digits or more) | -| SYYYY | Year with four digits BC | | -| YYYY | Year (with four digits or more) | | -| YYY | Last three digits of a year | | -| YY | Last two digits of a year | | -| Y | Last one digit of a year | | -| IYYY | ISO year (with four digits or more) | | -| IYY | Last three digits of an ISO year | | -| IY | Last two digits of an ISO year | | -| I | Last one digit of an ISO year | | -| RR | Last two digits of a year (A year of the 20th century can be stored in the 21st century.) | | -| RRRR | Capable of receiving a year with four digits or two digits. If there are 2 digits, the value is the same as the returned value of RR. If there are 4 digits, the value is the same as YYYY. | | -| BC or B.C.AD or A.D. | Era indicator Before Christ (BC) and After Christ (AD) | | -| Month | MONTH | Full spelling of a month in uppercase (9 characters are filled in if the value is empty.) | -| MON | Month in abbreviated format in uppercase (with three characters) | | -| MM | Month (01-12) | | -| RM | Month in Roman numerals (I-XII; I=JAN) and uppercase | | -| Day | DAY | Full spelling of a date in uppercase (9 characters are filled in if the value is empty.) | -| DY | Day in abbreviated format in uppercase (with three characters) | | -| DDD | Day in a year (001-366) | | -| DD | Day in a month (01-31) | | -| D | Day in a week (1-7). | | -| Week | W | Week in a month (1-5) (The first week starts from the first day of the month.) | -| WW | Week in a year (1-53) (The first week starts from the first day of the year.) | | -| IW | Week in an ISO year (The first Thursday is in the first week.) | | -| Century | CC | Century (with two digits) (The 21st century starts from 2001-01-01.) | -| Julian date | J | Julian date (starting from January 1 of 4712 BC) | -| Quarter | Q | Quarter | - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> In the table, the rules for RR to calculate years are as follows: -> -> - If the range of the input two-digit year is between 00 and 49: -> If the last two digits of the current year are between 00 and 49, the first two digits of the returned year are the same as the first two digits of the current year. -> If the last two digits of the current year are between 50 and 99, the first two digits of the returned year equal to the first two digits of the current year plus 1. -> - If the range of the input two-digit year is between 50 and 99: -> If the last two digits of the current year are between 00 and 49, the first two digits of the returned year equal to the first two digits of the current year minus 1. -> - If the last two digits of the current year are between 50 and 99, the first two digits of the returned year are the same as the first two digits of the current year. 
+---
+title: Date and Time Processing Functions and Operators
+summary: Date and Time Processing Functions and Operators
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Date and Time Processing Functions and Operators
+
+## Date and Time Operators
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-warning.gif) **WARNING:**
+> When using date/time operators, add an explicit type prefix to each operand so that the database parses the operands exactly as the user expects and no unexpected result occurs.
+> For example, the following statement may produce an unexpected result because the second operand lacks an explicit data type.
+>
+> ```sql
+> SELECT date '2001-10-01' - '7' AS RESULT;
+> ```
+
+**Table 1** Time and date operators
+
+| Operator | Example |
+| -------- | ------------------------------------------------------------ |
+| + | MogDB=# SELECT date '2001-09-28' + integer '7' AS RESULT;<br/>
result
-----------------
2001-10-05
(1 row) | +| | MogDB=# SELECT date '2001-09-28' + interval '1 hour' AS RESULT;
result
-----------------------------
2001-09-28 01:00:00
(1 row) | +| | MogDB=# SELECT date '2001-09-28' + time '03:00' AS RESULT;
result
-------------------------------
2001-09-28 03:00:00
(1 row) | +| | MogDB=# SELECT interval '1 day' + interval '1 hour' AS RESULT;
result
----------------------
1 day 01:00:00
(1 row) | +| | MogDB=# SELECT timestamp '2001-09-28 01:00' + interval '23 hours' AS RESULT;
result
------------------------------
2001-09-29 00:00:00
(1 row) | +| | MogDB=# SELECT time '01:00' + interval '3 hours' AS RESULT;
result
-------------
04:00:00
(1 row) | +| - | MogDB=# SELECT date '2001-10-01' - date '2001-09-28' AS RESULT;
result
----------
3 days<br/>
(1 row) | +| | MogDB=# SELECT date '2001-10-01' - integer '7' AS RESULT;
result
-------------------------------
2001-09-24 00:00:00
(1 row) | +| | MogDB=# SELECT date '2001-09-28' - interval '1 hour' AS RESULT;
result
--------------------------------
2001-09-27 23:00:00
(1 row) | +| | MogDB=# SELECT time '05:00' - time '03:00' AS RESULT;
result
-------------
02:00:00
(1 row) | +| | MogDB=# SELECT time '05:00' - interval '2 hours' AS RESULT;
result
-------------
03:00:00
(1 row) | +| | MogDB=# SELECT timestamp '2001-09-28 23:00' - interval '23 hours' AS RESULT;
result
-------------------------------
2001-09-28 00:00:00
(1 row) | +| | MogDB=# SELECT interval '1 day' - interval '1 hour' AS RESULT;
result
-------------
23:00:00
(1 row) | +| | MogDB=# SELECT timestamp '2001-09-29 03:00' - timestamp '2001-09-27 12:00' AS RESULT;
result
---------------------
1 day 15:00:00
(1 row) | +| * | MogDB=# SELECT 900 * interval '1 second' AS RESULT;
result
--------------
00:15:00
(1 row) | +| | MogDB=# SELECT 21 * interval '1 day' AS RESULT;
result
-------------
21 days
(1 row) | +| | MogDB=# SELECT double precision '3.5' * interval '1 hour' AS RESULT;
result
--------------
03:30:00
(1 row) | +| / | MogDB=# SELECT interval '1 hour' / double precision '1.5' AS RESULT;
result
-------------
00:40:00
(1 row) |
+
+## Time/Date Functions
+
+- age(timestamp, timestamp)
+
+  Description: Subtracts the second argument from the first, producing a symbolic result expressed in years, months, and days. If the result is negative, the returned result is also negative. The input parameters may or may not contain a time zone.
+
+  Return type: interval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT age(timestamp '2001-04-10', timestamp '1957-06-13');
+            age
+  -------------------------
+   43 years 9 mons 27 days
+  (1 row)
+  ```
+
+- age(timestamp)
+
+  Description: Subtracts the argument from the current date. The input parameter may or may not contain a time zone.
+
+  Return type: interval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT age(timestamp '1957-06-13');
+            age
+  -------------------------
+   60 years 2 mons 18 days
+  (1 row)
+  ```
+
+- clock_timestamp()
+
+  Description: Returns the current timestamp of the real-time clock.
+
+  Return type: timestamp with time zone
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT clock_timestamp();
+         clock_timestamp
+  -------------------------------
+   2017-09-01 16:57:36.636205+08
+  (1 row)
+  ```
+
+- current_date
+
+  Description: Specifies the current date.
+
+  Return type: date
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT current_date;
+      date
+  ------------
+   2017-09-01
+  (1 row)
+  ```
+
+- current_time
+
+  Description: Specifies the current time.
+
+  Return type: time with time zone
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT current_time;
+        timetz
+  --------------------
+   16:58:07.086215+08
+  (1 row)
+  ```
+
+- current_timestamp
+
+  Description: Specifies the current date and time.
+
+  Return type: timestamp with time zone
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT current_timestamp;
+        pg_systimestamp
+  ------------------------------
+   2017-09-01 16:58:19.22173+08
+  (1 row)
+  ```
+
+- systimestamp
+
+  Description: Returns the system date of the system where the database is located, including fractional seconds and the time zone.
+
+  Return type: timestamp with time zone
+
+  Example:
+
+  ```sql
+  MogDB=# select systimestamp;
+        pg_systimestamp
+  -----------------------------
+   2021-12-24 14:34:24.6903+08
+  (1 row)
+  ```
+
+- date_part(text, timestamp)
+
+  Description: Obtains the value of a subfield of the date or time, for example, the year or hour. It is equivalent to **extract(field from timestamp)**.
+
+  Timestamp types: abstime, date, interval, reltime, time with time zone, time without time zone, timestamp with time zone, timestamp without time zone
+
+  Return type: double precision
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT date_part('hour', timestamp '2001-02-16 20:38:40');
+   date_part
+  -----------
+          20
+  (1 row)
+  ```
+
+- date_part(text, interval)
+
+  Description: Obtains the value of a subfield of the interval. When obtaining the month value, if the value is greater than 12, the remainder after division by 12 is returned. It is equivalent to **extract(field from interval)**.
+
+  Return type: double precision
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT date_part('month', interval '2 years 3 months');
+   date_part
+  -----------
+           3
+  (1 row)
+  ```
+
+- date_trunc(text, timestamp)
+
+  Description: Truncates to the precision specified by **text**.
+
+  Return type: interval, timestamp with time zone, timestamp without time zone
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT date_trunc('hour', timestamp '2001-02-16 20:38:40');
+       date_trunc
+  ---------------------
+   2001-02-16 20:00:00
+  (1 row)
+  ```
+
+- trunc(timestamp)
+
+  Description: Truncates the timestamp, to day precision by default.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT trunc(timestamp '2001-02-16 20:38:40');
+         trunc
+  ---------------------
+   2001-02-16 00:00:00
+  (1 row)
+  ```
+
+- daterange(arg1, arg2)
+
+  Description: Obtains time boundary information. The type of **arg1** and **arg2** is **date**.
+
+  Return type: daterange
+
+  Example:
+
+  ```sql
+  MogDB=# select daterange('2000-05-06','2000-08-08');
+         daterange
+  -------------------------
+   [2000-05-06,2000-08-08)
+  (1 row)
+  ```
+
+- daterange(arg1, arg2, text)
+
+  Description: Obtains time boundary information. The type of **arg1** and **arg2** is **date**, and the type of the third argument is **text**.
+
+  Return type: daterange
+
+  Example:
+
+  ```sql
+  MogDB=# select daterange('2000-05-06','2000-08-08','[]');
+         daterange
+  -------------------------
+   [2000-05-06,2000-08-09)
+  (1 row)
+  ```
+
+- extract(field from timestamp)
+
+  Description: Obtains the subfield specified by **field** from a timestamp, for example, the hour in the following example.
+
+  Return type: double precision
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT extract(hour from timestamp '2001-02-16 20:38:40');
+   date_part
+  -----------
+          20
+  (1 row)
+  ```
+
+- extract(field from interval)
+
+  Description: Obtains the subfield specified by **field** from an interval, for example, the month in the following example. For months, if the value is greater than 12, the remainder after division by 12 is returned.
+
+  Return type: double precision
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT extract(month from interval '2 years 3 months');
+   date_part
+  -----------
+           3
+  (1 row)
+  ```
+
+- isfinite(date)
+
+  Description: Tests for a valid date.
+
+  Return type: Boolean
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT isfinite(date '2001-02-16');
+   isfinite
+  ----------
+   t
+  (1 row)
+  ```
+
+- isfinite(timestamp)
+
+  Description: Tests for a valid timestamp.
+
+  Return type: Boolean
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT isfinite(timestamp '2001-02-16 21:28:30');
+   isfinite
+  ----------
+   t
+  (1 row)
+  ```
+
+- isfinite(interval)
+
+  Description: Tests for a valid interval.
+
+  Return type: Boolean
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT isfinite(interval '4 hours');
+   isfinite
+  ----------
+   t
+  (1 row)
+  ```
+
+- justify_days(interval)
+
+  Description: Adjusts an interval so that each 30-day period is represented as one month.
+
+  Return type: interval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT justify_days(interval '35 days');
+   justify_days
+  --------------
+   1 mon 5 days
+  (1 row)
+  ```
+
+- justify_hours(interval)
+
+  Description: Adjusts an interval so that each 24-hour period is represented as one day.
+
+  Return type: interval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT JUSTIFY_HOURS(INTERVAL '27 HOURS');
+   justify_hours
+  ----------------
+   1 day 03:00:00
+  (1 row)
+  ```
+
+- justify_interval(interval)
+
+  Description: Adjusts **interval** using **justify_days** and **justify_hours**.
+
+  Return type: interval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT JUSTIFY_INTERVAL(INTERVAL '1 MON -1 HOUR');
+   justify_interval
+  ------------------
+   29 days 23:00:00
+  (1 row)
+  ```
+
+- localtime
+
+  Description: Specifies the current time.
+
+  Return type: time
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT localtime AS RESULT;
+       result
+  -----------------
+   16:05:55.664681
+  (1 row)
+  ```
+
+- localtimestamp
+
+  Description: Specifies the current date and time.
+
+  Return type: timestamp
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT localtimestamp;
+           timestamp
+  ----------------------------
+   2017-09-01 17:03:30.781902
+  (1 row)
+  ```
+
+- now()
+
+  Description: Returns the current date and time (the start time of the current transaction).
+
+  Return type: timestamp with time zone
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT now();
+               now
+  -------------------------------
+   2017-09-01 17:03:42.549426+08
+  (1 row)
+  ```
+
+- timenow()
+
+  Description: Specifies the current date and time.
+
+  Return type: timestamp with time zone
+
+  Example:
+
+  ```sql
+  MogDB=# select timenow();
+          timenow
+  ------------------------
+   2020-06-23 20:36:56+08
+  (1 row)
+  ```
+
+- numtodsinterval(num, interval_unit)
+
+  Description: Converts a number to the interval type. **num** is a numeric-typed number. **interval_unit** is a string in the following format: 'DAY' | 'HOUR' | 'MINUTE' | 'SECOND'
+
+  You can set the **IntervalStyle** parameter to **a** to switch the output of this function to the compatible interval format shown below.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT numtodsinterval(100, 'HOUR');
+   numtodsinterval
+  -----------------
+   100:00:00
+  (1 row)
+
+  MogDB=# SET intervalstyle = a;
+  SET
+  MogDB=# SELECT numtodsinterval(100, 'HOUR');
+        numtodsinterval
+  -------------------------------
+   +000000004 04:00:00.000000000
+  (1 row)
+  ```
+
+- pg_sleep(seconds)
+
+  Description: Delays the current server thread for the specified number of seconds.
+
+  Return type: void
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT pg_sleep(10);
+   pg_sleep
+  ----------
+
+  (1 row)
+  ```
+
+- statement_timestamp()
+
+  Description: Returns the current date and time (the start time of the current statement).
+
+  Return type: timestamp with time zone
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT statement_timestamp();
+        statement_timestamp
+  -------------------------------
+   2017-09-01 17:04:39.119267+08
+  (1 row)
+  ```
+
+- sysdate
+
+  Description: Specifies the current date and time.
+
+  Return type: timestamp
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT sysdate;
+        sysdate
+  ---------------------
+   2017-09-01 17:04:49
+  (1 row)
+  ```
+
+- timeofday()
+
+  Description: Returns the current date and time (like **clock_timestamp**, but returned as a **text** string).
+
+  Return type: text
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT timeofday();
+               timeofday
+  -------------------------------------
+   Fri Sep 01 17:05:01.167506 2017 CST
+  (1 row)
+  ```
+
+- transaction_timestamp()
+
+  Description: Returns the current date and time (the start time of the current transaction; equivalent to **current_timestamp**).
+
+  Return type: timestamp with time zone
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT transaction_timestamp();
+       transaction_timestamp
+  -------------------------------
+   2017-09-01 17:05:13.534454+08
+  (1 row)
+  ```
+
+- add_months(d, n)
+
+  Description: Returns the date **d** plus **n** months.
+
+  Return type: timestamp
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT add_months(to_date('2017-5-29', 'yyyy-mm-dd'), 11) FROM dual;
+       add_months
+  ---------------------
+   2018-04-29 00:00:00
+  (1 row)
+  ```
+
+- last_day(d)
+
+  Description: Returns the date of the last day of the month that contains **d**.
+
+  Return type: timestamp
+
+  Example:
+
+  ```sql
+  MogDB=# select last_day(to_date('2017-01-01', 'YYYY-MM-DD')) AS cal_result;
+       cal_result
+  ---------------------
+   2017-01-31 00:00:00
+  (1 row)
+  ```
+
+- next_day(x, y)
+
+  Description: Returns the first date later than **x** that falls on the weekday specified by **y**.
+
+  Return type: timestamp
+
+  Example:
+
+  ```sql
+  MogDB=# select next_day(timestamp '2017-05-25 00:00:00','Sunday') AS cal_result;
+       cal_result
+  ---------------------
+   2017-05-28 00:00:00
+  (1 row)
+  ```
+
+- tinterval(abstime, abstime)
+
+  Description: Creates a time interval from two pieces of absolute time.
+
+  Return type: tinterval
+
+  Example:
+
+  ```sql
+  MogDB=# call tinterval(abstime 'May 10, 1947 23:59:12', abstime 'Mon May 1 00:30:30 1995');
+                       tinterval
+  -----------------------------------------------------
+   ["1947-05-10 23:59:12+08" "1995-05-01 00:30:30+08"]
+  (1 row)
+  ```
+
+- tintervalend(tinterval)
+
+  Description: Returns the end time of **tinterval**.
+
+  Return type: abstime
+
+  Example:
+
+  ```sql
+  MogDB=# select tintervalend('["Sep 4, 1983 23:59:12" "Oct 4, 1983 23:59:12"]');
+       tintervalend
+  ------------------------
+   1983-10-04 23:59:12+08
+  (1 row)
+  ```
+
+- tintervalrel(tinterval)
+
+  Description: Calculates and returns the relative time of **tinterval**.
+
+  Return type: reltime
+
+  Example:
+
+  ```sql
+  MogDB=# select tintervalrel('["Sep 4, 1983 23:59:12" "Oct 4, 1983 23:59:12"]');
+   tintervalrel
+  --------------
+   1 mon
+  (1 row)
+  ```
+
+- smalldatetime_ge
+
+  Description: Checks whether the value of the first parameter is greater than or equal to that of the second parameter.
+
+  Parameter: smalldatetime, smalldatetime
+
+  Return type: Boolean
+
+- smalldatetime_cmp
+
+  Description: Compares two smalldatetime values. If the first value is smaller, **-1** is returned; if they are equal, **0** is returned; if the first value is larger, **1** is returned.
+
+  Parameter: smalldatetime, smalldatetime
+
+  Return type: integer
+
+- smalldatetime_eq
+
+  Description: Compares two smalldatetime values to check whether they are the same.
+
+  Parameter: smalldatetime, smalldatetime
+
+  Return type: Boolean
+
+- smalldatetime_gt
+
+  Description: Determines whether the first parameter is greater than the second.
+
+  Parameter: smalldatetime, smalldatetime
+
+  Return type: Boolean
+
+- smalldatetime_hash
+
+  Description: Calculates the hash value corresponding to a smalldatetime value.
+
+  Parameter: smalldatetime
+
+  Return type: integer
+
+- smalldatetime_in
+
+  Description: Reads in a smalldatetime value from its external form.
+
+  Parameter: cstring, oid, integer
+
+  Return type: smalldatetime
+
+- smalldatetime_larger
+
+  Description: Returns the larger of two smalldatetime values.
+
+  Parameter: smalldatetime, smalldatetime
+
+  Return type: smalldatetime
+
+- smalldatetime_le
+
+  Description: Checks whether the value of the first parameter is less than or equal to that of the second parameter.
+
+  Parameter: smalldatetime, smalldatetime
+
+  Return type: Boolean
+
+- smalldatetime_lt
+
+  Description: Determines whether the first parameter is less than the second parameter.
+
+  Parameter: smalldatetime, smalldatetime
+
+  Return type: Boolean
+
+- smalldatetime_ne
+
+  Description: Compares two smalldatetime values to check whether they are different.
+
+  Parameter: smalldatetime, smalldatetime
+
+  Return type: Boolean
+
+- smalldatetime_out
+
+  Description: Converts a smalldatetime value into its external form.
+
+  Parameter: smalldatetime
+
+  Return type: cstring
+
+- smalldatetime_send
+
+  Description: Converts a smalldatetime value to the binary format.
+
+  Parameter: smalldatetime
+
+  Return type: bytea
+
+- smalldatetime_smaller
+
+  Description: Returns the smaller of two smalldatetime values.
+
+  Parameter: smalldatetime, smalldatetime
+
+  Return type: smalldatetime
+
+- smalldatetime_to_abstime
+
+  Description: Converts smalldatetime to abstime.
+
+  Parameter: smalldatetime
+
+  Return type: abstime
+
+- smalldatetime_to_time
+
+  Description: Converts smalldatetime to time.
+
+  Parameter: smalldatetime
+
+  Return type: time without time zone
+
+- smalldatetime_to_timestamp
+
+  Description: Converts smalldatetime to timestamp.
+
+  Parameter: smalldatetime
+
+  Return type: timestamp without time zone
+
+- smalldatetime_to_timestamptz
+
+  Description: Converts smalldatetime to timestamptz.
+
+  Parameter: smalldatetime
+
+  Return type: timestamp with time zone
+
+- smalldatetime_to_varchar2
+
+  Description: Converts smalldatetime to varchar2.
+
+  Parameter: smalldatetime
+
+  Return type: character varying
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+  >
+  > There are multiple methods for obtaining the current time. Select an appropriate API based on the actual service scenario.
+  >
+  > 1. The following APIs return values based on the start time of the current transaction:
+  >
+  >    ```sql
+  >    CURRENT_DATE CURRENT_TIME CURRENT_TIME(precision) CURRENT_TIMESTAMP(precision) LOCALTIME LOCALTIMESTAMP LOCALTIME(precision) LOCALTIMESTAMP(precision)
+  >    ```
+  >
+  >    **CURRENT_TIME** and **CURRENT_TIMESTAMP(precision)** deliver values with time zone information, whereas the values of **LOCALTIME** and **LOCALTIMESTAMP** do not contain time zone information. **CURRENT_TIME**, **LOCALTIME**, and **LOCALTIMESTAMP** can optionally take a precision parameter, which rounds the seconds field of the result to the specified number of decimal places. Without a precision parameter, the result is given to the full available precision.
+  >    Because these functions all return results based on the start time of the current transaction, their values do not change throughout the transaction. This is deliberate: it allows a transaction to have a consistent notion of the "current" time, so that multiple modifications within the same transaction carry the same timestamp.
+  >
+  > 2. The following APIs return the start time of the current transaction or statement:
+  >
+  >    ```sql
+  >    transaction_timestamp() statement_timestamp() now()
+  >    ```
+  >
+  >    **transaction_timestamp()** is equivalent to **CURRENT_TIMESTAMP**, and its name clearly reflects what it returns. **statement_timestamp()** returns the start time of the current statement (more precisely, the time when the latest command was received from the client). The return values of **statement_timestamp()** and **transaction_timestamp()** are the same during the execution of the first command of a transaction, but may differ in subsequent commands.
+  >    **now()** is equivalent to **transaction_timestamp()**.
+  >
+  > 3. The following APIs return the actual current time at the moment the function is called:
+  >
+  >    ```sql
+  >    clock_timestamp() timeofday()
+  >    ```
+  >
+  >    **clock_timestamp()** returns the actual "current" time, and its value changes even within a single SQL statement. Similar to **clock_timestamp()**, **timeofday()** also returns the actual current time. However, the result of **timeofday()** is a formatted text string instead of a timestamp with time zone information.
+
+## TIMESTAMPDIFF
+
+- **TIMESTAMPDIFF(unit, timestamp_expr1, timestamp_expr2)**
+
+The TIMESTAMPDIFF function returns the result of **timestamp_expr2** - **timestamp_expr1** in the specified unit. **timestamp_expr1** and **timestamp_expr2** must be value expressions of the **timestamp**, **timestamptz**, or **date** type. **unit** indicates the unit of the difference between the two dates.
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+> This function is valid only when MogDB runs in MySQL-compatible mode (that is, dbcompatibility = 'B').
+
+- year
+
+  Year.
+ + ```sql + MogDB=# SELECT TIMESTAMPDIFF(YEAR, '2018-01-01', '2020-01-01'); + timestamp_diff + ---------------- + 2 + (1 row) + ``` + +- quarter + + Quarter. + + ```sql + MogDB=# SELECT TIMESTAMPDIFF(QUARTER, '2018-01-01', '2020-01-01'); + timestamp_diff + ---------------- + 8 + (1 row) + ``` + +- month + + Month. + + ```sql + MogDB=# SELECT TIMESTAMPDIFF(MONTH, '2018-01-01', '2020-01-01'); + timestamp_diff + ---------------- + 24 + (1 row) + ``` + +- week + + Week. + + ```sql + MogDB=# SELECT TIMESTAMPDIFF(WEEK, '2018-01-01', '2020-01-01'); + timestamp_diff + ---------------- + 104 + (1 row) + ``` + +- day + + Day. + + ```sql + MogDB=# SELECT TIMESTAMPDIFF(DAY, '2018-01-01', '2020-01-01'); + timestamp_diff + ---------------- + 730 + (1 row) + ``` + +- hour + + Hour. + + ```sql + MogDB=# SELECT TIMESTAMPDIFF(HOUR, '2020-01-01 10:10:10', '2020-01-01 11:11:11'); + timestamp_diff + ---------------- + 1 + (1 row) + + ``` + +- minute + + Minute. + + ```sql + MogDB=# SELECT TIMESTAMPDIFF(MINUTE, '2020-01-01 10:10:10', '2020-01-01 11:11:11'); + timestamp_diff + ---------------- + 61 + (1 row) + + ``` + +- second + + Second. + + ```sql + MogDB=# SELECT TIMESTAMPDIFF(SECOND, '2020-01-01 10:10:10', '2020-01-01 11:11:11'); + timestamp_diff + ---------------- + 3661 + (1 row) + + ``` + +- microseconds + + The seconds column, including fractional parts, is multiplied by 1,000,000. + + ```sql + MogDB=# SELECT TIMESTAMPDIFF(MICROSECOND, '2020-01-01 10:10:10.000000', '2020-01-01 10:10:10.111111'); + timestamp_diff + ---------------- + 111111 + (1 row) + + ``` + +- timestamp_expr with the time zone + + ```sql + MogDB=# SELECT TIMESTAMPDIFF(HOUR,'2020-05-01 10:10:10-01','2020-05-01 10:10:10-03'); + timestamp_diff + ---------------- + 2 + (1 row) + ``` + +## EXTRACT + +- **EXTRACT(field FROM source)** + + The **extract** function retrieves subcolumns such as year or hour from date/time values. **source** must be a value expression of type **timestamp**, **time**, or **interval**. (Expressions of type **date** are cast to **timestamp** and can therefore be used as well.) **field** is an identifier or string that selects what column to extract from the source value. The **extract** function returns values of type **double precision**. The following are valid **field** names: + +- century + + The first century starts at 0001-01-01 00:00:00 AD. This definition applies to all Gregorian calendar countries. There is no century number 0. You go from **-1** century to **1** century. 
+ + Example: + + ```sql + MogDB=# SELECT EXTRACT(CENTURY FROM TIMESTAMP '2000-12-16 12:21:13'); + date_part + ----------- + 20 + (1 row) + ``` + +- day + + - For **timestamp** values, the day (of the month) column (1-31) + + ```sql + MogDB=# SELECT EXTRACT(DAY FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 16 + (1 row) + ``` + + - For **interval** values, the number of days + + ```sql + MogDB=# SELECT EXTRACT(DAY FROM INTERVAL '40 days 1 minute'); + date_part + ----------- + 40 + (1 row) + ``` + +- decade + + Year column divided by 10 + + ```sql + MogDB=# SELECT EXTRACT(DECADE FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 200 + (1 row) + ``` + +- dow + + Day of the week as Sunday(**0**) to Saturday (**6**) + + ```sql + MogDB=# SELECT EXTRACT(DOW FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 5 + (1 row) + ``` + +- doy + + Day of the year (1-365 or 366) + + ```sql + MogDB=# SELECT EXTRACT(DOY FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 47 + (1 row) + ``` + +- epoch + + - For **timestamp with time zone** values, the number of seconds since 1970-01-01 00:00:00-00 UTC (can be negative). + + For **date** and **timestamp** values, the number of seconds since 1970-01-01 00:00:00-00 local time. + + For **interval** values, the total number of seconds in the interval. + + ```sql + MogDB=# SELECT EXTRACT(EPOCH FROM TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40.12-08'); + date_part + -------------- + 982384720.12 + (1 row) + ``` + + ```sql + MogDB=# SELECT EXTRACT(EPOCH FROM INTERVAL '5 days 3 hours'); + date_part + ----------- + 442800 + (1 row) + ``` + + - Way to convert an epoch value back to a timestamp + + ```sql + MogDB=# SELECT TIMESTAMP WITH TIME ZONE 'epoch' + 982384720.12 * INTERVAL '1 second' AS RESULT; + result + --------------------------- + 2001-02-17 12:38:40.12+08 + (1 row) + ``` + +- hour + + Hour column (0-23) + + ```sql + MogDB=# SELECT EXTRACT(HOUR FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 20 + (1 row) + ``` + +- isodow + + Day of the week (1-7) + + Monday is 1 and Sunday is 7. + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** + > This is identical to **dow** except for Sunday. + + ```sql + MogDB=# SELECT EXTRACT(ISODOW FROM TIMESTAMP '2001-02-18 20:38:40'); + date_part + ----------- + 7 + (1 row) + ``` + +- isoyear + + The ISO 8601 year that the date falls in (not applicable to intervals). + + Each ISO year begins with the Monday of the week containing January 4, so in early January or late December the ISO year may be different from the Gregorian year. See the **week** column for more information. + + ```sql + MogDB=# SELECT EXTRACT(ISOYEAR FROM DATE '2006-01-01'); + date_part + ----------- + 2005 + (1 row) + ``` + + ```sql + MogDB=# SELECT EXTRACT(ISOYEAR FROM DATE '2006-01-02'); + date_part + ----------- + 2006 + (1 row) + ``` + +- microseconds + + The seconds column, including fractional parts, is multiplied by 1,000,000. + + ```sql + MogDB=# SELECT EXTRACT(MICROSECONDS FROM TIME '17:12:28.5'); + date_part + ----------- + 28500000 + (1 row) + ``` + +- millennium + + Years in the 1900s are in the second millennium. The third millennium started from January 1, 2001. + + ```sql + MogDB=# SELECT EXTRACT(MILLENNIUM FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 3 + (1 row) + ``` + +- milliseconds + + Seconds column, including fractional parts, is multiplied by 1000. Note that this includes full seconds. 
+ + ```sql + MogDB=# SELECT EXTRACT(MILLISECONDS FROM TIME '17:12:28.5'); + date_part + ----------- + 28500 + (1 row) + ``` + +- minute + + Minutes column (0-59). + + ```sql + MogDB=# SELECT EXTRACT(MINUTE FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 38 + (1 row) + ``` + +- month + + For **timestamp** values, the specific month in the year (1-12). + + ```sql + MogDB=# SELECT EXTRACT(MONTH FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 2 + (1 row) + ``` + + For **interval** values, the number of months, modulo 12 (0-11). + + ```sql + MogDB=# SELECT EXTRACT(MONTH FROM INTERVAL '2 years 13 months'); + date_part + ----------- + 1 + (1 row) + ``` + +- quarter + + Quarter of the year (1-4) that the date is in. + + ```sql + MogDB=# SELECT EXTRACT(QUARTER FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 1 + (1 row) + ``` + +- second + + Seconds column, including fractional parts (0-59). + + ```sql + MogDB=# SELECT EXTRACT(SECOND FROM TIME '17:12:28.5'); + date_part + ----------- + 28.5 + (1 row) + ``` + +- timezone + + Time zone offset from UTC, measured in seconds. Positive values correspond to time zones east of UTC, negative values to zones west of UTC. + +- timezone_hour + + Hour component of the time zone offset. + +- timezone_minute + + Minute component of the time zone offset. + +- week + + Number of the week of the year that the day is in. By definition (ISO 8601), the first week of a year contains January 4 of that year. (The ISO-8601 week starts on Monday.) In other words, the first Thursday of a year is in week 1 of that year. + + Because of this, it is possible for early January dates to be part of the 52nd or 53rd week of the previous year, and late December dates to be part of the 1st week of the next year. For example, **2005-01-01** is part of the 53rd week of year 2004, **2006-01-01** is part of the 52nd week of year 2005, and **2012-12-31** is part of the 1st week of year 2013. You are advised to use the columns **isoyear** and **week** together to ensure consistency. + + ```sql + MogDB=# SELECT EXTRACT(WEEK FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 7 + (1 row) + ``` + +- year + + Year column. + + ```sql + MogDB=# SELECT EXTRACT(YEAR FROM TIMESTAMP '2001-02-16 20:38:40'); + date_part + ----------- + 2001 + (1 row) + ``` + +## date_part + +The **date_part** function is modeled on the traditional Ingres equivalent to the SQL-standard function **extract**: + +- **date_part('***field***', ***source***)** + +Note that here the **field** parameter needs to be a string value, not a name. The valid field names for **field** are the same as for **extract**. For details, see [EXTRACT](#extract). + +Example: + +```sql +MogDB=# SELECT date_part('day', TIMESTAMP '2001-02-16 20:38:40'); + date_part +----------- + 16 +(1 row) +``` + +```sql +MogDB=# SELECT date_part('hour', INTERVAL '4 hours 3 minutes'); + date_part +----------- + 4 +(1 row) +``` + +Table 2 specifies the schema for formatting date and time values. 
+
+**Table 2** Schema for formatting date and time
+
+| Category | Format | Description |
+| --------------------- | ---------------------- | ------------------------------------------------------------ |
+| Hour | HH | Number of hours in one day (01-12) |
+| | HH12 | Number of hours in one day (01-12) |
+| | HH24 | Number of hours in one day (00-23) |
+| Minute | MI | Minute (00-59) |
+| Second | SS | Second (00-59) |
+| | FF | Microsecond (000000-999999) |
+| | SSSSS | Second after midnight (0-86399) |
+| Morning and afternoon | AM or A.M. | Morning identifier |
+| | PM or P.M. | Afternoon identifier |
+| Year | Y,YYY | Year with comma (with four digits or more) |
+| | SYYYY | Year with four digits BC |
+| | YYYY | Year (with four digits or more) |
+| | YYY | Last three digits of a year |
+| | YY | Last two digits of a year |
+| | Y | Last one digit of a year |
+| | IYYY | ISO year (with four digits or more) |
+| | IYY | Last three digits of an ISO year |
+| | IY | Last two digits of an ISO year |
+| | I | Last one digit of an ISO year |
+| | RR | Last two digits of a year (A year of the 20th century can be stored in the 21st century.) |
+| | RRRR | Accepts a year with four or two digits. With two digits, the result is the same as for RR; with four digits, the same as for YYYY. |
+| | BC or B.C., AD or A.D. | Era indicator: Before Christ (BC) or Anno Domini (AD) |
+| Month | MONTH | Full spelling of a month in uppercase (padded with spaces to 9 characters if shorter) |
+| | MON | Month in abbreviated format in uppercase (three characters) |
+| | MM | Month (01-12) |
+| | RM | Month in Roman numerals (I-XII; I=JAN) and uppercase |
+| Day | DAY | Full spelling of a day of the week in uppercase (padded with spaces to 9 characters if shorter) |
+| | DY | Day of the week in abbreviated format in uppercase (three characters) |
+| | DDD | Day in a year (001-366) |
+| | DD | Day in a month (01-31) |
+| | D | Day in a week (1-7) |
+| Week | W | Week in a month (1-5) (The first week starts from the first day of the month.) |
+| | WW | Week in a year (1-53) (The first week starts from the first day of the year.) |
+| | IW | Week in an ISO year (The first Thursday of the year is in the first week.) |
+| Century | CC | Century (two digits) (The 21st century starts from 2001-01-01.) |
+| Julian date | J | Julian date (number of days since January 1 of 4712 BC) |
+| Quarter | Q | Quarter |
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+> In the table, the rules for RR to calculate years are as follows:
+>
+> - If the input two-digit year is between 00 and 49:
+>   If the last two digits of the current year are between 00 and 49, the first two digits of the returned year are the same as the first two digits of the current year.
+>   If the last two digits of the current year are between 50 and 99, the first two digits of the returned year equal the first two digits of the current year plus 1.
+> - If the input two-digit year is between 50 and 99:
+>   If the last two digits of the current year are between 00 and 49, the first two digits of the returned year equal the first two digits of the current year minus 1.
+>   If the last two digits of the current year are between 50 and 99, the first two digits of the returned year are the same as the first two digits of the current year.
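+
+The patterns in Table 2 are typically used with date/time formatting functions such as **to_char** and **to_date**. The following is a minimal illustrative sketch, assuming the default compatibility settings; the output shown is indicative only, since the exact result can depend on the server's locale and configuration:
+
+```sql
+-- Format a timestamp using the YYYY, MM, DD, HH24, MI, SS, and DY patterns.
+MogDB=# SELECT to_char(timestamp '2001-02-16 20:38:40', 'YYYY-MM-DD HH24:MI:SS DY');
+         to_char
+-------------------------
+ 2001-02-16 20:38:40 FRI
+(1 row)
+
+-- Parse a string back into a date/time value using the DD, MON, and YYYY patterns.
+MogDB=# SELECT to_date('16-FEB-2001', 'DD-MON-YYYY');
+       to_date
+---------------------
+ 2001-02-16 00:00:00
+(1 row)
+```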
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/dynamic-data-masking-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/dynamic-data-masking-functions.md index c148b461..249c432a 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/dynamic-data-masking-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/dynamic-data-masking-functions.md @@ -1,66 +1,66 @@ ---- -title: Dynamic Data Masking Functions -summary: Dynamic Data Masking Functions -author: Guo Huan -date: 2021-10-28 ---- - -# Dynamic Data Masking Functions - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is an internal function. For details, see "Database Security > Dynamic Data Anonymization". - -- creditcardmasking(col text, letter char default 'x') - - Description: Replaces the digits before the last four bits following the col string with letters. - - Parameter: Character string to be replaced or character string used for replacement - - Return type: text - -- basicmailmasking(col text, letter char default 'x') - - Description: Replaces the characters before the first at sign (@) in the col string with letters. - - Parameter: Character string to be replaced or character string used for replacement - - Return type: text - -- fullmailmasking(col text, letter char default 'x') - - Description: Replaces the characters (except **@**) before the last period (.) in the col string with letters. - - Parameter: Character string to be replaced or character string used for replacement - - Return type: text - -- alldigitsmasking(col text, letter char default '0') - - Description: Replaces the digits in the col string with letters. - - Parameter: Character string to be replaced or character string used for replacement - - Return type: text - -- shufflemasking(col text) - - Description: Sorts the characters in the col string out of order. - - Parameter: Character string to be replaced or character string used for replacement - - Return type: text - -- randommasking(col text) - - Description: Randomizes the characters in the col string. - - Parameter: Character string to be replaced or character string used for replacement - - Return type: text - -- regexpmasking - - Description: Specifies the internal function of the masking policy, which is used to replace characters using a regular expression. - - Parameter: col text, reg text, replace\_text text, pos INTEGER default 0, reg\_len INTEGER default -1 - +--- +title: Dynamic Data Masking Functions +summary: Dynamic Data Masking Functions +author: Guo Huan +date: 2021-10-28 +--- + +# Dynamic Data Masking Functions + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is an internal function. For details, see "Database Security > Dynamic Data Anonymization". + +- creditcardmasking(col text, letter char default 'x') + + Description: Replaces the digits before the last four bits following the col string with letters. + + Parameter: Character string to be replaced or character string used for replacement + + Return type: text + +- basicmailmasking(col text, letter char default 'x') + + Description: Replaces the characters before the first at sign (@) in the col string with letters. 
+ + Parameter: Character string to be replaced or character string used for replacement + + Return type: text + +- fullmailmasking(col text, letter char default 'x') + + Description: Replaces the characters (except **@**) before the last period (.) in the col string with letters. + + Parameter: Character string to be replaced or character string used for replacement + + Return type: text + +- alldigitsmasking(col text, letter char default '0') + + Description: Replaces the digits in the col string with letters. + + Parameter: Character string to be replaced or character string used for replacement + + Return type: text + +- shufflemasking(col text) + + Description: Sorts the characters in the col string out of order. + + Parameter: Character string to be replaced or character string used for replacement + + Return type: text + +- randommasking(col text) + + Description: Randomizes the characters in the col string. + + Parameter: Character string to be replaced or character string used for replacement + + Return type: text + +- regexpmasking + + Description: Specifies the internal function of the masking policy, which is used to replace characters using a regular expression. + + Parameter: col text, reg text, replace\_text text, pos INTEGER default 0, reg\_len INTEGER default -1 + Return type: text \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/encrypted-equality-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/encrypted-equality-functions.md index cc87109e..29ddb783 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/encrypted-equality-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/encrypted-equality-functions.md @@ -1,188 +1,188 @@ ---- -title: Encrypted Equality Functions -summary: Encrypted Equality Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Encrypted Equality Functions - -- byteawithoutorderwithequalcolin(cstring) - - Description: Converts input data to the internal byteawithoutorderwithequalcol format. - - Parameter type: cstring - - Return type: byteawithoutorderwithequalcol - -- byteawithoutorderwithequalcolout(byteawithoutorderwithequalcol) - - Description: Converts internal data of the byteawithoutorderwithequalcol type to data of the cstring type. - - Parameter type: byteawithoutorderwithequalcol - - Return type: cstring - -- byteawithoutorderwithequalcolsend(byteawithoutorderwithequalcol) - - Description: Converts data of the byteawithoutorderwithequalcol type to data of the bytea type. - - Parameter type: byteawithoutorderwithequalcol - - Return type: bytea - -- byteawithoutorderwithequalcolrecv(internal) - - Description: Converts data of the byteawithoutorderwithequalcol type to data of the byteawithoutorderwithequalcol type. - - Parameter type: internal - - Return type: byteawithoutorderwithequalcol - -- byteawithoutorderwithequalcoltypmodin(cstring) - - Description: Converts data of the byteawithoutorderwithequalcol type to data of the byteawithoutorderwithequalcol type. - - Parameter type: cstring - - Return type: int4 - -- byteawithoutorderwithequalcoltypmodout(int4) - - Description: Converts data of the int4 type into data of the cstring type. - - Parameter type: int4 - - Return type: cstring - -- byteawithoutordercolin(cstring) - - Description: Converts input data to the internal byteawithoutordercolin format. 
- - Parameter type: cstring - - Return type: byteawithoutordercol - -- byteawithoutordercolout(byteawithoutordercol) - - Description: Converts internal data of the byteawithoutordercol type to data of the cstring type. - - Parameter type: byteawithoutordercol - - Return type: cstring - -- byteawithoutordercolsend(byteawithoutordercol) - - Description: Converts data of the byteawithoutordercol type to data of the bytea type. - - Parameter type: byteawithoutordercol - - Return type: bytea - -- byteawithoutordercolrecv(internal) - - Description: Converts data of the byteawithoutordercol type to data of the byteawithoutordercol type. - - Parameter type: internal - - Return type: byteawithoutordercol - -- byteawithoutorderwithequalcolcmp(byteawithoutorderwithequalcol, byteawithoutorderwithequalcol) - - Description: Compares two byteawithoutorderwithequalcol data sizes. If the first data size is smaller than the second one, **-1** is returned. If the first data size is equal to the second one, **0** is returned. If the first data size is larger than the second one, **1** is returned. - - Parameter type: byteawithoutorderwithequalcol, byteawithoutorderwithequalcol - - Return type: int4 - -- byteawithoutorderwithequalcolcmpbytear(byteawithoutorderwithequalcol, bytea) - - Description: Compares the byteawithoutorderwithequalcol and bytea data sizes. If the first data size is smaller than the second one, **-1** is returned. If the first data size is equal to the second one, **0** is returned. If the first data size is larger than the second one, **1** is returned. - - Parameter type: byteawithoutorderwithequalcol, bytea - - Return type: int4 - -- byteawithoutorderwithequalcolcmpbyteal(bytea, byteawithoutorderwithequalcol) - - Description: Compares the bytea and byteawithoutorderwithequalcol data sizes. If the first data size is smaller than the second one, **-1** is returned. If the first data size is equal to the second one, **0** is returned. If the first data size is larger than the second one, **1** is returned. - - Parameter type: byteawithoutorderwithequalcol, bytea - - Return type: int4 - -- byteawithoutorderwithequalcoleq(byteawithoutorderwithequalcol, byteawithoutorderwithequalcol) - - Description: Compares two byteawithoutorderwithequalcol data records. If they are the same, **true** is returned. Otherwise, **false** is returned. - - Parameter type: byteawithoutorderwithequalcol, bytea - - Return type: Boolean - -- byteawithoutorderwithequalcoleqbyteal(bytea, byteawithoutorderwithequalcol) - - Description: Compares the bytea and byteawithoutorderwithequalcol data records. If they are the same, **true** is returned. Otherwise, **false** is returned. - - Parameter type: bytea, byteawithoutorderwithequalcol - - Return type: Boolean - -- byteawithoutorderwithequalcoleqbytear(byteawithoutorderwithequalcol, bytea) - - Description: Compares the byteawithoutorderwithequalcol and bytea data records. If they are the same, **true** is returned. Otherwise, **false** is returned. - - Parameter type: byteawithoutorderwithequalcol, bytea - - Return type: Boolean - -- byteawithoutorderwithequalcolne(byteawithoutorderwithequalcol, byteawithoutorderwithequalcol) - - Description: Compares two byteawithoutorderwithequalcol data records. If they are different, **true** is returned. Otherwise, **false** is returned. 
- - Parameter type: byteawithoutorderwithequalcol, byteawithoutorderwithequalcol - - Return type: Boolean - -- byteawithoutorderwithequalcolnebyteal(bytea, byteawithoutorderwithequalcol) - - Description: Compares the bytea and byteawithoutorderwithequalcol data records. If they are the same, **true** is returned. Otherwise, **false** is returned. - - Parameter type: bytea, byteawithoutorderwithequalcol - - Return type: Boolean - -- byteawithoutorderwithequalcolnebytear(byteawithoutorderwithequalcol, bytea) - - Description: Compares the byteawithoutorderwithequalcol and bytea data records. If they are the same, **true** is returned. Otherwise, **false** is returned. - - Parameter type: byteawithoutorderwithequalcol, bytea - - Return type: Boolean - -- hll_hash_byteawithoutorderwithequalcol(byteawithoutorderwithequalcol) - - Description: Returns the hll hash value of byteawithoutorderwithequalcol. - - Parameter type: byteawithoutorderwithequalcol - - Return type: hll_hashval - - The implementation of byteawithoutorderwithequalcolin searches for CEK and determines whether it is a normal encrypted data type. Therefore, an error is reported when the user directly uses the function. - - ```sql - MogDB=# SELECT * FROM byteawithoutorderwithequalcolsend('\x907219912381298461289346129'::byteawithoutorderwithequalcol); - ERROR: cek with OID 596711794 not found - LINE 1: SELECT * FROM byteawithoutorderwithequalcolsend('\x907219912... - ^ - MogDB=# SELECT * FROM byteawithoutordercolout('\x90721901999999999999912381298461289346129'); - ERROR: cek with OID 2566986098 not found - LINE 1: SELECT * FROM byteawithoutordercolout('\x9072190199999999999... - - SELECT * FROM byteawithoutorderwithequalcolrecv('\x90721901999999999999912381298461289346129'::byteawithoutorderwithequalcol); - ERROR: cek with OID 2566986098 not found - ^ - MogDB=# SELECT * FROM byteawithoutorderwithequalcolsend('\x90721901999999999999912381298461289346129'::byteawithoutorderwithequalcol); - ERROR: cek with OID 2566986098 not found - LINE 1: SELECT * FROM byteawithoutorderwithequalcolsend('\x907219019... - ^ - ``` +--- +title: Encrypted Equality Functions +summary: Encrypted Equality Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Encrypted Equality Functions + +- byteawithoutorderwithequalcolin(cstring) + + Description: Converts input data to the internal byteawithoutorderwithequalcol format. + + Parameter type: cstring + + Return type: byteawithoutorderwithequalcol + +- byteawithoutorderwithequalcolout(byteawithoutorderwithequalcol) + + Description: Converts internal data of the byteawithoutorderwithequalcol type to data of the cstring type. + + Parameter type: byteawithoutorderwithequalcol + + Return type: cstring + +- byteawithoutorderwithequalcolsend(byteawithoutorderwithequalcol) + + Description: Converts data of the byteawithoutorderwithequalcol type to data of the bytea type. + + Parameter type: byteawithoutorderwithequalcol + + Return type: bytea + +- byteawithoutorderwithequalcolrecv(internal) + + Description: Converts data of the byteawithoutorderwithequalcol type to data of the byteawithoutorderwithequalcol type. + + Parameter type: internal + + Return type: byteawithoutorderwithequalcol + +- byteawithoutorderwithequalcoltypmodin(cstring) + + Description: Converts data of the byteawithoutorderwithequalcol type to data of the byteawithoutorderwithequalcol type. 
+ + Parameter type: cstring + + Return type: int4 + +- byteawithoutorderwithequalcoltypmodout(int4) + + Description: Converts data of the int4 type into data of the cstring type. + + Parameter type: int4 + + Return type: cstring + +- byteawithoutordercolin(cstring) + + Description: Converts input data to the internal byteawithoutordercolin format. + + Parameter type: cstring + + Return type: byteawithoutordercol + +- byteawithoutordercolout(byteawithoutordercol) + + Description: Converts internal data of the byteawithoutordercol type to data of the cstring type. + + Parameter type: byteawithoutordercol + + Return type: cstring + +- byteawithoutordercolsend(byteawithoutordercol) + + Description: Converts data of the byteawithoutordercol type to data of the bytea type. + + Parameter type: byteawithoutordercol + + Return type: bytea + +- byteawithoutordercolrecv(internal) + + Description: Converts data of the byteawithoutordercol type to data of the byteawithoutordercol type. + + Parameter type: internal + + Return type: byteawithoutordercol + +- byteawithoutorderwithequalcolcmp(byteawithoutorderwithequalcol, byteawithoutorderwithequalcol) + + Description: Compares two byteawithoutorderwithequalcol data sizes. If the first data size is smaller than the second one, **-1** is returned. If the first data size is equal to the second one, **0** is returned. If the first data size is larger than the second one, **1** is returned. + + Parameter type: byteawithoutorderwithequalcol, byteawithoutorderwithequalcol + + Return type: int4 + +- byteawithoutorderwithequalcolcmpbytear(byteawithoutorderwithequalcol, bytea) + + Description: Compares the byteawithoutorderwithequalcol and bytea data sizes. If the first data size is smaller than the second one, **-1** is returned. If the first data size is equal to the second one, **0** is returned. If the first data size is larger than the second one, **1** is returned. + + Parameter type: byteawithoutorderwithequalcol, bytea + + Return type: int4 + +- byteawithoutorderwithequalcolcmpbyteal(bytea, byteawithoutorderwithequalcol) + + Description: Compares the bytea and byteawithoutorderwithequalcol data sizes. If the first data size is smaller than the second one, **-1** is returned. If the first data size is equal to the second one, **0** is returned. If the first data size is larger than the second one, **1** is returned. + + Parameter type: byteawithoutorderwithequalcol, bytea + + Return type: int4 + +- byteawithoutorderwithequalcoleq(byteawithoutorderwithequalcol, byteawithoutorderwithequalcol) + + Description: Compares two byteawithoutorderwithequalcol data records. If they are the same, **true** is returned. Otherwise, **false** is returned. + + Parameter type: byteawithoutorderwithequalcol, bytea + + Return type: Boolean + +- byteawithoutorderwithequalcoleqbyteal(bytea, byteawithoutorderwithequalcol) + + Description: Compares the bytea and byteawithoutorderwithequalcol data records. If they are the same, **true** is returned. Otherwise, **false** is returned. + + Parameter type: bytea, byteawithoutorderwithequalcol + + Return type: Boolean + +- byteawithoutorderwithequalcoleqbytear(byteawithoutorderwithequalcol, bytea) + + Description: Compares the byteawithoutorderwithequalcol and bytea data records. If they are the same, **true** is returned. Otherwise, **false** is returned. 
+
+  Parameter type: byteawithoutorderwithequalcol, bytea
+
+  Return type: Boolean
+
+- byteawithoutorderwithequalcolne(byteawithoutorderwithequalcol, byteawithoutorderwithequalcol)
+
+  Description: Compares two byteawithoutorderwithequalcol data records. If they are different, **true** is returned. Otherwise, **false** is returned.
+
+  Parameter type: byteawithoutorderwithequalcol, byteawithoutorderwithequalcol
+
+  Return type: Boolean
+
+- byteawithoutorderwithequalcolnebyteal(bytea, byteawithoutorderwithequalcol)
+
+  Description: Compares the bytea and byteawithoutorderwithequalcol data records. If they are different, **true** is returned. Otherwise, **false** is returned.
+
+  Parameter type: bytea, byteawithoutorderwithequalcol
+
+  Return type: Boolean
+
+- byteawithoutorderwithequalcolnebytear(byteawithoutorderwithequalcol, bytea)
+
+  Description: Compares the byteawithoutorderwithequalcol and bytea data records. If they are different, **true** is returned. Otherwise, **false** is returned.
+
+  Parameter type: byteawithoutorderwithequalcol, bytea
+
+  Return type: Boolean
+
+- hll_hash_byteawithoutorderwithequalcol(byteawithoutorderwithequalcol)
+
+  Description: Returns the hll hash value of byteawithoutorderwithequalcol.
+
+  Parameter type: byteawithoutorderwithequalcol
+
+  Return type: hll_hashval
+
+  The implementation of byteawithoutorderwithequalcolin looks up the CEK and checks whether the input is a valid encrypted data type. Therefore, an error is reported when a user calls these functions directly.
+
+  ```sql
+  MogDB=# SELECT * FROM byteawithoutorderwithequalcolsend('\x907219912381298461289346129'::byteawithoutorderwithequalcol);
+  ERROR: cek with OID 596711794 not found
+  LINE 1: SELECT * FROM byteawithoutorderwithequalcolsend('\x907219912...
+  ^
+  MogDB=# SELECT * FROM byteawithoutordercolout('\x90721901999999999999912381298461289346129');
+  ERROR: cek with OID 2566986098 not found
+  LINE 1: SELECT * FROM byteawithoutordercolout('\x9072190199999999999...
+
+  MogDB=# SELECT * FROM byteawithoutorderwithequalcolrecv('\x90721901999999999999912381298461289346129'::byteawithoutorderwithequalcol);
+  ERROR: cek with OID 2566986098 not found
+  ^
+  MogDB=# SELECT * FROM byteawithoutorderwithequalcolsend('\x90721901999999999999912381298461289346129'::byteawithoutorderwithequalcol);
+  ERROR: cek with OID 2566986098 not found
+  LINE 1: SELECT * FROM byteawithoutorderwithequalcolsend('\x907219019...
+  ^
+  ```
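+
+  In normal use, these functions are not called directly but are invoked implicitly when equality operators are applied to encrypted columns. The following is a minimal sketch, assuming a client master key and a column encryption key cek1 have already been created and the connection was started with client-side encryption enabled (for example, gsql with the -C option); the table, column, and key names are illustrative:
+
+  ```sql
+  -- Equality predicates on a DETERMINISTIC encrypted column are resolved
+  -- through byteawithoutorderwithequalcoleq and its operator family.
+  CREATE TABLE creditcard_info (
+      id_number int,
+      name      varchar(50) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC)
+  );
+  SELECT * FROM creditcard_info WHERE name = 'Avi';
+  ```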
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/event-trigger-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/event-trigger-functions.md
index d1d3e83d..6fe86aab 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/event-trigger-functions.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/event-trigger-functions.md
@@ -1,116 +1,116 @@
----
-title: Event Trigger Functions
-summary: Event Trigger Functions
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Event Trigger Functions
-
-- pg_event_trigger_ddl_commands
-
-  Description: Reports running DDL commands in the ddl_command_end event trigger.
-
-  Parameter: null
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is used only in event triggers.
-
-  Return type: oid,oid,int4,text,text,text,text,bool,pg_ddl_command.
-
-  Example:
-
-  ```sql
-  MogDB=# CREATE OR REPLACE FUNCTION ddl_command_test()
-  RETURNS event_trigger
-  AS $$
-  DECLARE
-      obj record;
-  BEGIN
-      FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands()
-      LOOP
-          RAISE NOTICE 'command: %',
-              obj.command_tag;
-
-          RAISE NOTICE 'triggered';
-      END LOOP;
-  END; $$ LANGUAGE plpgsql;
-  ```
-
-- pg_event_trigger_dropped_objects
-
-  Description: Makes the list of deleted objects visible to users in the sql_drop event trigger.
-
-  Parameter: null
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is used only in event triggers.
-
-  Return types: oid,oid,int4,bool,bool,booloid,text,text,text,text,TEXTARRAY,TEXTARRAY
-
-  Example:
-
-  ```sql
-  MogDB=# CREATE OR REPLACE FUNCTION test_evtrig_dropped_objects() RETURNS event_trigger
-  LANGUAGE plpgsql AS $$
-  DECLARE
-      obj record;
-  BEGIN
-      FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
-      LOOP
-          IF obj.object_type = 'table' THEN
-              EXECUTE format('DROP TABLE IF EXISTS audit_tbls.%I',
-                  format('%s_%s', obj.schema_name, obj.object_name));
-          END IF;
-
-          INSERT INTO dropped_objects
-              (type, schema, object) VALUES
-              (obj.object_type, obj.schema_name, obj.object_identity);
-      END LOOP;
-  END
-  $$;
-  ```
-
-- pg_event_trigger_table_rewrite_oid
-
-  Description: Makes the overwritten object OIDs visible to users in the table_rewrite event trigger.
-
-  Parameter: null
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is used only in event triggers.
-
-  Return type: oid
-
-  Example:
-
-  ```ruby
-  MogDB=# CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger
-  LANGUAGE plpgsql AS $$
-  BEGIN
-      RAISE NOTICE 'Table ''%'' is being rewritten (reason = %)',
-          pg_event_trigger_table_rewrite_oid()::regclass,
-          pg_event_trigger_table_rewrite_reason();
-  END;
-  $$;
-  ```
-
-- pg_event_trigger_table_rewrite_reason
-
-  Description: Makes reasons for rewriting objects visible to users in the table_rewrite event trigger.
-
-  Parameter: null
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is used only in event triggers.
-
-  Return type: int4
-
-  Example:
-
-  ```ruby
-  MogDB=# CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger
-  LANGUAGE plpgsql AS $$
-  BEGIN
-      RAISE NOTICE 'Table ''%'' is being rewritten (reason = %)',
-          pg_event_trigger_table_rewrite_oid()::regclass,
-          pg_event_trigger_table_rewrite_reason();
-  END;
-  $$;
+---
+title: Event Trigger Functions
+summary: Event Trigger Functions
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Event Trigger Functions
+
+- pg_event_trigger_ddl_commands
+
+  Description: Reports running DDL commands in the ddl_command_end event trigger.
+
+  Parameter: null
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is used only in event triggers.
+
+  Return type: oid,oid,int4,text,text,text,text,bool,pg_ddl_command
+
+  Example:
+
+  ```sql
+  MogDB=# CREATE OR REPLACE FUNCTION ddl_command_test()
+  RETURNS event_trigger
+  AS $$
+  DECLARE
+      obj record;
+  BEGIN
+      FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands()
+      LOOP
+          RAISE NOTICE 'command: %',
+              obj.command_tag;
+
+          RAISE NOTICE 'triggered';
+      END LOOP;
+  END; $$ LANGUAGE plpgsql;
+  ```
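+
+  For reference, one possible way to attach the function above to an event trigger (the trigger and table names are illustrative):
+
+  ```sql
+  MogDB=# CREATE EVENT TRIGGER ddl_command_trigger
+          ON ddl_command_end
+          EXECUTE PROCEDURE ddl_command_test();
+  MogDB=# CREATE TABLE evt_demo(a int);  -- expected to raise NOTICE: command: CREATE TABLE
+  ```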
+
+- pg_event_trigger_dropped_objects
+
+  Description: Makes the list of deleted objects visible to users in the sql_drop event trigger.
+
+  Parameter: null
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is used only in event triggers.
+
+  Return types: oid,oid,int4,bool,bool,bool,text,text,text,text,TEXTARRAY,TEXTARRAY
+
+  Example:
+
+  ```sql
+  MogDB=# CREATE OR REPLACE FUNCTION test_evtrig_dropped_objects() RETURNS event_trigger
+  LANGUAGE plpgsql AS $$
+  DECLARE
+      obj record;
+  BEGIN
+      FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
+      LOOP
+          IF obj.object_type = 'table' THEN
+              EXECUTE format('DROP TABLE IF EXISTS audit_tbls.%I',
+                  format('%s_%s', obj.schema_name, obj.object_name));
+          END IF;
+
+          INSERT INTO dropped_objects
+              (type, schema, object) VALUES
+              (obj.object_type, obj.schema_name, obj.object_identity);
+      END LOOP;
+  END
+  $$;
+  ```
+
+- pg_event_trigger_table_rewrite_oid
+
+  Description: Makes the OID of the table being rewritten visible to users in the table_rewrite event trigger.
+
+  Parameter: null
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is used only in event triggers.
+
+  Return type: oid
+
+  Example:
+
+  ```sql
+  MogDB=# CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger
+  LANGUAGE plpgsql AS $$
+  BEGIN
+      RAISE NOTICE 'Table ''%'' is being rewritten (reason = %)',
+          pg_event_trigger_table_rewrite_oid()::regclass,
+          pg_event_trigger_table_rewrite_reason();
+  END;
+  $$;
+  ```
+
+- pg_event_trigger_table_rewrite_reason
+
+  Description: Makes reasons for rewriting objects visible to users in the table_rewrite event trigger.
+
+  Parameter: null
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function is used only in event triggers.
+
+  Return type: int4
+
+  Example:
+
+  ```sql
+  MogDB=# CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger
+  LANGUAGE plpgsql AS $$
+  BEGIN
+      RAISE NOTICE 'Table ''%'' is being rewritten (reason = %)',
+          pg_event_trigger_table_rewrite_oid()::regclass,
+          pg_event_trigger_table_rewrite_reason();
+  END;
+  $$;
   ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/fault-injection-system-function.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/fault-injection-system-function.md
index 66485a7a..456401bc 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/fault-injection-system-function.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/fault-injection-system-function.md
@@ -1,20 +1,20 @@
----
-title: Fault Injection System Function
-summary: Fault Injection System Function
-author: Guo Huan
-date: 2021-10-28
----
-
-# Fault Injection System Function
-
-- gs_fault_inject(int64, text, text, text, text, text)
-
-  Description: This function cannot be called. WARNING information "unsupported fault injection" is reported when this function is called, which does not affect or change the database.
-
-  Parameter: fault injection of the int64 type (**0**: CLOG extended page; **1**: CLOG page reading; **2**: forcible deadlock)
-
-  - If the first input parameter of text is set to **2** and the second input parameter of text is set to **1**, the second input parameter deadlock occurs. Other input parameters are not deadlocked. When the first input parameter is **0** or **1**, the second input parameter indicates the number of the start page from which the CLOG starts to be extended or read.
-  - The third input parameter of text indicates the number of extended or read pages when the first input parameter is **0** or **1**.
-  - The fourth to sixth input parameters of text are reserved.
-
-  Return type: int64
+---
+title: Fault Injection System Function
+summary: Fault Injection System Function
+author: Guo Huan
+date: 2021-10-28
+---
+
+# Fault Injection System Function
+
+- gs_fault_inject(int64, text, text, text, text, text)
+
+  Description: Fault injection is not supported. When this function is called, the WARNING message "unsupported fault injection" is reported, and the database is neither affected nor changed.
+
+  Parameter: fault injection type of the int64 type (**0**: CLOG page extension; **1**: CLOG page reading; **2**: forcible deadlock)
+
+  - When the first input parameter is **0** or **1**, the second input parameter (text) indicates the number of the start page from which the CLOG is extended or read. When the first input parameter is **2**, a deadlock is forced only if the second input parameter is set to **1**; any other value does not trigger a deadlock.
+  - The third input parameter of text indicates the number of extended or read pages when the first input parameter is **0** or **1**.
+  - The fourth to sixth input parameters of text are reserved.
+
+  Return type: int64
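+
+  Example (a hedged sketch: the warning text is as documented above, while the returned value shown is illustrative):
+
+  ```sql
+  MogDB=# SELECT gs_fault_inject(0, '1', '2', '', '', '');
+  WARNING:  unsupported fault injection
+   gs_fault_inject
+  -----------------
+                 0
+  (1 row)
+  ```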
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/functions-and-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/functions-and-operators.md
index 1c0aac20..8c094bef 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/functions-and-operators.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/functions-and-operators.md
@@ -1,50 +1,50 @@
----
-title: System Functions
-summary: System Functions
-author: zhang cuiping
-date: 2023-04-23
----
-
-# System Functions
-
-- **[Logical Operators](logical-operators.md)**
-- **[Comparison Operators](comparison-operators.md)**
-- **[Character Processing Functions and Operators](character-processing-functions-and-operators.md)**
-- **[Binary String Functions and Operators](binary-string-functions-and-operators.md)**
-- **[Bit String Functions and Operators](bit-string-functions-and-operators.md)**
-- **[Mode Matching Operators](mode-matching-operators.md)**
-- **[Mathematical Functions and Operators](mathematical-functions-and-operators.md)**
-- **[Date and Time Processing Functions and Operators](date-and-time-processing-functions-and-operators.md)**
-- **[Type Conversion Functions](./type-conversion-functions/type-conversion-functions.md)**
-- **[Geometric Functions and Operators](geometric-functions-and-operators.md)**
-- **[Network Address Functions and Operators](network-address-functions-and-operators.md)**
-- **[Text Search Functions and Operators](text-search-functions-and-operators.md)**
-- **[JSON/JSONB Functions and Operators](json-functions.md)**
-- **[HLL Functions and Operators](hll-functions-and-operators.md)**
-- **[SEQUENCE Functions](sequence-functions.md)**
-- **[Array Functions and Operators](array-functions-and-operators.md)**
-- **[Range Functions and Operators](range-functions-and-operators.md)**
-- **[Aggregate Functions](aggregate-functions.md)**
-- **[Window Functions (Analysis Functions)](window-functions.md)**
-- **[Security Functions](security-functions.md)**
-- **[Ledger Database Functions](ledger-database-functions.md)**
-- **[Encrypted Equality Functions](encrypted-equality-functions.md)**
-- **[Set Returning Functions](set-returning-functions.md)**
-- **[Conditional Expression Functions](conditional-expressions-functions.md)**
-- **[System Information Functions](./system-information-functions/system-information-functions.md)**
-- **[System Administration Functions](./system-management-functions/system-management-functions.md)**
-- **[Statistics Information Functions](./statistics-information-functions/statistics-information-functions.md)**
-- **[Statistics Information Functions](trigger-functions.md)**
-- **[Event Trigger Functions](event-trigger-functions.md)**
-- **[Hash Function](hash-function.md)**
-- **[Prompt Message Function](prompt-message-function.md)**
-- **[Global Temporary Table Functions](global-temporary-table-functions.md)**
-- **[Fault Injection System Function](fault-injection-system-function.md)**
-- **[AI Feature Functions](ai-feature-functions.md)**
-- **[Dynamic Data Masking Functions](dynamic-data-masking-functions.md)**
-- **[Other System Functions](./other-system-functions/other-system-functions.md)**
-- **[Internal Functions](./internal-functions/internal-functions.md)**
-- **[Global SysCache Feature Functions](global-syscache-feature-functions.md)**
-- **[Data Damage Detection and Repair Functions](data-damage-detection-and-repair-functions.md)**
-- **[XML Types](xml-functions.md)**
+---
+title: System Functions
+summary: System Functions
+author: zhang cuiping
+date: 2023-04-23
+---
+
+# System Functions
+
+- **[Logical Operators](logical-operators.md)**
+- **[Comparison Operators](comparison-operators.md)**
+- **[Character Processing Functions and Operators](character-processing-functions-and-operators.md)**
+- **[Binary String Functions and Operators](binary-string-functions-and-operators.md)**
+- **[Bit String Functions and Operators](bit-string-functions-and-operators.md)**
+- **[Mode Matching Operators](mode-matching-operators.md)**
+- **[Mathematical Functions and Operators](mathematical-functions-and-operators.md)**
+- **[Date and Time Processing Functions and Operators](date-and-time-processing-functions-and-operators.md)**
+- **[Type Conversion Functions](./type-conversion-functions/type-conversion-functions.md)**
+- **[Geometric Functions and Operators](geometric-functions-and-operators.md)**
+- **[Network Address Functions and Operators](network-address-functions-and-operators.md)**
+- **[Text Search Functions and Operators](text-search-functions-and-operators.md)**
+- **[JSON/JSONB Functions and Operators](json-functions.md)**
+- **[HLL Functions and Operators](hll-functions-and-operators.md)**
+- **[SEQUENCE Functions](sequence-functions.md)**
+- **[Array Functions and Operators](array-functions-and-operators.md)**
+- **[Range Functions and Operators](range-functions-and-operators.md)**
+- **[Aggregate Functions](aggregate-functions.md)**
+- **[Window Functions (Analysis Functions)](window-functions.md)**
+- **[Security Functions](security-functions.md)**
+- **[Ledger Database Functions](ledger-database-functions.md)**
+- **[Encrypted Equality Functions](encrypted-equality-functions.md)**
+- **[Set Returning Functions](set-returning-functions.md)**
+- **[Conditional Expression Functions](conditional-expressions-functions.md)**
+- **[System Information Functions](./system-information-functions/system-information-functions.md)**
+- **[System Administration Functions](./system-management-functions/system-management-functions.md)**
+- **[Statistics Information Functions](./statistics-information-functions/statistics-information-functions.md)**
+- **[Trigger Functions](trigger-functions.md)**
+- **[Event Trigger Functions](event-trigger-functions.md)**
+- **[Hash Function](hash-function.md)**
+- **[Prompt Message Function](prompt-message-function.md)**
+- **[Global Temporary Table Functions](global-temporary-table-functions.md)**
+- **[Fault Injection System Function](fault-injection-system-function.md)**
+- **[AI Feature Functions](ai-feature-functions.md)**
+- **[Dynamic Data Masking Functions](dynamic-data-masking-functions.md)**
+- **[Other System Functions](./other-system-functions/other-system-functions.md)**
+- **[Internal Functions](./internal-functions/internal-functions.md)**
+- **[Global SysCache Feature Functions](global-syscache-feature-functions.md)**
+- **[Data Damage Detection and Repair Functions](data-damage-detection-and-repair-functions.md)**
+- **[XML Types](xml-functions.md)**
 - **[Obsolete Functions](obsolete-functions.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/geometric-functions-and-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/geometric-functions-and-operators.md
index ff10731e..a868d13b 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/geometric-functions-and-operators.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/geometric-functions-and-operators.md
@@ -1,948 +1,948 @@
----
-title: Geometric Functions and Operators
-summary: Geometric Functions and Operators
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Geometric Functions and Operators
-
-## Geometric Operators
-
-- \+
-
-  Description: Translation
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT box '((0,0),(1,1))' + point '(2.0,0)' AS RESULT;
-   result
-  -------------
-   (3,1),(2,0)
-  (1 row)
-  ```
-
-- \-
-
-  Description: Translation
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT box '((0,0),(1,1))' - point '(2.0,0)' AS RESULT;
-   result
-  ---------------
-   (-1,1),(-2,0)
-  (1 row)
-  ```
-
-- \*
-
-  Description: Scaling out/Rotation
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT box '((0,0),(1,1))' * point '(2.0,0)' AS RESULT;
-   result
-  -------------
-   (2,2),(0,0)
-  (1 row)
-  ```
-
-- /
-
-  Description: Scaling in/Rotation
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT box '((0,0),(2,2))' / point '(2.0,0)' AS RESULT;
-   result
-  -------------
-   (1,1),(0,0)
-  (1 row)
-  ```
-
-- \#
-
-  Description: Intersection of two figures
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT box '((1,-1),(-1,1))' # box '((1,1),(-2,-2))' AS RESULT;
-   result
-  ---------------
-   (1,1),(-1,-1)
-  (1 row)
-  ```
-
-- \#
-
-  Description: Number of paths or polygon vertexes of a figure
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT # path'((1,0),(0,1),(-1,0))' AS RESULT;
-   result
-  --------
-        3
-  (1 row)
-  ```
-
-- @-@
-
-  Description: Length or circumference of a figure
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT @-@ path '((0,0),(1,0))' AS RESULT;
-   result
-  --------
-        2
-  (1 row)
-  ```
-
-- @@
-
-  Description: Center of a figure
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT @@ circle '((0,0),10)' AS RESULT;
-   result
-  --------
-   (0,0)
-  (1 row)
-  ```
-
-- <->
-
-  Description: Distance between two figures
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT circle '((0,0),1)' <-> circle '((5,0),1)' AS RESULT;
-   result
-  --------
-        3
-  (1 row)
-  ```
-
-- &&
-
-  Description: Overlaps? (One point in common makes this true.)
- - Example: - - ```sql - MogDB=# SELECT box '((0,0),(1,1))' && box '((0,0),(2,2))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- << - - Description: Is strictly left of (no common horizontal coordinate)? - - Example: - - ```sql - MogDB=# SELECT circle '((0,0),1)' << circle '((5,0),1)' AS RESULT; - result - -------- - t - (1 row) - ``` - -- >> - - Description: Is strictly right of (no common horizontal coordinate)? - - Example: - - ```sql - MogDB=# SELECT circle '((5,0),1)' >> circle '((0,0),1)' AS RESULT; - result - -------- - t - (1 row) - ``` - -- &< - - Description: Does not extend to the right of? - - Example: - - ```sql - MogDB=# SELECT box '((0,0),(1,1))' &< box '((0,0),(2,2))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- &> - - Description: Does not extend to the left of? - - Example: - - ```sql - MogDB=# SELECT box '((0,0),(3,3))' &> box '((0,0),(2,2))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- <<| - - Description: Is strictly below (no common horizontal coordinate)? - - Example: - - ```sql - MogDB=# SELECT box '((0,0),(3,3))' <<| box '((3,4),(5,5))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- |>> - - Description: Is strictly above (no common horizontal coordinate)? - - Example: - - ```sql - MogDB=# SELECT box '((3,4),(5,5))' |>> box '((0,0),(3,3))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- &<| - - Description: Does not extend above? - - Example: - - ```sql - MogDB=# SELECT box '((0,0),(1,1))' &<| box '((0,0),(2,2))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- |&> - - Description: Does not extend below? - - Example: - - ```sql - MogDB=# SELECT box '((0,0),(3,3))' |&> box '((0,0),(2,2))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- <^ - - Description: Is below (allows touching)? - - Example: - - ```sql - MogDB=# SELECT box '((0,0),(-3,-3))' <^ box '((0,0),(2,2))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- >^ - - Description: Is above (allows touching)? - - Example: - - ```sql - MogDB=# SELECT box '((0,0),(2,2))' >^ box '((0,0),(-3,-3))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- ?# - - Description: Intersect? - - Example: - - ```sql - MogDB=# SELECT lseg '((-1,0),(1,0))' ?# box '((-2,-2),(2,2))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- ?- - - Description: Is horizontal? - - Example: - - ```sql - MogDB=# SELECT ?- lseg '((-1,0),(1,0))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- ?- - - Description: Are horizontally aligned? - - Example: - - ```sql - MogDB=# SELECT point '(1,0)' ?- point '(0,0)' AS RESULT; - result - -------- - t - (1 row) - ``` - -- ?| - - Description: Is vertical? - - Example: - - ```sql - MogDB=# SELECT ?| lseg '((-1,0),(1,0))' AS RESULT; - result - -------- - f - (1 row) - ``` - -- ?| - - Description: Are vertically aligned? - - Example: - - ```sql - MogDB=# SELECT point '(0,1)' ?| point '(0,0)' AS RESULT; - result - -------- - t - (1 row) - ``` - -- ?-| - - Description: Are perpendicular? - - Example: - - ```sql - MogDB=# SELECT lseg '((0,0),(0,1))' ?-| lseg '((0,0),(1,0))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- ?|| - - Description: Are parallel? - - Example: - - ```sql - MogDB=# SELECT lseg '((-1,0),(1,0))' ?|| lseg '((-1,2),(1,2))' AS RESULT; - result - -------- - t - (1 row) - ``` - -- @> - - Description: Contains? - - Example: - - ```sql - MogDB=# SELECT circle '((0,0),2)' @> point '(1,1)' AS RESULT; - result - -------- - t - (1 row) - ``` - -- <@ - - Description: Contained in or on? 
- - Example: - - ```sql - MogDB=# SELECT point '(1,1)' <@ circle '((0,0),2)' AS RESULT; - result - -------- - t - (1 row) - ``` - -- ~= - - Description: Same as? - - Example: - - ```sql - MogDB=# SELECT polygon '((0,0),(1,1))' ~= polygon '((1,1),(0,0))' AS RESULT; - result - -------- - t - (1 row) - ``` - -## Geometric Functions - -- area(object) - - Description: Area calculation - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT area(box '((0,0),(1,1))') AS RESULT; - result - -------- - 1 - (1 row) - ``` - -- center(object) - - Description: Figure center calculation - - Return type: point - - Example: - - ```sql - MogDB=# SELECT center(box '((0,0),(1,2))') AS RESULT; - result - --------- - (0.5,1) - (1 row) - ``` - -- diameter(circle) - - Description: Circle diameter calculation - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT diameter(circle '((0,0),2.0)') AS RESULT; - result - -------- - 4 - (1 row) - ``` - -- height(box) - - Description: Vertical size of box - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT height(box '((0,0),(1,1))') AS RESULT; - result - -------- - 1 - (1 row) - ``` - -- isclosed(path) - - Description: A closed path? - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT isclosed(path '((0,0),(1,1),(2,0))') AS RESULT; - result - -------- - t - (1 row) - ``` - -- isopen(path) - - Description: An open path? - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT isopen(path '[(0,0),(1,1),(2,0)]') AS RESULT; - result - -------- - t - (1 row) - ``` - -- length(object) - - Description: Length calculation - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT length(path '((-1,0),(1,0))') AS RESULT; - result - -------- - 4 - (1 row) - ``` - -- npoints(path) - - Description: Number of points in a path - - Return type: int - - Example: - - ```sql - MogDB=# SELECT npoints(path '[(0,0),(1,1),(2,0)]') AS RESULT; - result - -------- - 3 - (1 row) - ``` - -- npoints(polygon) - - Description: Number of points in a polygon - - Return type: int - - Example: - - ```sql - MogDB=# SELECT npoints(polygon '((1,1),(0,0))') AS RESULT; - result - -------- - 2 - (1 row) - ``` - -- pclose(path) - - Description: Converts a path to closed. - - Return type: path - - Example: - - ```sql - MogDB=# SELECT pclose(path '[(0,0),(1,1),(2,0)]') AS RESULT; - result - --------------------- - ((0,0),(1,1),(2,0)) - (1 row) - ``` - -- popen(path) - - Description: Converts a path to open. 
- - Return type: path - - Example: - - ```sql - MogDB=# SELECT popen(path '((0,0),(1,1),(2,0))') AS RESULT; - result - --------------------- - [(0,0),(1,1),(2,0)] - (1 row) - ``` - -- radius(circle) - - Description: Circle radius calculation - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT radius(circle '((0,0),2.0)') AS RESULT; - result - -------- - 2 - (1 row) - ``` - -- width(box) - - Description: Horizontal size of a box - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT width(box '((0,0),(1,1))') AS RESULT; - result - -------- - 1 - (1 row) - ``` - -## Geometric Type Conversion Functions - -- box(circle) - - Description: Circle to box - - Return type: box - - Example: - - ```sql - MogDB=# SELECT box(circle '((0,0),2.0)') AS RESULT; - result - --------------------------------------------------------------------------- - (1.41421356237309,1.41421356237309),(-1.41421356237309,-1.41421356237309) - (1 row) - ``` - -- box(point, point) - - Description: Points to box - - Return type: box - - Example: - - ```sql - MogDB=# SELECT box(point '(0,0)', point '(1,1)') AS RESULT; - result - ------------- - (1,1),(0,0) - (1 row) - ``` - -- box(polygon) - - Description: Polygon to box - - Return type: box - - Example: - - ```sql - MogDB=# SELECT box(polygon '((0,0),(1,1),(2,0))') AS RESULT; - result - ------------- - (2,1),(0,0) - (1 row) - ``` - -- circle(box) - - Description: Box to circle - - Return type: circle - - Example: - - ```sql - MogDB=# SELECT circle(box '((0,0),(1,1))') AS RESULT; - result - ------------------------------- - <(0.5,0.5),0.707106781186548> - (1 row) - ``` - -- circle(point, double precision) - - Description: Center and radius to circle - - Return type: circle - - Example: - - ```sql - MogDB=# SELECT circle(point '(0,0)', 2.0) AS RESULT; - result - ----------- - <(0,0),2> - (1 row) - ``` - -- circle(polygon) - - Description: Polygon to circle - - Return type: circle - - Example: - - ```sql - MogDB=# SELECT circle(polygon '((0,0),(1,1),(2,0))') AS RESULT; - result - ------------------------------------------- - <(1,0.333333333333333),0.924950591148529> - (1 row) - ``` - -- lseg(box) - - Description: Box diagonal to line segment - - Return type: lseg - - Example: - - ```sql - MogDB=# SELECT lseg(box '((-1,0),(1,0))') AS RESULT; - result - ---------------- - [(1,0),(-1,0)] - (1 row) - ``` - -- lseg(point, point) - - Description: Points to line segment - - Return type: lseg - - Example: - - ```sql - MogDB=# SELECT lseg(point '(-1,0)', point '(1,0)') AS RESULT; - result - ---------------- - [(-1,0),(1,0)] - (1 row) - ``` - -- slope(point, point) - - Description: Calculates the slope of a straight line formed by two points. 
- - Return type: double - - Example: - - ```sql - MogDB=# SELECT slope(point '(1,1)', point '(0,0)') AS RESULT; - result - -------- - 1 - (1 row) - ``` - -- path(polygon) - - Description: Polygon to path - - Return type: path - - Example: - - ```sql - MogDB=# SELECT path(polygon '((0,0),(1,1),(2,0))') AS RESULT; - result - --------------------- - ((0,0),(1,1),(2,0)) - (1 row) - ``` - -- point(double precision, double precision) - - Description: Points - - Return type: point - - Example: - - ```sql - MogDB=# SELECT point(23.4, -44.5) AS RESULT; - result - -------------- - (23.4,-44.5) - (1 row) - ``` - -- point(box) - - Description: Center of a box - - Return type: point - - Example: - - ```sql - MogDB=# SELECT point(box '((-1,0),(1,0))') AS RESULT; - result - -------- - (0,0) - (1 row) - ``` - -- point(circle) - - Description: Center of a circle - - Return type: point - - Example: - - ```sql - MogDB=# SELECT point(circle '((0,0),2.0)') AS RESULT; - result - -------- - (0,0) - (1 row) - ``` - -- point(lseg) - - Description: Center of a line segment - - Return type: point - - Example: - - ```sql - MogDB=# SELECT point(lseg '((-1,0),(1,0))') AS RESULT; - result - -------- - (0,0) - (1 row) - ``` - -- point(polygon) - - Description: Center of a polygon - - Return type: point - - Example: - - ```sql - MogDB=# SELECT point(polygon '((0,0),(1,1),(2,0))') AS RESULT; - result - ----------------------- - (1,0.333333333333333) - (1 row) - ``` - -- polygon(box) - - Description: Box to 4-point polygon - - Return type: polygon - - Example: - - ```sql - MogDB=# SELECT polygon(box '((0,0),(1,1))') AS RESULT; - result - --------------------------- - ((0,0),(0,1),(1,1),(1,0)) - (1 row) - ``` - -- polygon(circle) - - Description: Circle to 12-point polygon - - Return type: polygon - - Example: - - ```sql - MogDB=# SELECT polygon(circle '((0,0),2.0)') AS RESULT; - result - - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - ((-2,0),(-1.73205080756888,1),(-1,1.73205080756888),(-1.22464679914735e-16,2),(1,1.73205080756888),(1.73205080756888,1),(2,2.44929359829471e-16),(1.73205080756888,-0.999999999999999),(1,-1.73205080756888),(3.67394039744206e-16,-2),(-0.999999999999999,-1.73205080756888),(-1.73205080756888,-1)) - (1 row) - ``` - -- polygon(npts, circle) - - Description: Circle to **npts**-point polygon - - Return type: polygon - - Example: - - ```sql - MogDB=# SELECT polygon(12, circle '((0,0),2.0)') AS RESULT; - result - - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - ((-2,0),(-1.73205080756888,1),(-1,1.73205080756888),(-1.22464679914735e-16,2),(1,1.73205080756888),(1.73205080756888,1),(2,2.44929359829471e-16),(1.73205080756888,-0.999999999999999),(1,-1.73205080756888),(3.67394039744206e-16,-2),(-0.999999999999999,-1.73205080756888),(-1.73205080756888,-1)) - (1 row) - ``` - -- polygon(path) - - Description: Path to polygon - - Return type: polygon - - Example: - - ```sql - MogDB=# SELECT polygon(path '((0,0),(1,1),(2,0))') AS RESULT; - result - --------------------- - ((0,0),(1,1),(2,0)) - (1 row) - ``` +--- +title: 
Geometric Functions and Operators +summary: Geometric Functions and Operators +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Geometric Functions and Operators + +## Geometric Operators + +- \+ + + Description: Translation + + Example: + + ```sql + MogDB=# SELECT box '((0,0),(1,1))' + point '(2.0,0)' AS RESULT; + result + ------------- + (3,1),(2,0) + (1 row) + ``` + +- \- + + Description: Translation + + Example: + + ```sql + MogDB=# SELECT box '((0,0),(1,1))' - point '(2.0,0)' AS RESULT; + result + --------------- + (-1,1),(-2,0) + (1 row) + ``` + +- \* + + Description: Scaling out/Rotation + + Example: + + ```sql + MogDB=# SELECT box '((0,0),(1,1))' * point '(2.0,0)' AS RESULT; + result + ------------- + (2,2),(0,0) + (1 row) + ``` + +- / + + Description: Scaling in/Rotation + + Example: + + ```sql + MogDB=# SELECT box '((0,0),(2,2))' / point '(2.0,0)' AS RESULT; + result + ------------- + (1,1),(0,0) + (1 row) + ``` + +- \# + + Description: Intersection of two figures + + Example: + + ```sql + MogDB=# SELECT box '((1,-1),(-1,1))' # box '((1,1),(-2,-2))' AS RESULT; + result + --------------- + (1,1),(-1,-1) + (1 row) + ``` + +- \# + + Description: Number of paths or polygon vertexes of a figure + + Example: + + ```sql + MogDB=# SELECT # path'((1,0),(0,1),(-1,0))' AS RESULT; + result + -------- + 3 + (1 row) + ``` + +- @-@ + + Description: Length or circumference of a figure + + Example: + + ```sql + MogDB=# SELECT @-@ path '((0,0),(1,0))' AS RESULT; + result + -------- + 2 + (1 row) + ``` + +- @@ + + Description: Center of a figure + + Example: + + ```sql + MogDB=# SELECT @@ circle '((0,0),10)' AS RESULT; + result + -------- + (0,0) + (1 row) + ``` + +- <-> + + Description: Distance between two figures + + Example: + + ```sql + MogDB=# SELECT circle '((0,0),1)' <-> circle '((5,0),1)' AS RESULT; + result + -------- + 3 + (1 row) + ``` + +- && + + Description: Overlaps? (One point in common makes this true.) + + Example: + + ```sql + MogDB=# SELECT box '((0,0),(1,1))' && box '((0,0),(2,2))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- << + + Description: Is strictly left of (no common horizontal coordinate)? + + Example: + + ```sql + MogDB=# SELECT circle '((0,0),1)' << circle '((5,0),1)' AS RESULT; + result + -------- + t + (1 row) + ``` + +- >> + + Description: Is strictly right of (no common horizontal coordinate)? + + Example: + + ```sql + MogDB=# SELECT circle '((5,0),1)' >> circle '((0,0),1)' AS RESULT; + result + -------- + t + (1 row) + ``` + +- &< + + Description: Does not extend to the right of? + + Example: + + ```sql + MogDB=# SELECT box '((0,0),(1,1))' &< box '((0,0),(2,2))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- &> + + Description: Does not extend to the left of? + + Example: + + ```sql + MogDB=# SELECT box '((0,0),(3,3))' &> box '((0,0),(2,2))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- <<| + + Description: Is strictly below (no common horizontal coordinate)? + + Example: + + ```sql + MogDB=# SELECT box '((0,0),(3,3))' <<| box '((3,4),(5,5))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- |>> + + Description: Is strictly above (no common horizontal coordinate)? + + Example: + + ```sql + MogDB=# SELECT box '((3,4),(5,5))' |>> box '((0,0),(3,3))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- &<| + + Description: Does not extend above? 
+ + Example: + + ```sql + MogDB=# SELECT box '((0,0),(1,1))' &<| box '((0,0),(2,2))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- |&> + + Description: Does not extend below? + + Example: + + ```sql + MogDB=# SELECT box '((0,0),(3,3))' |&> box '((0,0),(2,2))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- <^ + + Description: Is below (allows touching)? + + Example: + + ```sql + MogDB=# SELECT box '((0,0),(-3,-3))' <^ box '((0,0),(2,2))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- >^ + + Description: Is above (allows touching)? + + Example: + + ```sql + MogDB=# SELECT box '((0,0),(2,2))' >^ box '((0,0),(-3,-3))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- ?# + + Description: Intersect? + + Example: + + ```sql + MogDB=# SELECT lseg '((-1,0),(1,0))' ?# box '((-2,-2),(2,2))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- ?- + + Description: Is horizontal? + + Example: + + ```sql + MogDB=# SELECT ?- lseg '((-1,0),(1,0))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- ?- + + Description: Are horizontally aligned? + + Example: + + ```sql + MogDB=# SELECT point '(1,0)' ?- point '(0,0)' AS RESULT; + result + -------- + t + (1 row) + ``` + +- ?| + + Description: Is vertical? + + Example: + + ```sql + MogDB=# SELECT ?| lseg '((-1,0),(1,0))' AS RESULT; + result + -------- + f + (1 row) + ``` + +- ?| + + Description: Are vertically aligned? + + Example: + + ```sql + MogDB=# SELECT point '(0,1)' ?| point '(0,0)' AS RESULT; + result + -------- + t + (1 row) + ``` + +- ?-| + + Description: Are perpendicular? + + Example: + + ```sql + MogDB=# SELECT lseg '((0,0),(0,1))' ?-| lseg '((0,0),(1,0))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- ?|| + + Description: Are parallel? + + Example: + + ```sql + MogDB=# SELECT lseg '((-1,0),(1,0))' ?|| lseg '((-1,2),(1,2))' AS RESULT; + result + -------- + t + (1 row) + ``` + +- @> + + Description: Contains? + + Example: + + ```sql + MogDB=# SELECT circle '((0,0),2)' @> point '(1,1)' AS RESULT; + result + -------- + t + (1 row) + ``` + +- <@ + + Description: Contained in or on? + + Example: + + ```sql + MogDB=# SELECT point '(1,1)' <@ circle '((0,0),2)' AS RESULT; + result + -------- + t + (1 row) + ``` + +- ~= + + Description: Same as? + + Example: + + ```sql + MogDB=# SELECT polygon '((0,0),(1,1))' ~= polygon '((1,1),(0,0))' AS RESULT; + result + -------- + t + (1 row) + ``` + +## Geometric Functions + +- area(object) + + Description: Area calculation + + Return type: double precision + + Example: + + ```sql + MogDB=# SELECT area(box '((0,0),(1,1))') AS RESULT; + result + -------- + 1 + (1 row) + ``` + +- center(object) + + Description: Figure center calculation + + Return type: point + + Example: + + ```sql + MogDB=# SELECT center(box '((0,0),(1,2))') AS RESULT; + result + --------- + (0.5,1) + (1 row) + ``` + +- diameter(circle) + + Description: Circle diameter calculation + + Return type: double precision + + Example: + + ```sql + MogDB=# SELECT diameter(circle '((0,0),2.0)') AS RESULT; + result + -------- + 4 + (1 row) + ``` + +- height(box) + + Description: Vertical size of box + + Return type: double precision + + Example: + + ```sql + MogDB=# SELECT height(box '((0,0),(1,1))') AS RESULT; + result + -------- + 1 + (1 row) + ``` + +- isclosed(path) + + Description: A closed path? + + Return type: Boolean + + Example: + + ```sql + MogDB=# SELECT isclosed(path '((0,0),(1,1),(2,0))') AS RESULT; + result + -------- + t + (1 row) + ``` + +- isopen(path) + + Description: An open path? 
+ + Return type: Boolean + + Example: + + ```sql + MogDB=# SELECT isopen(path '[(0,0),(1,1),(2,0)]') AS RESULT; + result + -------- + t + (1 row) + ``` + +- length(object) + + Description: Length calculation + + Return type: double precision + + Example: + + ```sql + MogDB=# SELECT length(path '((-1,0),(1,0))') AS RESULT; + result + -------- + 4 + (1 row) + ``` + +- npoints(path) + + Description: Number of points in a path + + Return type: int + + Example: + + ```sql + MogDB=# SELECT npoints(path '[(0,0),(1,1),(2,0)]') AS RESULT; + result + -------- + 3 + (1 row) + ``` + +- npoints(polygon) + + Description: Number of points in a polygon + + Return type: int + + Example: + + ```sql + MogDB=# SELECT npoints(polygon '((1,1),(0,0))') AS RESULT; + result + -------- + 2 + (1 row) + ``` + +- pclose(path) + + Description: Converts a path to closed. + + Return type: path + + Example: + + ```sql + MogDB=# SELECT pclose(path '[(0,0),(1,1),(2,0)]') AS RESULT; + result + --------------------- + ((0,0),(1,1),(2,0)) + (1 row) + ``` + +- popen(path) + + Description: Converts a path to open. + + Return type: path + + Example: + + ```sql + MogDB=# SELECT popen(path '((0,0),(1,1),(2,0))') AS RESULT; + result + --------------------- + [(0,0),(1,1),(2,0)] + (1 row) + ``` + +- radius(circle) + + Description: Circle radius calculation + + Return type: double precision + + Example: + + ```sql + MogDB=# SELECT radius(circle '((0,0),2.0)') AS RESULT; + result + -------- + 2 + (1 row) + ``` + +- width(box) + + Description: Horizontal size of a box + + Return type: double precision + + Example: + + ```sql + MogDB=# SELECT width(box '((0,0),(1,1))') AS RESULT; + result + -------- + 1 + (1 row) + ``` + +## Geometric Type Conversion Functions + +- box(circle) + + Description: Circle to box + + Return type: box + + Example: + + ```sql + MogDB=# SELECT box(circle '((0,0),2.0)') AS RESULT; + result + --------------------------------------------------------------------------- + (1.41421356237309,1.41421356237309),(-1.41421356237309,-1.41421356237309) + (1 row) + ``` + +- box(point, point) + + Description: Points to box + + Return type: box + + Example: + + ```sql + MogDB=# SELECT box(point '(0,0)', point '(1,1)') AS RESULT; + result + ------------- + (1,1),(0,0) + (1 row) + ``` + +- box(polygon) + + Description: Polygon to box + + Return type: box + + Example: + + ```sql + MogDB=# SELECT box(polygon '((0,0),(1,1),(2,0))') AS RESULT; + result + ------------- + (2,1),(0,0) + (1 row) + ``` + +- circle(box) + + Description: Box to circle + + Return type: circle + + Example: + + ```sql + MogDB=# SELECT circle(box '((0,0),(1,1))') AS RESULT; + result + ------------------------------- + <(0.5,0.5),0.707106781186548> + (1 row) + ``` + +- circle(point, double precision) + + Description: Center and radius to circle + + Return type: circle + + Example: + + ```sql + MogDB=# SELECT circle(point '(0,0)', 2.0) AS RESULT; + result + ----------- + <(0,0),2> + (1 row) + ``` + +- circle(polygon) + + Description: Polygon to circle + + Return type: circle + + Example: + + ```sql + MogDB=# SELECT circle(polygon '((0,0),(1,1),(2,0))') AS RESULT; + result + ------------------------------------------- + <(1,0.333333333333333),0.924950591148529> + (1 row) + ``` + +- lseg(box) + + Description: Box diagonal to line segment + + Return type: lseg + + Example: + + ```sql + MogDB=# SELECT lseg(box '((-1,0),(1,0))') AS RESULT; + result + ---------------- + [(1,0),(-1,0)] + (1 row) + ``` + +- lseg(point, point) + + Description: Points to line segment + + 
Return type: lseg + + Example: + + ```sql + MogDB=# SELECT lseg(point '(-1,0)', point '(1,0)') AS RESULT; + result + ---------------- + [(-1,0),(1,0)] + (1 row) + ``` + +- slope(point, point) + + Description: Calculates the slope of a straight line formed by two points. + + Return type: double + + Example: + + ```sql + MogDB=# SELECT slope(point '(1,1)', point '(0,0)') AS RESULT; + result + -------- + 1 + (1 row) + ``` + +- path(polygon) + + Description: Polygon to path + + Return type: path + + Example: + + ```sql + MogDB=# SELECT path(polygon '((0,0),(1,1),(2,0))') AS RESULT; + result + --------------------- + ((0,0),(1,1),(2,0)) + (1 row) + ``` + +- point(double precision, double precision) + + Description: Points + + Return type: point + + Example: + + ```sql + MogDB=# SELECT point(23.4, -44.5) AS RESULT; + result + -------------- + (23.4,-44.5) + (1 row) + ``` + +- point(box) + + Description: Center of a box + + Return type: point + + Example: + + ```sql + MogDB=# SELECT point(box '((-1,0),(1,0))') AS RESULT; + result + -------- + (0,0) + (1 row) + ``` + +- point(circle) + + Description: Center of a circle + + Return type: point + + Example: + + ```sql + MogDB=# SELECT point(circle '((0,0),2.0)') AS RESULT; + result + -------- + (0,0) + (1 row) + ``` + +- point(lseg) + + Description: Center of a line segment + + Return type: point + + Example: + + ```sql + MogDB=# SELECT point(lseg '((-1,0),(1,0))') AS RESULT; + result + -------- + (0,0) + (1 row) + ``` + +- point(polygon) + + Description: Center of a polygon + + Return type: point + + Example: + + ```sql + MogDB=# SELECT point(polygon '((0,0),(1,1),(2,0))') AS RESULT; + result + ----------------------- + (1,0.333333333333333) + (1 row) + ``` + +- polygon(box) + + Description: Box to 4-point polygon + + Return type: polygon + + Example: + + ```sql + MogDB=# SELECT polygon(box '((0,0),(1,1))') AS RESULT; + result + --------------------------- + ((0,0),(0,1),(1,1),(1,0)) + (1 row) + ``` + +- polygon(circle) + + Description: Circle to 12-point polygon + + Return type: polygon + + Example: + + ```sql + MogDB=# SELECT polygon(circle '((0,0),2.0)') AS RESULT; + result + + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + ((-2,0),(-1.73205080756888,1),(-1,1.73205080756888),(-1.22464679914735e-16,2),(1,1.73205080756888),(1.73205080756888,1),(2,2.44929359829471e-16),(1.73205080756888,-0.999999999999999),(1,-1.73205080756888),(3.67394039744206e-16,-2),(-0.999999999999999,-1.73205080756888),(-1.73205080756888,-1)) + (1 row) + ``` + +- polygon(npts, circle) + + Description: Circle to **npts**-point polygon + + Return type: polygon + + Example: + + ```sql + MogDB=# SELECT polygon(12, circle '((0,0),2.0)') AS RESULT; + result + + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 
((-2,0),(-1.73205080756888,1),(-1,1.73205080756888),(-1.22464679914735e-16,2),(1,1.73205080756888),(1.73205080756888,1),(2,2.44929359829471e-16),(1.73205080756888,-0.999999999999999),(1,-1.73205080756888),(3.67394039744206e-16,-2),(-0.999999999999999,-1.73205080756888),(-1.73205080756888,-1)) + (1 row) + ``` + +- polygon(path) + + Description: Path to polygon + + Return type: polygon + + Example: + + ```sql + MogDB=# SELECT polygon(path '((0,0),(1,1),(2,0))') AS RESULT; + result + --------------------- + ((0,0),(1,1),(2,0)) + (1 row) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/global-syscache-feature-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/global-syscache-feature-functions.md index 74dc3362..5b60e0d5 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/global-syscache-feature-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/global-syscache-feature-functions.md @@ -1,99 +1,99 @@ ---- -title: Global SysCache Feature Functions -summary: Global SysCache Feature Functions -author: Guo Huan -date: 2022-05-10 ---- - -# Global SysCache Feature Functions - -- gs_gsc_table_detail(database_id default NULL, rel_id default NULL) - - Description: Queries global system cache table metadata in a database. The user who calls this function must have the **SYSADMIN** permission. - - Parameter: Specifies the database and table whose global system cache is to be queried. The default value of **database_id** is **NULL** or **–1**, indicating all databases. The value **0** indicates a shared table. Other values indicate the specified database and shared table. **rel_id** indicates the OID of the specified table. The default value **NULL** or **–1** indicates all tables. Other values indicate the specified table. If **database_id** does not exist, an error is reported. If **rel_id** does not exist, the result is empty. - - Return type: Tuple - - Example: - - ``` - select * from gs_gsc_table_detail(-1) limit 1; - database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo - --------------+---------------+--------+-------------------------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+-----------+--------- - 0 | | 2676 | pg_authid_rolname_index | 11 | 0 | 0 | 10 | 403 | 0 | 1664 | f | t | i | 1 | f | f | n | f | 'rolname' | - (1 row) - ``` - -- gs_gsc_catalog_detail(database_id default NULL, rel_id default NULL) - - Description: Queries the system table row information cached in the global system in a database. The user who calls this function must have the **SYSADMIN** permission. - - Parameter: Specifies the database and table whose global system cache is to be queried. The default value of **database_id** is **NULL** or **–1**, indicating all databases. The value **0** indicates a shared table. Other values indicate the specified database and shared table. **rel_id** indicates the ID of the specified table, including all system tables that have system caches, the default value **NULL** or **–1** indicates all tables. Other values indicate the specified table. If **database_id** does not exist, an error is reported. 
If **rel_id** does not exist, the result is empty. - - Return type: Tuple - - Example: - - ``` - MogDB=# - select * from gs_gsc_catalog_detail(16574, 1260); - database_id | database_name | rel_id | rel_name | cache_id | self | ctid | infomask | infomask2 | hash_value | refcount - -------------+---------------+--------+-----------+----------+--------+--------+----------+-----------+------------+---------- - 0 | | 1260 | pg_authid | 10 | (0, 9) | (0, 9) | 10507 | 26 | 531311568 | 10 - 0 | | 1260 | pg_authid | 11 | (0, 4) | (0, 4) | 2313 | 26 | 365368336 | 1 - 0 | | 1260 | pg_authid | 11 | (0, 9) | (0, 9) | 10507 | 26 | 3911517328 | 10 - 0 | | 1260 | pg_authid | 11 | (0, 7) | (0, 7) | 2313 | 26 | 1317799983 | 1 - 0 | | 1260 | pg_authid | 11 | (0, 5) | (0, 5) | 2313 | 26 | 3664347448 | 1 - 0 | | 1260 | pg_authid | 11 | (0, 1) | (0, 1) | 2313 | 26 | 276477273 | 1 - 0 | | 1260 | pg_authid | 11 | (0, 3) | (0, 3) | 2313 | 26 | 2465837659 | 1 - 0 | | 1260 | pg_authid | 11 | (0, 8) | (0, 8) | 2313 | 26 | 3205288035 | 1 - 0 | | 1260 | pg_authid | 11 | (0, 6) | (0, 6) | 2313 | 26 | 131811687 | 1 - 0 | | 1260 | pg_authid | 11 | (0, 2) | (0, 2) | 2313 | 26 | 1226484587 | 1 - (10 rows) - ``` - -- gs_gsc_clean(database_id default NULL) - - Description: Clears the global syscache cache. Note that data in use will not be cleared. The user who calls this function must have the **SYSADMIN** permission. - - Parameter: Specifies the database whose global system cache needs to be cleared. The default value **NULL** or **–1** indicates that the global system cache of all databases is cleared. The value **0** indicates that only the global system cache of the shared table is cleared. Other values indicate that the global system cache of the specified database and shared table is cleared. If **database_id** does not exist, an error is reported. - - Return type: Boolean - - Example: - - ``` - MogDB=# select * from gs_gsc_clean(); - gs_gsc_clean - -------------- - t - (1 row) - ``` - -- gs_gsc_dbstat_info(database_id default NULL) - - Description: Obtains GSC memory statistics on the local node, including cache query, hit, loading, expiration, and occupied space information of tuples, relationships, and partitions, database-level elimination information, thread reference information, and memory usage information. This parameter can be used to locate performance problems. For example, if the value of the hits/searches array is far less than 1, the value of **global_syscache_threshold** may be too small. As a result, the query hit ratio decreases. The user who calls this function must have the **SYSADMIN** permission. - - Parameter: Specifies the global system cache statistics of the database to be queried. **NULL** or **–1** indicates that all databases are queried. **0** indicates that only the information about the shared table is queried. Other values indicate that the information about the specified database and shared table is queried. Invalid input value. If **databse_id** does not exist, an error is reported. 
-
-  Return type: Tuple
-
-  Example:
-
-  ```
-  MogDB=# select * from gs_gsc_dbstat_info();
-  database_id | database_name | tup_searches | tup_hits | tup_miss | tup_count | tup_dead | tup_memory | rel_searches | rel_hits | rel_mis
-  s | rel_count | rel_dead | rel_memory | part_searches | part_hits | part_miss | part_count | part_dead | part_memory | total_memory | swa
-  pout_count | refcount
-  -------------+---------------+--------------+----------+----------+-----------+----------+------------+--------------+----------+--------
-  --+-----------+----------+------------+---------------+-----------+-----------+------------+-----------+-------------+--------------+----
-  -----------+----------
-  0 | | 300 | 235 | 31 | 22 | 2 | 9752 | 598 | 108 | 1
-  8 | 18 | 0 | 77720 | 0 | 0 | 0 | 0 | 0 | 0 | 752912 |
-  0 | 0
-  16574 | postgres | 3368 | 2289 | 329 | 273 | 0 | 92593 | 1113 | 524 | 4
-  8 | 48 | 0 | 340456 | 0 | 0 | 0 | 0 | 0 | 0 | 4124792 |
-  0 | 10
-  (2 rows)
-  ```
+---
+title: Global SysCache Feature Functions
+summary: Global SysCache Feature Functions
+author: Guo Huan
+date: 2022-05-10
+---
+
+# Global SysCache Feature Functions
+
+- gs_gsc_table_detail(database_id default NULL, rel_id default NULL)
+
+  Description: Queries global system cache table metadata in a database. The user who calls this function must have the **SYSADMIN** permission.
+
+  Parameter: Specifies the database and table whose global system cache is to be queried. The default value of **database_id** is **NULL** or **–1**, indicating all databases. The value **0** indicates a shared table. Other values indicate the specified database and shared table. **rel_id** indicates the OID of the specified table. The default value **NULL** or **–1** indicates all tables. Other values indicate the specified table. If **database_id** does not exist, an error is reported. If **rel_id** does not exist, the result is empty.
+
+  Return type: Tuple
+
+  Example:
+
+  ```
+  select * from gs_gsc_table_detail(-1) limit 1;
+  database_oid | database_name | reloid | relname | relnamespace | reltype | reloftype | relowner | relam | relfilenode | reltablespace | relhasindex | relisshared | relkind | relnatts | relhasoids | relhaspkey | parttype | tdhasuids | attnames | extinfo
+  --------------+---------------+--------+-------------------------+--------------+---------+-----------+----------+-------+-------------+---------------+-------------+-------------+---------+----------+------------+------------+----------+-----------+-----------+---------
+  0 | | 2676 | pg_authid_rolname_index | 11 | 0 | 0 | 10 | 403 | 0 | 1664 | f | t | i | 1 | f | f | n | f | 'rolname' |
+  (1 row)
+  ```
+
+- gs_gsc_catalog_detail(database_id default NULL, rel_id default NULL)
+
+  Description: Queries the system catalog row information cached in the global system cache of a database. The user who calls this function must have the **SYSADMIN** permission.
+
+  Parameter: Specifies the database and table whose global system cache is to be queried. The default value of **database_id** is **NULL** or **–1**, indicating all databases. The value **0** indicates a shared table. Other values indicate the specified database and shared table. **rel_id** indicates the ID of the specified table (any system catalog that has a system cache). The default value **NULL** or **–1** indicates all tables. Other values indicate the specified table. If **database_id** does not exist, an error is reported. If **rel_id** does not exist, the result is empty.
+
+  Return type: Tuple
+
+  Example:
+
+  ```
+  MogDB=#
+  select * from gs_gsc_catalog_detail(16574, 1260);
+   database_id | database_name | rel_id | rel_name  | cache_id |  self  |  ctid  | infomask | infomask2 | hash_value | refcount
+  -------------+---------------+--------+-----------+----------+--------+--------+----------+-----------+------------+----------
+             0 |               |   1260 | pg_authid |       10 | (0, 9) | (0, 9) |    10507 |        26 |  531311568 |       10
+             0 |               |   1260 | pg_authid |       11 | (0, 4) | (0, 4) |     2313 |        26 |  365368336 |        1
+             0 |               |   1260 | pg_authid |       11 | (0, 9) | (0, 9) |    10507 |        26 | 3911517328 |       10
+             0 |               |   1260 | pg_authid |       11 | (0, 7) | (0, 7) |     2313 |        26 | 1317799983 |        1
+             0 |               |   1260 | pg_authid |       11 | (0, 5) | (0, 5) |     2313 |        26 | 3664347448 |        1
+             0 |               |   1260 | pg_authid |       11 | (0, 1) | (0, 1) |     2313 |        26 |  276477273 |        1
+             0 |               |   1260 | pg_authid |       11 | (0, 3) | (0, 3) |     2313 |        26 | 2465837659 |        1
+             0 |               |   1260 | pg_authid |       11 | (0, 8) | (0, 8) |     2313 |        26 | 3205288035 |        1
+             0 |               |   1260 | pg_authid |       11 | (0, 6) | (0, 6) |     2313 |        26 |  131811687 |        1
+             0 |               |   1260 | pg_authid |       11 | (0, 2) | (0, 2) |     2313 |        26 | 1226484587 |        1
+  (10 rows)
+  ```
+
+- gs_gsc_clean(database_id default NULL)
+
+  Description: Clears the global system cache (GSC). Note that data in use will not be cleared. The user who calls this function must have the **SYSADMIN** permission.
+
+  Parameter: Specifies the database whose global system cache needs to be cleared. The default value **NULL** or **–1** indicates that the global system cache of all databases is cleared. The value **0** indicates that only the global system cache of the shared table is cleared. Other values indicate that the global system cache of the specified database and shared table is cleared. If **database_id** does not exist, an error is reported.
+
+  Return type: Boolean
+
+  Example:
+
+  ```
+  MogDB=# select * from gs_gsc_clean();
+   gs_gsc_clean
+  --------------
+   t
+  (1 row)
+  ```
+
+- gs_gsc_dbstat_info(database_id default NULL)
+
+  Description: Obtains GSC memory statistics on the local node, including cache query, hit, loading, expiration, and occupied space information of tuples, relations, and partitions, database-level eviction information, thread reference information, and memory usage information. This function can be used to locate performance problems: for example, if the hits/searches ratio is far less than 1, **global_syscache_threshold** may be set too small, which lowers the cache hit ratio (a ratio check is sketched below). The user who calls this function must have the **SYSADMIN** permission.
+
+  Parameter: Specifies the database whose global system cache statistics are to be queried. **NULL** or **–1** indicates that all databases are queried. **0** indicates that only the information about the shared table is queried. Other values indicate that the information about the specified database and shared table is queried. If **database_id** does not exist, an error is reported.
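+
+  For example, the following minimal sketch (it assumes only the statistics columns shown in the Example below) computes per-database hit ratios to confirm whether **global_syscache_threshold** is set too small:
+
+  ```sql
+  -- A ratio close to 1 means the cache is large enough; a much smaller
+  -- value suggests increasing global_syscache_threshold.
+  SELECT database_id,
+         database_name,
+         tup_hits::numeric / NULLIF(tup_searches, 0) AS tup_hit_ratio,
+         rel_hits::numeric / NULLIF(rel_searches, 0) AS rel_hit_ratio
+    FROM gs_gsc_dbstat_info();
+  ```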
+ + Return type: Tuple + + Example: + + ``` + MogDB=# select * from gs_gsc_dbstat_info(); + database_id | database_name | tup_searches | tup_hits | tup_miss | tup_count | tup_dead | tup_memory | rel_searches | rel_hits | rel_mis + s | rel_count | rel_dead | rel_memory | part_searches | part_hits | part_miss | part_count | part_dead | part_memory | total_memory | swa + pout_count | refcount + -------------+---------------+--------------+----------+----------+-----------+----------+------------+--------------+----------+-------- + --+-----------+----------+------------+---------------+-----------+-----------+------------+-----------+-------------+--------------+---- + -----------+---------- + 0 | | 300 | 235 | 31 | 22 | 2 | 9752 | 598 | 108 | 1 + 8 | 18 | 0 | 77720 | 0 | 0 | 0 | 0 | 0 | 0 | 752912 | + 0 | 0 + 16574 | postgres | 3368 | 2289 | 329 | 273 | 0 | 92593 | 1113 | 524 | 4 + 8 | 48 | 0 | 340456 | 0 | 0 | 0 | 0 | 0 | 0 | 4124792 | + 0 | 10 + (2 rows) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/global-temporary-table-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/global-temporary-table-functions.md index f87cee82..61a8551c 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/global-temporary-table-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/global-temporary-table-functions.md @@ -1,132 +1,132 @@ ---- -title: Global Temporary Table Functions -summary: Global Temporary Table Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Global Temporary Table Functions - -- pg_get_gtt_relstats(relOid) - - Description: Displays basic information about a global temporary table specified by the current session. - - Parameter: OID of the global temporary table - - Return type: record - - Example: - - ```sql - MogDB=# select * from pg_get_gtt_relstats(74069); - relfilenode | relpages | reltuples | relallvisible | relfrozenxid | relminmxid - -------------+----------+-----------+---------------+--------------+------------ - 74069 | 58 | 13000 | 0 | 11151 | 0 - (1 row) - ``` - -- pg_get_gtt_statistics(relOid, attnum, "::text) - - Description: Displays statistics about a single column in a global temporary table specified by the current session. 
- - Parameter: OID and the **attnum** attribute of the global temporary table - - Return type: record - - Example: - - ```sql - MogDB=# select * from pg_get_gtt_statistics(74069,1,''::text); - starelid | starelkind | staattnum | stainherit | stanullfrac | stawidth | stadistinct | stakind1 | stakind2 | stakind3 | stakind4 | stakind5 | staop1 | staop2 | staop3 | staop4 | staop5 | stanumbers1 | stanumbers2 | stanumbers3 | stanu - mbers4 | stanumbers5 | - stavalues1 - | stavalues2 | stavalues3 | stavalues4 | stavalues5 | stadndistinct | staextinfo - ----------+------------+-----------+------------+-------------+----------+-------------+----------+----------+----------+----------+----------+--------+--------+--------+--------+--------+-------------+-------------+-------------+------ - -------+-------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - ------------------------------------------------------------------------+------------+------------+------------+------------+---------------+------------ - 74069 | c | 1 | f | 0 | 4 | -1 | 2 | 3 | 0 | 0 | 0 | 97 | 97 | 0 | 0 | 0 | | {1} | | - | | {1,130,260,390,520,650,780,910,1040,1170,1300,1430,1560,1690,1820,1950,2080,2210,2340,2470,2600,2730,2860,2990,3120,3250,3380,3510,3640,3770,3900,4030,4160,4290,4420,4550,4680,4810,4940,5070,5200,5330,5460,5590,57 - 20,5850,5980,6110,6240,6370,6500,6630,6760,6890,7020,7150,7280,7410,7540,7670,7800,7930,8060,8190,8320,8450,8580,8710,8840,8970,9100,9230,9360,9490,9620,9750,9880,10010,10140,10270,10400,10530,10660,10790,10920,11050,11180,11310,11440,1 - 1570,11700,11830,11960,12090,12220,12350,12480,12610,12740,12870,13000} | | | | | 0 | - (1 row) - ``` - -- pg_gtt_attached_pid(relOid) - - Description: Displays PIDs of all threads that are using the specified global temporary table. - - Parameter: OID of the global temporary table - - Return type: record - - Example: - - ```sql - MogDB=# select * from pg_gtt_attached_pid(74069); - relid | pid - -------+----------------- - 74069 | 139648170456832 - 74069 | 139648123270912 - (2 rows) - ``` - -- dbe_perf.get_global_full_sql_by_timestamp(start_timestamp timestamp, end_timestamp timestamp) - - Description: Obtains full SQL information at the instance level. - - Return type: record - - **Table 1** dbe_perf.get_global_full_sql_by_timestamp parameter description - - | Parameter | Type | Description | - | :-------------- | :-------- | :--------------------------------------- | - | start_timestamp | timestamp | Start point of the SQL start time range. | - | end_timestamp | timestamp | End point of the SQL start time range. | - -- dbe_perf.get_global_slow_sql_by_timestamp(start_timestamp timestamp, end_timestamp timestamp) - - Description: Obtains slow SQL information at the instance level. - - Return type: record - - **Table 2** dbe_perf.get_global_slow_sql_by_timestamp parameter description - - | Parameter | Type | Description | - | :-------------- | :-------- | :--------------------------------------- | - | start_timestamp | timestamp | Start point of the SQL start time range. | - | end_timestamp | timestamp | End point of the SQL start time range. 
| - -- statement_detail_decode(detail text, format text, pretty bool) - - Parses the details column in a full or slow SQL statement. - - **Table 3** statement_detail_decode parameter description - - | Parameter | Type | Description | - | :-------- | :--- | :----------------------------------------------------------- | - | detail | text | Set of events generated by the SQL statement (unreadable). | - | format | text | Parsing output format.
The value is **plaintext** or **json**. | - | pretty | bool | Whether to display the text in pretty format when **format** is set to **plaintext**.
The options are as follows:
- The value **true** indicates that events are separated by `\n`.
- The value **false** indicates that events are separated by commas (,). |
-
-- pg_list_gtt_relfrozenxids()
-
-  Description: Displays the frozen XID of each session.
-
-  If the value of **pid** is **0**, the earliest frozen XID of all sessions is displayed.
-
-  Parameter: none
-
-  Return type: record
-
-  Example:
-
-  ```sql
-  MogDB=# select * from pg_list_gtt_relfrozenxids();
-        pid        | relfrozenxid
-  -----------------+--------------
-   139648123270912 |        11151
-   139648170456832 |        11155
-                 0 |        11151
-  (3 rows)
-  ```
+---
+title: Global Temporary Table Functions
+summary: Global Temporary Table Functions
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Global Temporary Table Functions
+
+- pg_get_gtt_relstats(relOid)
+
+  Description: Displays basic information about a global temporary table specified by the current session.
+
+  Parameter: OID of the global temporary table
+
+  Return type: record
+
+  Example:
+
+  ```sql
+  MogDB=# select * from pg_get_gtt_relstats(74069);
+   relfilenode | relpages | reltuples | relallvisible | relfrozenxid | relminmxid
+  -------------+----------+-----------+---------------+--------------+------------
+         74069 |       58 |     13000 |             0 |        11151 |          0
+  (1 row)
+  ```
+
+- pg_get_gtt_statistics(relOid, attnum, ''::text)
+
+  Description: Displays statistics about a single column in a global temporary table specified by the current session.
+
+  Parameter: OID and the **attnum** attribute of the global temporary table
+
+  Return type: record
+
+  Example:
+
+  ```sql
+  MogDB=# select * from pg_get_gtt_statistics(74069,1,''::text);
+  starelid | starelkind | staattnum | stainherit | stanullfrac | stawidth | stadistinct | stakind1 | stakind2 | stakind3 | stakind4 | stakind5 | staop1 | staop2 | staop3 | staop4 | staop5 | stanumbers1 | stanumbers2 | stanumbers3 | stanu
+  mbers4 | stanumbers5 |
+  stavalues1
+  | stavalues2 | stavalues3 | stavalues4 | stavalues5 | stadndistinct | staextinfo
+  ----------+------------+-----------+------------+-------------+----------+-------------+----------+----------+----------+----------+----------+--------+--------+--------+--------+--------+-------------+-------------+-------------+------
+  -------+-------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  ------------------------------------------------------------------------+------------+------------+------------+------------+---------------+------------
+   74069 | c | 1 | f | 0 | 4 | -1 | 2 | 3 | 0 | 0 | 0 | 97 | 97 | 0 | 0 | 0 | | {1} | |
+   | | {1,130,260,390,520,650,780,910,1040,1170,1300,1430,1560,1690,1820,1950,2080,2210,2340,2470,2600,2730,2860,2990,3120,3250,3380,3510,3640,3770,3900,4030,4160,4290,4420,4550,4680,4810,4940,5070,5200,5330,5460,5590,57
+  20,5850,5980,6110,6240,6370,6500,6630,6760,6890,7020,7150,7280,7410,7540,7670,7800,7930,8060,8190,8320,8450,8580,8710,8840,8970,9100,9230,9360,9490,9620,9750,9880,10010,10140,10270,10400,10530,10660,10790,10920,11050,11180,11310,11440,1
+  1570,11700,11830,11960,12090,12220,12350,12480,12610,12740,12870,13000} | | | | | 0 |
+  (1 row)
+  ```
+
+- pg_gtt_attached_pid(relOid)
+
+  Description: Displays PIDs of all threads that are using the specified
global temporary table. + + Parameter: OID of the global temporary table + + Return type: record + + Example: + + ```sql + MogDB=# select * from pg_gtt_attached_pid(74069); + relid | pid + -------+----------------- + 74069 | 139648170456832 + 74069 | 139648123270912 + (2 rows) + ``` + +- dbe_perf.get_global_full_sql_by_timestamp(start_timestamp timestamp, end_timestamp timestamp) + + Description: Obtains full SQL information at the instance level. + + Return type: record + + **Table 1** dbe_perf.get_global_full_sql_by_timestamp parameter description + + | Parameter | Type | Description | + | :-------------- | :-------- | :--------------------------------------- | + | start_timestamp | timestamp | Start point of the SQL start time range. | + | end_timestamp | timestamp | End point of the SQL start time range. | + +- dbe_perf.get_global_slow_sql_by_timestamp(start_timestamp timestamp, end_timestamp timestamp) + + Description: Obtains slow SQL information at the instance level. + + Return type: record + + **Table 2** dbe_perf.get_global_slow_sql_by_timestamp parameter description + + | Parameter | Type | Description | + | :-------------- | :-------- | :--------------------------------------- | + | start_timestamp | timestamp | Start point of the SQL start time range. | + | end_timestamp | timestamp | End point of the SQL start time range. | + +- statement_detail_decode(detail text, format text, pretty bool) + + Parses the details column in a full or slow SQL statement. + + **Table 3** statement_detail_decode parameter description + + | Parameter | Type | Description | + | :-------- | :--- | :----------------------------------------------------------- | + | detail | text | Set of events generated by the SQL statement (unreadable). | + | format | text | Parsing output format.
The value is **plaintext** or **json**. | + | pretty | bool | Whether to display the text in pretty format when **format** is set to **plaintext**.
The options are as follows:
- The value **true** indicates that events are separated by `\n`.
- The value **false** indicates that events are separated by commas (,). | + +- pg_list_gtt_relfrozenxids() + + Description: Displays the frozen XID of each session. + + If the value of **pid** is **0**, the earliest frozen XID of all sessions is displayed. + + Parameter: none + + Return type: record + + Example: + + ```sql + MogDB=# select * from pg_list_gtt_relfrozenxids(); + pid | relfrozenxid + -----------------+-------------- + 139648123270912 | 11151 + 139648170456832 | 11155 + 0 | 11151 + (3 rows) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/hash-function.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/hash-function.md index 657f630f..b28627d2 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/hash-function.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/hash-function.md @@ -1,594 +1,594 @@ ---- -title: Hash Function -summary: Hash Function -author: Guo Huan -date: 2021-10-28 ---- - -# Hash Function - -- bucketabstime(value, flag) - - Description: Hashes the value in the abstime format and finds the corresponding hash bucket. - - Parameter: **value** indicates the value to be converted, which is of the abstime type. **flag** is of the int type, indicating the data distribution mode. The value **0** indicates hash distribution. - - Return type: int32 - - Example: - - ```markdown - MogDB=# select bucketabstime('2011-10-01 10:10:10.112',1); - bucketabstime - --------------- - 13954 - (1 row) - ``` - -- bucketbool(value, flag) - - Description: Hashes the value in the bool format and finds the corresponding hash bucket. - - Parameter: **value** indicates the value to be converted, which is of the bool type. **flag** is of the int type, indicating the data distribution mode. The value **0** indicates hash distribution. - - Return type: int32 - - Example: - - ```sql - MogDB=# select bucketbool(true,1); - bucketbool - ------------ - 1 - (1 row) - MogDB=# select bucketbool(false,1); - bucketbool - ------------ - 0 - (1 row) - ``` - -- bucketbpchar(value, flag) - - Description: Hashes the value in the bpchar format and finds the corresponding hash bucket. - - Parameter: **value** indicates the value to be converted, which is of the bpchar type. **flag** is of the int type, indicating the data distribution mode. The value **0** indicates hash distribution. - - Return type: int32 - - Example: - - ```markdown - MogDB=# select bucketbpchar('test',1); - bucketbpchar - -------------- - 9761 - (1 row) - ``` - -- bucketbytea(value, flag) - - Description: Hashes the value in the bytea format and finds the corresponding hash bucket. - - Parameter: **value** indicates the value to be converted, which is of the bytea type. **flag** is of the int type, indicating the data distribution mode. The value **0** indicates hash distribution. - - Return type: int32 - - Example: - - ```markdown - MogDB=# select bucketbytea('test',1); - bucketbytea - ------------- - 9761 - (1 row) - ``` - -- bucketcash(value, flag) - - Description: Hashes the value in the money format and finds the corresponding hash bucket. - - Parameter: **value** indicates the value to be converted, which is of the money type. **flag** is of the int type, indicating the data distribution mode. The value **0** indicates hash distribution. 
- - Return type: int32 - - Example: - - ```markdown - MogDB=# select bucketcash(10::money,1); - bucketcash - ------------ - 8468 - (1 row) - ``` - -- getbucket(value, flag) - - Description: Obtains the hash bucket from the distribution column. - - **value** indicates the value to be entered, which can be of the following types: - - “char”, abstime, bigint, boolean, bytea, character varying, character, date, double precision, int2vector, integer, interval, money, name, numeric, nvarchar2, nvarchar2, oid, oidvector, raw, real, record, reltime, smalldatetime, smallint, text, time with time zone, time without time zone, timestamp with time zone, timestamp without time zone, tinyint, and uuid - - **flag** is of the int type, indicating the data distribution mode. - - Return type: integer - - Example: - - ```markdown - MogDB=# select getbucket(10,'H'); - getbucket - ----------- - 14535 - (1 row) - - MogDB=# select getbucket(11,'H'); - getbucket - ----------- - 13449 - (1 row) - - MogDB=# select getbucket(11,'R'); - getbucket - ----------- - 13449 - (1 row) - - MogDB=# select getbucket(12,'R'); - getbucket - ----------- - 9412 - (1 row) - ``` - -- hash_array(anyarray) - - Description: Hashes an array, obtains the result of an array element using the hash function, and returns the combination result. - - Parameter: data of the anyarray type - - Return type: integer - - Example: - - ```lua - MogDB=# select hash_array(ARRAY[[1,2,3],[1,2,3]]); - hash_array - ------------ - -382888479 - (1 row) - ``` - -- hash_group(key) - - Description: Calculates the hash value of each column in the Group Clause in the streaming engine. - - Parameter: **key** indicates the value of each column in the Group Clause. - - Return type: 32-bit hash value - - Example: - - ```sql - Perform the following steps in sequence. - MogDB=# CREATE TABLE tt(a int, b int,c int,d int); - NOTICE: The 'DISTRIBUTE BY' clause is not specified. Using 'a' as the distribution column by default. - HINT: Please use 'DISTRIBUTE BY' clause to specify suitable data distribution column. - CREATE TABLE - MogDB=# select * from tt; - a | b | c | d - ---+---+---+--- - (0 rows) - - MogDB=# insert into tt values(1,2,3,4); - INSERT 0 1 - MogDB=# select * from tt; - a | b | c | d - ---+---+---+--- - 1 | 2 | 3 | 4 - (1 row) - - MogDB=# insert into tt values(5,6,7,8); - INSERT 0 1 - MogDB=# select * from tt; - a | b | c | d - ---+---+---+--- - 1 | 2 | 3 | 4 - 5 | 6 | 7 | 8 - (2 rows) - - MogDB=# select hash_group(a,b) from tt where a=1 and b=2; - hash_group - ------------ - 990882385 - (1 row) - ``` - -- hash_numeric(numeric) - - Description: Calculates the hash value of numeric data. - - Parameter: data of the numeric type. - - Return type: integer - - Example: - - ```sql - MogDB=# select hash_numeric(30); - hash_numeric - -------------- - -282860963 - (1 row) - ``` - -- hash_range(anyrange) - - Description: Calculates the hash value of a range. - - Parameter: data of the anyrange type - - Return type: integer - - Example: - - ```sql - MogDB=# select hash_range(numrange(1.1,2.2)); - hash_range - ------------ - 683508754 - (1 row) - ``` - -- hashbpchar(character) - - Description: Calculates the hash value of bpchar. - - Parameter: data of the character type - - Return type: integer - - Example: - - ```sql - MogDB=# select hashbpchar('hello'); - hashbpchar - ------------- - -1870292951 - (1 row) - ``` - -- hashchar(char) - - Description: Converts char and Boolean data into hash values. 
- - Parameter: data of the char or bool type - - Return type: integer - - Example: - - ```sql - MogDB=# select hashbpchar('hello'); - hashbpchar - ------------- - -1870292951 - (1 row) - - MogDB=# select hashchar('true'); - hashchar - ------------ - 1686226652 - (1 row) - ``` - -- hashenum(anyenum) - - Description: Converts enumerated values to hash values. - - Parameter: data of the anyenum type - - Return type: integer - - Example: - - ```sql - MogDB=# CREATE TYPE b1 AS ENUM('good', 'bad', 'ugly'); - CREATE TYPE - MogDB=# call hashenum('good'::b1); - hashenum - ------------ - 1821213359 - (1 row) - ``` - -- hashfloat4(real) - - Description: Converts float4 values to hash values. - - Parameter: data of the real type - - Return type: integer - - Example: - - ```markdown - MogDB=# select hashfloat4(12.1234); - hashfloat4 - ------------ - 1398514061 - (1 row) - ``` - -- hashfloat8(double precision) - - Description: Converts float8 values to hash values. - - Parameter: data of the double precision type - - Return type: integer - - Example: - - ```markdown - MogDB=# select hashfloat8(123456.1234); - hashfloat8 - ------------ - 1673665593 - (1 row) - ``` - -- hashinet(inet) - - Description: Supports hashing indexes on inet or cidr. Returns the hash value of inet. - - Parameter: data of the inet type - - Return type: integer - - Example: - - ```sql - MogDB=# select hashinet('127.0.0.1'::inet); - hashinet - ------------- - -1435793109 - (1 row) - ``` - -- hashint1(tinyint) - - Description: Converts INT1 values to hash values. - - Parameter: data of the tinyint type - - Return type: uint32 - - Example: - - ```markdown - MogDB=# select hashint1(20); - hashint1 - ------------- - -2014641093 - (1 row) - ``` - -- hashint2(smallint) - - Description: Converts INT2 values to hash values. - - Parameter: data of the smallint type - - Return type: uint32 - - Example: - - ```markdown - MogDB=# select hashint2(20000); - hashint2 - ------------ - -863179081 - (1 row) - ``` - -- bucketchar - - Description: Calculates the hash value of the input parameter. - - Parameter: **char** and **integer** - - Return type: integer - -- bucketdate - - Description: Calculates the hash value of the input parameter. - - Parameters: **date** and **integer** - - Return type: integer - -- bucketfloat4 - - Description: Calculates the hash value of the input parameter. - - Parameter: **real** and **integer** - - Return type: integer - -- bucketfloat8 - - Description: Calculates the hash value of the input parameter. - - Parameters: **double precision** and **integer** - - Return type: integer - -- bucketint1 - - Description: Calculates the hash value of the input parameter. - - Parameter: **tinyint** and **integer** - - Return type: integer - -- bucketint2 - - Description: Calculates the hash value of the input parameter. - - Parameters: **smallint** and **integer** - - Return type: integer - -- bucketint2vector - - Description: Calculates the hash value of the input parameter. - - Parameter: **int2vector** and **integer** - - Return type: integer - -- bucketint4 - - Description: Calculates the hash value of the input parameter. - - Parameter: integer, integer - - Return type: integer - -- bucketint8 - - Description: Calculates the hash value of the input parameter. - - Parameter: bigint, integer - - Return type: integer - -- bucketinterval - - Description: Calculates the hash value of the input parameter. 
- - Parameter: interval, integer - - Return type: integer - -- bucketname - - Description: Calculates the hash value of the input parameter. - - Parameter: name, integer - - Return type: integer - -- bucketnumeric - - Description: Calculates the hash value of the input parameter. - - Parameter: numeric, integer - - Return type: integer - -- bucketnvarchar2 - - Description: Calculates the hash value of the input parameter. - - Parameter: nvarchar, nvarchar2, integer - - Return type: integer - -- bucketoid - - Description: Calculates the hash value of the input parameter. - - Parameters: oid, integer - - Return type: integer - -- bucketoidvector - - Description: Calculates the hash value of the input parameter. - - Parameter: oidvector, integer - - Return type: integer - -- bucketraw - - Description: Calculates the hash value of the input parameter. - - Parameter: raw, integer - - Return type: integer - -- bucketreltime - - Description: Calculates the hash value of the input parameter. - - Parameter: reltime, integer - - Return type: integer - -- bucketsmalldatetime - - Description: Calculates the hash value of the input parameter. - - Parameter: smalldatetime, integer - - Return type: integer - -- buckettext - - Description: Calculates the hash value of the input parameter. - - Parameter: text, integer - - Return type: integer - -- buckettime - - Description: Calculates the hash value of the input parameter. - - Parameter: time without time zone, integer - - Return type: integer - -- buckettimestamp - - Description: Calculates the hash value of the input parameter. - - Parameter: timestamp without time zone, integer - - Return type: integer - -- buckettimestamptz - - Description: Calculates the hash value of the input parameter. - - Parameter: timestamp with time zone, integer - - Return type: integer - -- buckettimetz - - Description: Calculates the hash value of the input parameter. - - Parameter: time with time zone, integer - - Return type: integer - -- bucketuuid - - Description: Calculates the hash value of the input parameter. - - Parameters: uuid, integer - - Return type: integer - -- bucketvarchar - - Description: Calculates the hash value of the input parameter. - - Parameter: character varying, integer - +--- +title: Hash Function +summary: Hash Function +author: Guo Huan +date: 2021-10-28 +--- + +# Hash Function + +- bucketabstime(value, flag) + + Description: Hashes the value in the abstime format and finds the corresponding hash bucket. + + Parameter: **value** indicates the value to be converted, which is of the abstime type. **flag** is of the int type, indicating the data distribution mode. The value **0** indicates hash distribution. + + Return type: int32 + + Example: + + ```markdown + MogDB=# select bucketabstime('2011-10-01 10:10:10.112',1); + bucketabstime + --------------- + 13954 + (1 row) + ``` + +- bucketbool(value, flag) + + Description: Hashes the value in the bool format and finds the corresponding hash bucket. + + Parameter: **value** indicates the value to be converted, which is of the bool type. **flag** is of the int type, indicating the data distribution mode. The value **0** indicates hash distribution. + + Return type: int32 + + Example: + + ```sql + MogDB=# select bucketbool(true,1); + bucketbool + ------------ + 1 + (1 row) + MogDB=# select bucketbool(false,1); + bucketbool + ------------ + 0 + (1 row) + ``` + +- bucketbpchar(value, flag) + + Description: Hashes the value in the bpchar format and finds the corresponding hash bucket. 
+
+  Parameter: **value** indicates the value to be converted, which is of the bpchar type. **flag** is of the int type, indicating the data distribution mode. The value **0** indicates hash distribution.
+
+  Return type: int32
+
+  Example:
+
+  ```markdown
+  MogDB=# select bucketbpchar('test',1);
+   bucketbpchar
+  --------------
+           9761
+  (1 row)
+  ```
+
+- bucketbytea(value, flag)
+
+  Description: Hashes the value in the bytea format and finds the corresponding hash bucket.
+
+  Parameter: **value** indicates the value to be converted, which is of the bytea type. **flag** is of the int type, indicating the data distribution mode. The value **0** indicates hash distribution.
+
+  Return type: int32
+
+  Example:
+
+  ```markdown
+  MogDB=# select bucketbytea('test',1);
+   bucketbytea
+  -------------
+          9761
+  (1 row)
+  ```
+
+- bucketcash(value, flag)
+
+  Description: Hashes the value in the money format and finds the corresponding hash bucket.
+
+  Parameter: **value** indicates the value to be converted, which is of the money type. **flag** is of the int type, indicating the data distribution mode. The value **0** indicates hash distribution.
+
+  Return type: int32
+
+  Example:
+
+  ```markdown
+  MogDB=# select bucketcash(10::money,1);
+   bucketcash
+  ------------
+         8468
+  (1 row)
+  ```
+
+- getbucket(value, flag)
+
+  Description: Obtains the hash bucket from the distribution column.
+
+  **value** indicates the value to be entered, which can be of the following types:
+
+  “char”, abstime, bigint, boolean, bytea, character varying, character, date, double precision, int2vector, integer, interval, money, name, numeric, nvarchar2, oid, oidvector, raw, real, record, reltime, smalldatetime, smallint, text, time with time zone, time without time zone, timestamp with time zone, timestamp without time zone, tinyint, and uuid
+
+  **flag** is of the int type, indicating the data distribution mode.
+
+  Return type: integer
+
+  Example:
+
+  ```markdown
+  MogDB=# select getbucket(10,'H');
+   getbucket
+  -----------
+       14535
+  (1 row)
+
+  MogDB=# select getbucket(11,'H');
+   getbucket
+  -----------
+       13449
+  (1 row)
+
+  MogDB=# select getbucket(11,'R');
+   getbucket
+  -----------
+       13449
+  (1 row)
+
+  MogDB=# select getbucket(12,'R');
+   getbucket
+  -----------
+        9412
+  (1 row)
+  ```
+
+- hash_array(anyarray)
+
+  Description: Hashes an array: applies the hash function to each array element and combines the per-element results into a single value.
+
+  Parameter: data of the anyarray type
+
+  Return type: integer
+
+  Example:
+
+  ```lua
+  MogDB=# select hash_array(ARRAY[[1,2,3],[1,2,3]]);
+   hash_array
+  ------------
+   -382888479
+  (1 row)
+  ```
+
+- hash_group(key)
+
+  Description: Calculates the hash value of each column in the Group Clause in the streaming engine.
+
+  Parameter: **key** indicates the value of each column in the Group Clause.
+
+  Return type: 32-bit hash value
+
+  Example:
+
+  ```sql
+  -- Perform the following steps in sequence.
+  MogDB=# CREATE TABLE tt(a int, b int,c int,d int);
+  NOTICE: The 'DISTRIBUTE BY' clause is not specified. Using 'a' as the distribution column by default.
+  HINT: Please use 'DISTRIBUTE BY' clause to specify suitable data distribution column.
+ CREATE TABLE + MogDB=# select * from tt; + a | b | c | d + ---+---+---+--- + (0 rows) + + MogDB=# insert into tt values(1,2,3,4); + INSERT 0 1 + MogDB=# select * from tt; + a | b | c | d + ---+---+---+--- + 1 | 2 | 3 | 4 + (1 row) + + MogDB=# insert into tt values(5,6,7,8); + INSERT 0 1 + MogDB=# select * from tt; + a | b | c | d + ---+---+---+--- + 1 | 2 | 3 | 4 + 5 | 6 | 7 | 8 + (2 rows) + + MogDB=# select hash_group(a,b) from tt where a=1 and b=2; + hash_group + ------------ + 990882385 + (1 row) + ``` + +- hash_numeric(numeric) + + Description: Calculates the hash value of numeric data. + + Parameter: data of the numeric type. + + Return type: integer + + Example: + + ```sql + MogDB=# select hash_numeric(30); + hash_numeric + -------------- + -282860963 + (1 row) + ``` + +- hash_range(anyrange) + + Description: Calculates the hash value of a range. + + Parameter: data of the anyrange type + + Return type: integer + + Example: + + ```sql + MogDB=# select hash_range(numrange(1.1,2.2)); + hash_range + ------------ + 683508754 + (1 row) + ``` + +- hashbpchar(character) + + Description: Calculates the hash value of bpchar. + + Parameter: data of the character type + + Return type: integer + + Example: + + ```sql + MogDB=# select hashbpchar('hello'); + hashbpchar + ------------- + -1870292951 + (1 row) + ``` + +- hashchar(char) + + Description: Converts char and Boolean data into hash values. + + Parameter: data of the char or bool type + + Return type: integer + + Example: + + ```sql + MogDB=# select hashbpchar('hello'); + hashbpchar + ------------- + -1870292951 + (1 row) + + MogDB=# select hashchar('true'); + hashchar + ------------ + 1686226652 + (1 row) + ``` + +- hashenum(anyenum) + + Description: Converts enumerated values to hash values. + + Parameter: data of the anyenum type + + Return type: integer + + Example: + + ```sql + MogDB=# CREATE TYPE b1 AS ENUM('good', 'bad', 'ugly'); + CREATE TYPE + MogDB=# call hashenum('good'::b1); + hashenum + ------------ + 1821213359 + (1 row) + ``` + +- hashfloat4(real) + + Description: Converts float4 values to hash values. + + Parameter: data of the real type + + Return type: integer + + Example: + + ```markdown + MogDB=# select hashfloat4(12.1234); + hashfloat4 + ------------ + 1398514061 + (1 row) + ``` + +- hashfloat8(double precision) + + Description: Converts float8 values to hash values. + + Parameter: data of the double precision type + + Return type: integer + + Example: + + ```markdown + MogDB=# select hashfloat8(123456.1234); + hashfloat8 + ------------ + 1673665593 + (1 row) + ``` + +- hashinet(inet) + + Description: Supports hashing indexes on inet or cidr. Returns the hash value of inet. + + Parameter: data of the inet type + + Return type: integer + + Example: + + ```sql + MogDB=# select hashinet('127.0.0.1'::inet); + hashinet + ------------- + -1435793109 + (1 row) + ``` + +- hashint1(tinyint) + + Description: Converts INT1 values to hash values. + + Parameter: data of the tinyint type + + Return type: uint32 + + Example: + + ```markdown + MogDB=# select hashint1(20); + hashint1 + ------------- + -2014641093 + (1 row) + ``` + +- hashint2(smallint) + + Description: Converts INT2 values to hash values. + + Parameter: data of the smallint type + + Return type: uint32 + + Example: + + ```markdown + MogDB=# select hashint2(20000); + hashint2 + ------------ + -863179081 + (1 row) + ``` + +- bucketchar + + Description: Calculates the hash value of the input parameter. 
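+
+  Like the remaining bucket* functions below, it takes the value to hash plus an integer flag for the data distribution mode. A minimal sketch of the calling pattern (the returned bucket numbers depend on the data and build, so output is omitted here):
+
+  ```sql
+  -- Each call hashes its first argument and maps it to a hash bucket;
+  -- the second argument is the data distribution flag.
+  SELECT bucketchar('a'::char, 1);
+  SELECT bucketint4(42, 1);
+  SELECT bucketint8(42::bigint, 1);
+  ```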
+ + Parameter: **char** and **integer** + + Return type: integer + +- bucketdate + + Description: Calculates the hash value of the input parameter. + + Parameters: **date** and **integer** + + Return type: integer + +- bucketfloat4 + + Description: Calculates the hash value of the input parameter. + + Parameter: **real** and **integer** + + Return type: integer + +- bucketfloat8 + + Description: Calculates the hash value of the input parameter. + + Parameters: **double precision** and **integer** + + Return type: integer + +- bucketint1 + + Description: Calculates the hash value of the input parameter. + + Parameter: **tinyint** and **integer** + + Return type: integer + +- bucketint2 + + Description: Calculates the hash value of the input parameter. + + Parameters: **smallint** and **integer** + + Return type: integer + +- bucketint2vector + + Description: Calculates the hash value of the input parameter. + + Parameter: **int2vector** and **integer** + + Return type: integer + +- bucketint4 + + Description: Calculates the hash value of the input parameter. + + Parameter: integer, integer + + Return type: integer + +- bucketint8 + + Description: Calculates the hash value of the input parameter. + + Parameter: bigint, integer + + Return type: integer + +- bucketinterval + + Description: Calculates the hash value of the input parameter. + + Parameter: interval, integer + + Return type: integer + +- bucketname + + Description: Calculates the hash value of the input parameter. + + Parameter: name, integer + + Return type: integer + +- bucketnumeric + + Description: Calculates the hash value of the input parameter. + + Parameter: numeric, integer + + Return type: integer + +- bucketnvarchar2 + + Description: Calculates the hash value of the input parameter. + + Parameter: nvarchar, nvarchar2, integer + + Return type: integer + +- bucketoid + + Description: Calculates the hash value of the input parameter. + + Parameters: oid, integer + + Return type: integer + +- bucketoidvector + + Description: Calculates the hash value of the input parameter. + + Parameter: oidvector, integer + + Return type: integer + +- bucketraw + + Description: Calculates the hash value of the input parameter. + + Parameter: raw, integer + + Return type: integer + +- bucketreltime + + Description: Calculates the hash value of the input parameter. + + Parameter: reltime, integer + + Return type: integer + +- bucketsmalldatetime + + Description: Calculates the hash value of the input parameter. + + Parameter: smalldatetime, integer + + Return type: integer + +- buckettext + + Description: Calculates the hash value of the input parameter. + + Parameter: text, integer + + Return type: integer + +- buckettime + + Description: Calculates the hash value of the input parameter. + + Parameter: time without time zone, integer + + Return type: integer + +- buckettimestamp + + Description: Calculates the hash value of the input parameter. + + Parameter: timestamp without time zone, integer + + Return type: integer + +- buckettimestamptz + + Description: Calculates the hash value of the input parameter. + + Parameter: timestamp with time zone, integer + + Return type: integer + +- buckettimetz + + Description: Calculates the hash value of the input parameter. + + Parameter: time with time zone, integer + + Return type: integer + +- bucketuuid + + Description: Calculates the hash value of the input parameter. 
+ + Parameters: uuid, integer + + Return type: integer + +- bucketvarchar + + Description: Calculates the hash value of the input parameter. + + Parameter: character varying, integer + Return type: integer \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/hll-functions-and-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/hll-functions-and-operators.md index dd2e50fd..f91e16e4 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/hll-functions-and-operators.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/hll-functions-and-operators.md @@ -1,876 +1,876 @@ ---- -title: HLL Functions and Operators -summary: HLL Functions and Operators -author: Zhang Cuiping -date: 2021-06-15 ---- - -# HLL Functions and Operators - -## Hash Functions - -- hll_hash_boolean(bool) - - Description: Hashes data of the Boolean type. - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_boolean(FALSE); - hll_hash_boolean - --------------------- - -5451962507482445012 - (1 row) - ``` - -- hll_hash_boolean(bool, int32) - - Description: Configures a hash seed (that is, change the hash policy) and hashes data of the bool type. - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_boolean(FALSE, 10); - hll_hash_boolean - -------------------- - -1169037589280886076 - (1 row) - ``` - -- hll_hash_smallint(smallint) - - Description: Hashes data of the smallint type. - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_smallint(100::smallint); - hll_hash_smallint - --------------------- - 962727970174027904 - (1 row) - ``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If parameters with the same numeric value are hashed using different data types, the data will differ, because hash functions select different calculation policies for each type. - -- hll_hash_smallint(smallint, int32) - - Description: Configures a hash seed (that is, change the hash policy) and hashes data of the smallint type. - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_smallint(100::smallint, 10); - hll_hash_smallint - --------------------- - -9056177146160443041 - (1 row) - ``` - -- hll_hash_integer(integer) - - Description: Hashes data of the integer type. - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_integer(0); - hll_hash_integer - ---------------------- - 5156626420896634997 - (1 row) - ``` - -- hll_hash_integer(integer, int32) - - Description: Hashes data of the integer type and configures a hash seed (that is, change the hash policy). - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_integer(0, 10); - hll_hash_integer - -------------------- - -5035020264353794276 - (1 row) - ``` - -- hll_hash_bigint(bigint) - - Description: Hashes data of the bigint type. - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_bigint(100::bigint); - hll_hash_bigint - --------------------- - -2401963681423227794 - (1 row) - ``` - -- hll_hash_bigint(bigint, int32) - - Description: Hashes data of the bigint type and configures a hash seed (that is, change the hash policy). 
- - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_bigint(100::bigint, 10); - hll_hash_bigint - --------------------- - -2305749404374433531 - (1 row) - ``` - -- hll_hash_bytea(bytea) - - Description: Hashes data of the bytea type. - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_bytea(E'\\x'); - hll_hash_bytea - ---------------- - 0 - (1 row) - ``` - -- hll_hash_bytea(bytea, int32) - - Description: Hashes data of the bytea type and configures a hash seed (that is, change the hash policy). - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_bytea(E'\\x', 10); - hll_hash_bytea - --------------------- - 7233188113542599437 - (1 row) - ``` - -- hll_hash_text(text) - - Description: Hashes data of the text type. - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_text('AB'); - hll_hash_text - --------------------- - -5666002586880275174 - (1 row) - ``` - -- hll_hash_text(text, int32) - - Description: Hashes data of the text type and configures a hash seed (that is, change the hash policy). - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# SELECT hll_hash_text('AB', 10); - hll_hash_text - --------------------- - -2215507121143724132 - (1 row) - ``` - -- hll_hash_any(anytype) - - Description: Hashes data of any type. - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# select hll_hash_any(1); - hll_hash_any - ---------------------- - -1316670585935156930 - (1 row) - - MogDB=# select hll_hash_any('08:00:2b:01:02:03'::macaddr); - hll_hash_any - ---------------------- - -3719950434455589360 - (1 row) - ``` - -- hll_hash_any(anytype, int32) - - Description: Hashes data of any type and configures a hash seed (that is, change the hash policy). - - Return type: hll_hashval - - Example: - - ```sql - MogDB=# select hll_hash_any(1, 10); - hll_hash_any - ---------------------- - 7048553517657992351 - (1 row) - ``` - -- hll_hashval_eq(hll_hashval, hll_hashval) - - Description: Compares two pieces of data of the **hll_hashval** type to check whether they are the same. - - Return type: Boolean - - Example: - - ```sql - MogDB=# select hll_hashval_eq(hll_hash_integer(1), hll_hash_integer(1)); - hll_hashval_eq - ---------------- - t - (1 row) - ``` - -- hll_hashval_ne(hll_hashval, hll_hashval) - - Description: Compares two pieces of data of the **hll_hashval** type to check whether they are different. - - Return type: Boolean - - Example: - - ```sql - MogDB=# select hll_hashval_ne(hll_hash_integer(1), hll_hash_integer(1)); - hll_hashval_ne - ---------------- - f - (1 row) - ``` - -## HLL Functions - -There are three HLL modes: explicit, sparse, and full. When the data size is small, the explicit mode is used. In this mode, distinct values are calculated without errors. As the number of distinct values increases, the HLL mode is switched to the sparse and full modes in sequence. The two modes have no difference in the calculation result, but vary in the calculation efficiency of HLL functions and the storage space of HLL objects. The following functions can be used to view some HLL parameters: - -- hll_print(hll) - - Description: Prints some debugging parameters of an HLL. 
- - Example: - - ```sql - MogDB=# select hll_print(hll_empty()); - hll_print - ------------------------------------------------------------------------------- - type=1(HLL_EMPTY), log2m=14, log2explicit=10, log2sparse=12, duplicatecheck=0 - (1 row) - ``` - -- hll_type(hll) - - Description: Checks the type of the current HLL. The return values are described as follows: **0** indicates **HLL_UNINIT**, an HLL object that is not initialized. **1** indicates **HLL_EMPTY**, an empty HLL object. **2** indicates **HLL_EXPLICIT**, an HLL object in explicit mode. **3** indicates **HLL_SPARSE**, an HLL object in sparse mode. **4** indicates **HLL_FULL**, an HLL object in full mode. **5** indicates **HLL_UNDEFINED**, an invalid HLL object. - - Example: - - ```sql - MogDB=# select hll_type(hll_empty()); - hll_type - ---------- - 1 - (1 row) - ``` - -- hll_log2m(hll) - - Description: Checks the value of **log2m** in the current HLL data structure. **log2m** is the logarithm of the number of buckets. This value affects the error rate of calculating distinct values by HLL. The error rate = ±1.04/√(2^log2m). If the value of **log2m** ranges from 10 to 16, HLL sets the number of buckets to 2log2m. When the value of **log2explicit** is explicitly set to **-1**, the built-in default value is used. - - Example: - - ```sql - MogDB=# select hll_log2m(hll_empty()); - hll_log2m - ----------- - 14 - (1 row) - - MogDB=# select hll_log2m(hll_empty(10)); - hll_log2m - ----------- - 10 - (1 row) - - MogDB=# select hll_log2m(hll_empty(-1)); - hll_log2m - ----------- - 14 - (1 row) - ``` - -- hll_log2explicit(hll) - - Description: Queries the **log2explicit** value in the current HLL data structure. Generally, the HLL changes from the explicit mode to the sparse mode and then to the full mode. This process is called the promotion hierarchy policy. You can change the value of **log2explicit** to change the policy. For example, if **log2explicit** is set to **0**, an HLL will skip the explicit mode and directly enter the sparse mode. When the value of **log2explicit** is explicitly set to a value ranging from 1 to 12, HLL will switch to the sparse mode when the length of the data segment exceeds 2log2explicit. When the value of **log2explicit** is explicitly set to **-1**, the built-in default value is used. - - Example: - - ```sql - MogDB=# select hll_log2explicit(hll_empty()); - hll_log2explicit - ------------------ - 10 - (1 row) - - MogDB=# select hll_log2explicit(hll_empty(12, 8)); - hll_log2explicit - ------------------ - 8 - (1 row) - - MogDB=# select hll_log2explicit(hll_empty(12, -1)); - hll_log2explicit - ------------------ - 10 - (1 row) - ``` - -- hll_log2sparse(hll) - - Description: Queries the value of **log2sparse** in the current HLL data structure. Generally, the HLL changes from the explicit mode to the sparse mode and then to the full mode. This process is called the promotion hierarchy policy. You can adjust the value of **log2sparse** to change the policy. For example, if the value of **log2sparse** is **0**, the system skips the sparse mode and directly enters the full mode. If the value of **log2sparse** is explicitly set to a value ranging from 1 to 14, HLL will switch to the full mode when the length of the data segment exceeds 2log2sparse. When the value of **log2sparse** is explicitly set to **-1**, the built-in default value is used. 
- - Example: - - ```sql - MogDB=# select hll_log2sparse(hll_empty()); - hll_log2sparse - ---------------- - 12 - (1 row) - - MogDB=# select hll_log2sparse(hll_empty(12, 8, 10)); - hll_log2sparse - ---------------- - 10 - (1 row) - - MogDB=# select hll_log2sparse(hll_empty(12, 8, -1)); - hll_log2sparse - ---------------- - 12 - (1 row) - ``` - -- hll_duplicatecheck(hll) - - Description: Specifies whether duplicate check is enabled. The value **0** indicates that it is disabled and the value **1** indicates that it is enabled. This function is disabled by default. If there are many duplicate values, you can enable this function to improve efficiency. When the value of **duplicatecheck** is explicitly set to **-1**, the built-in default value is used. - - Example: - - ```sql - MogDB=# select hll_duplicatecheck(hll_empty()); - hll_duplicatecheck - -------------------- - 0 - (1 row) - - MogDB=# select hll_duplicatecheck(hll_empty(12, 8, 10, 1)); - hll_duplicatecheck - -------------------- - 1 - (1 row) - - MogDB=# select hll_duplicatecheck(hll_empty(12, 8, 10, -1)); - hll_duplicatecheck - -------------------- - 0 - (1 row) - ``` - -## Functional Functions - -- hll_empty() - - Description: Creates an empty HLL. - - Return type: hll - - Example: - - ```sql - MogDB=# select hll_empty(); - hll_empty - ------------------------------------------------------------ - \x484c4c00000000002b05000000000000000000000000000000000000 - (1 row) - ``` - -- hll_empty(int32 log2m) - - Description: Creates an empty HLL and sets the **log2m** parameter. The parameter value ranges from 10 to 16. If the input is **-1**, the built-in default value is used. - - Return type: HLL - - Example: - - ```sql - MogDB=# select hll_empty(10); - hll_empty - ------------------------------------------------------------ - \x484c4c00000000002b04000000000000000000000000000000000000 - (1 row) - - MogDB=# select hll_empty(-1); - hll_empty - ------------------------------------------------------------ - \x484c4c00000000002b05000000000000000000000000000000000000 - (1 row) - ``` - -- hll_empty(int32 log2m, int32 log2explicit) - - Description: Creates an empty HLL and sets the **log2m** and **log2explicit** parameters in sequence. The value of **log2explicit** ranges from 0 to 12. The value **0** indicates that the explicit mode is skipped. This parameter is used to set the threshold of the explicit mode. When the length of the data segment reaches 2log2explicit, the mode is switched to the sparse or full mode. If the input is **-1**, the built-in default value of **log2explicit** is used. - - Return type: HLL - - Example: - - ```sql - MogDB=# select hll_empty(10, 4); - hll_empty - ------------------------------------------------------------ - \x484c4c00000000001304000000000000000000000000000000000000 - (1 row) - - MogDB=# select hll_empty(10, -1); - hll_empty - ------------------------------------------------------------ - \x484c4c00000000002b04000000000000000000000000000000000000 - (1 row) - ``` - -- hll_empty(int32 log2m, int32 log2explicit, int64 log2sparse) - - Description: Creates an empty HLL and sets the **log2m**, **log2explicit** and **log2sparse** parameters in sequence. The value of **log2sparse** ranges from 0 to 14. The value **0** indicates that the sparse mode is skipped. This parameter is used to set the threshold of the sparse mode. When the length of the data segment reaches 2log2sparse, the mode is switched to the full mode. If the input is **-1**, the built-in default value of **log2sparse** is used. 
- - Return type: HLL - - Example: - - ```sql - MogDB=# select hll_empty(10, 4, 8); - hll_empty - ------------------------------------------------------------ - \x484c4c00000000001204000000000000000000000000000000000000 - (1 row) - - MogDB=# select hll_empty(10, 4, -1); - hll_empty - ------------------------------------------------------------ - \x484c4c00000000001304000000000000000000000000000000000000 - (1 row) - ``` - -- hll_empty(int32 log2m, int32 log2explicit, int64 log2sparse, int32 duplicatecheck) - - Description: Creates an empty HLL and sets the **log2m**, **log2explicit**, **log2sparse**, and **duplicatecheck** parameters in sequence. The value of **duplicatecheck** is **0** or **1**, indicating whether the duplicate check mode is enabled. By default, this mode is disabled. If the input is **-1**, the built-in default value of **duplicatecheck** is used. - - Return type: HLL - - Example: - - ```sql - MogDB=# select hll_empty(10, 4, 8, 0); - hll_empty - ------------------------------------------------------------ - \x484c4c00000000001204000000000000000000000000000000000000 - (1 row) - - MogDB=# select hll_empty(10, 4, 8, -1); - hll_empty - ------------------------------------------------------------ - \x484c4c00000000001204000000000000000000000000000000000000 - (1 row) - ``` - -- hll_add(hll, hll_hashval) - - Description: Adds **hll_hashval** to an HLL. - - Return type: HLL - - Example: - - ```sql - MogDB=# select hll_add(hll_empty(), hll_hash_integer(1)); - hll_add - ---------------------------------------------------------------------------- - \x484c4c08000002002b0900000000000000f03f3e2921ff133fbaed3e2921ff133fbaed00 - (1 row) - ``` - -- hll_add_rev(hll_hashval, hll) - - Description: Adds **hll_hashval** to an HLL. This function works the same as **hll_add**, except that the positions of parameters are switched. - - Return type: HLL - - Example: - - ```sql - MogDB=# select hll_add_rev(hll_hash_integer(1), hll_empty()); - hll_add_rev - ---------------------------------------------------------------------------- - \x484c4c08000002002b0900000000000000f03f3e2921ff133fbaed3e2921ff133fbaed00 - (1 row) - ``` - -- hll_eq(hll, hll) - - Description: Compares two HLLs to check whether they are the same. - - Return type: Boolean - - Example: - - ```sql - MogDB=# select hll_eq(hll_add(hll_empty(), hll_hash_integer(1)), hll_add(hll_empty(), hll_hash_integer(2))); - hll_eq - -------- - f - (1 row) - ``` - -- hll_ne(hll, hll) - - Description: Compares two HLLs to check whether they are different. - - Return type: Boolean - - Example: - - ```sql - MogDB=# select hll_ne(hll_add(hll_empty(), hll_hash_integer(1)), hll_add(hll_empty(), hll_hash_integer(2))); - hll_ne - -------- - t - (1 row) - ``` - -- hll_cardinality(hll) - - Description: Calculates the number of distinct values of an HLL. - - Return type: int - - Example: - - ```sql - MogDB=# select hll_cardinality(hll_empty() || hll_hash_integer(1)); - hll_cardinality - ----------------- - 1 - (1 row) - ``` - -- hll_union(hll, hll) - - Description: Performs an UNION operation on two HLL data structures to obtain one HLL. 
- - Return type: HLL - - Example: - - ```sql - MogDB=# select hll_union(hll_add(hll_empty(), hll_hash_integer(1)), hll_add(hll_empty(), hll_hash_integer(2))); - hll_union - -------------------------------------------------------------------------------------------- - \x484c4c10002000002b090000000000000000400000000000000000b3ccc49320cca1ae3e2921ff133fbaed00 - (1 row) - ``` - -## Aggregate Functions - -- hll_add_agg(hll_hashval) - - Description: Groups hashed data into HLL - - Return type: HLL - - Example: - - ```sql - -- Prepare data. - MogDB=# create table t_id(id int); - MogDB=# insert into t_id values(generate_series(1,500)); - MogDB=# create table t_data(a int, c text); - MogDB=# insert into t_data select mod(id,2), id from t_id; - - -- Create a table and specify an HLL column. - MogDB=# create table t_a_c_hll(a int, c hll); - - -- Use GROUP BY on column a to group data, and insert the data to the HLL. - MogDB=# insert into t_a_c_hll select a, hll_add_agg(hll_hash_text(c)) from t_data group by a; - - -- Calculate the number of distinct values for each group in the HLL. - MogDB=# select a, #c as cardinality from t_a_c_hll order by a; - a | cardinality - ---+------------------ - 0 | 247.862354346299 - 1 | 250.908710610377 - (2 rows) - ``` - -- hll_add_agg(hll_hashval, int32 log2m) - - Description: Groups hashed data into HLL and specifies the **log2m** parameter. The value ranges from 10 to 16. If the input is **-1** or **NULL**, the built-in default value is used. - - Return type: HLL - - Example: - - ```sql - MogDB=# select hll_cardinality(hll_add_agg(hll_hash_text(c), 12)) from t_data; - hll_cardinality - ------------------ - 497.965240179228 - (1 row) - ``` - -- hll_add_agg(hll_hashval, int32 log2m, int32 log2explicit) - - Description: Groups hashed data into HLL and specifies the **log2m** and **log2explicit** parameters in sequence. The value of **log2explicit** ranges from 0 to 12. The value **0** indicates that the explicit mode is skipped. This parameter is used to set the threshold of the explicit mode. When the length of the data segment reaches 2log2explicit, the mode is switched to the sparse or full mode. If the input is **-1** or **NULL**, the built-in default value of **log2explicit** is used. - - Return type: HLL - - Example: - - ```sql - MogDB=# select hll_cardinality(hll_add_agg(hll_hash_text(c), NULL, 1)) from t_data; - hll_cardinality - ------------------ - 498.496062953313 - (1 row) - ``` - -- hll_add_agg(hll_hashval, int32 log2m, int32 log2explicit, int64 log2sparse) - - Description: Groups hashed data into HLL and sets the parameters **log2m**, **log2explicit**, and **log2sparse** in sequence. The value of **log2sparse** ranges from 0 to 14. The value **0** indicates that the sparse mode is skipped. This parameter is used to set the threshold of the sparse mode. When the length of the data segment reaches 2log2sparse, the mode is switched to the full mode. If the input is **-1** or **NULL**, the built-in default value of **log2sparse** is used. - - Return type: HLL - - Example: - - ```sql - MogDB=# select hll_cardinality(hll_add_agg(hll_hash_text(c), NULL, 6, 10)) from t_data; - hll_cardinality - ------------------ - 498.496062953313 - (1 row) - ``` - -- hll_add_agg(hll_hashval, int32 log2m, int32 log2explicit, int64 log2sparse, int32 duplicatecheck) - - Description: Groups hashed data into HLL and sets the **log2m**, **log2explicit**, **log2sparse**, and **duplicatecheck** parameters. 
The value of **duplicatecheck** can be **0** or **1**, indicating whether to enable this mode. By default, this mode is disabled. If the input is **-1** or **NULL**, the built-in default value of **duplicatecheck** is used. - - Return type: HLL - - Example: - - ```sql - MogDB=# select hll_cardinality(hll_add_agg(hll_hash_text(c), NULL, 6, 10, -1)) from t_data; - hll_cardinality - ------------------ - 498.496062953313 - (1 row) - ``` - -- hll_union_agg(hll) - - Description: Performs an UNION operation on multiple pieces of data of the HLL type to obtain one HLL. - - Return type: HLL - - Example: - - ```sql - -- Perform an UNION operation on data of the HLL type in each group to obtain one HLL, and calculate the number of distinct values. - MogDB=# select #hll_union_agg(c) as cardinality from t_a_c_hll; - cardinality - ------------------ - 498.496062953313 - (1 row) - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** To perform an UNION operation on data in multiple HLLs, ensure that the HLLs have the same precision. Otherwise, the UNION operation cannot be performed. This constraint also applies to the **hll_union(hll, hll)** function. - -## Obsolete Functions - -Some old HLL functions are discarded due to version upgrade. You can replace them with similar functions. - -- hll_schema_version(hll) - - Description: Checks the schema version in the current HLL. In earlier versions, the schema version is fixed at **1**, which is used to verify the header of the HLL field. After refactoring, the HLL field is added to the header for verification. The schema version is no longer used. - -- hll_regwidth(hll) - - Description: Queries the bucket size in the HLL data structure. In earlier versions, the value of **regwidth** ranges from 1 to 5, which has a large error and limits the upper limit of the cardinality estimation. After refactoring, the value of **regwidth** is fixed at **6** and the **regwidth** variable is not used. - -- hll_expthresh(hll) - - Description: Obtains the **expthresh** value in the current HLL. The **hll_log2explicit(hll)** function is used to replace similar functions. - -- hll_sparseon(hll) - - Description: Specifies whether to enable the sparse mode. Use **hll_log2sparse(hll)** to replace similar functions. The value **0** indicates that the sparse mode is disabled. - -## Built-in Functions - -HyperLogLog (HLL) has a series of built-in functions for internal data processing. Generally, users do not need to know how to use these functions. For details, see Table 1. - -**Table 1** Built-in Functions - -| Function | Description | -| :---------------- | :----------------------------------------------------------- | -| hll_in | Receives hll data in string format. | -| hll_out | Sends hll data in string format. | -| hll_recv | Receives hll data in bytea format. | -| hll_send | Sends hll data in bytea format. | -| hll_trans_in | Receives hll_trans_type data in string format. | -| hll_trans_out | Sends hll_trans_type data in string format. | -| hll_trans_recv | Receives hll_trans_type data in bytea format. | -| hll_trans_send | Sends hll_trans_type data in bytea format. | -| hll_typmod_in | Receives typmod data. | -| hll_typmod_out | Sends typmod data. | -| hll_hashval_in | Receives hll_hashval data. | -| hll_hashval_out | Sends hll_hashval data. | -| hll_add_trans0 | It is similar to **hll_add**. No input parameter is specified during initialization. It is usually used in the first phase of DNs in aggregation operations. 
| -| hll_add_trans1 | It is similar to **hll_add**. An input parameter is specified during initialization. It is usually used in the first phase of DNs in aggregation operations. | -| hll_add_trans2 | It is similar to **hll_add**. Two input parameters are specified during initialization. It is usually used in the first phase of DNs in aggregation operations. | -| hll_add_trans3 | It is similar to **hll_add**. Three input parameters are specified during initialization. It is usually used in the first phase of DNs in aggregation operations. | -| hll_add_trans4 | It is similar to **hll_add**. Four input parameters are specified during initialization. It is usually used in the first phase of DNs in aggregation operations. | -| hll_union_trans | It is similar to **hll_union** and is used in the first phase of DNs in aggregation operations. | -| hll_union_collect | It is similar to **hll_union** and is used in the second phase of DNs in aggregation operations to summarize the results of each DN. | -| hll_pack | It is used in the third phase of DNs in aggregation operations to convert a user-defined type hll_trans_type to the hll type. | -| hll | Converts an HLL type to another HLL type. Input parameters can be specified. | -| hll_hashval | Converts the bigint type to the **hll_hashval** type. | -| hll_hashval_int4 | Converts the int4 type to the **hll_hashval** type. | - -## Operators - -- = - - Description: Compares the values of HLL and **hll_hashval** types to check whether they are the same. - - Return type: Boolean - - Example: - - ```sql - --hll - MogDB=# select (hll_empty() || hll_hash_integer(1)) = (hll_empty() || hll_hash_integer(1)); - column - ---------- - t - (1 row) - - --hll_hashval - MogDB=# select hll_hash_integer(1) = hll_hash_integer(1); - ?column? - ---------- - t - (1 row) - ``` - -- <> or != - - Description: Compares the values of HLL and **hll_hashval** types to check whether they are different. - - Return type: Boolean - - Example: - - ```sql - --hll - MogDB=# select (hll_empty() || hll_hash_integer(1)) <> (hll_empty() || hll_hash_integer(2)); - ?column? - ---------- - t - (1 row) - - --hll_hashval - MogDB=# select hll_hash_integer(1) <> hll_hash_integer(2); - ?column? - ---------- - t - (1 row) - ``` - -- || - - Description: Represents the functions of **hll_add**, **hll_union**, and **hll_add_rev**. - - Return type: HLL - - Example: - - ```sql - --hll_add - MogDB=# select hll_empty() || hll_hash_integer(1); - ?column? - ---------------------------------------------------------------------------- - \x484c4c08000002002b0900000000000000f03f3e2921ff133fbaed3e2921ff133fbaed00 - (1 row) - - --hll_add_rev - MogDB=# select hll_hash_integer(1) || hll_empty(); - ?column? - ---------------------------------------------------------------------------- - \x484c4c08000002002b0900000000000000f03f3e2921ff133fbaed3e2921ff133fbaed00 - (1 row) - - --hll_union - MogDB=# select (hll_empty() || hll_hash_integer(1)) || (hll_empty() || hll_hash_integer(2)); - ?column? - -------------------------------------------------------------------------------------------- - \x484c4c10002000002b090000000000000000400000000000000000b3ccc49320cca1ae3e2921ff133fbaed00 - (1 row) - ``` - -- \# - - Description: Calculates the number of distinct values of an HLL. It works the same as the **hll_cardinality** function. - - Return type: int - - Example: - - ```sql - MogDB=# select #(hll_empty() || hll_hash_integer(1)); - ?column? 
- ----------
- 1
- (1 row)
- ```
+---
+title: HLL Functions and Operators
+summary: HLL Functions and Operators
+author: Zhang Cuiping
+date: 2021-06-15
+---
+
+# HLL Functions and Operators
+
+## Hash Functions
+
+- hll_hash_boolean(bool)
+
+  Description: Hashes data of the Boolean type.
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_boolean(FALSE);
+  hll_hash_boolean
+  ---------------------
+  -5451962507482445012
+  (1 row)
+  ```
+
+- hll_hash_boolean(bool, int32)
+
+  Description: Configures a hash seed (that is, changes the hash policy) and hashes data of the bool type.
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_boolean(FALSE, 10);
+  hll_hash_boolean
+  --------------------
+  -1169037589280886076
+  (1 row)
+  ```
+
+- hll_hash_smallint(smallint)
+
+  Description: Hashes data of the smallint type.
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_smallint(100::smallint);
+  hll_hash_smallint
+  ---------------------
+  962727970174027904
+  (1 row)
+  ```
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If parameters with the same numeric value are hashed as different data types, the resulting hash values will differ, because hash functions use a different calculation policy for each type.
+
+- hll_hash_smallint(smallint, int32)
+
+  Description: Configures a hash seed (that is, changes the hash policy) and hashes data of the smallint type.
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_smallint(100::smallint, 10);
+  hll_hash_smallint
+  ---------------------
+  -9056177146160443041
+  (1 row)
+  ```
+
+- hll_hash_integer(integer)
+
+  Description: Hashes data of the integer type.
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_integer(0);
+  hll_hash_integer
+  ----------------------
+  5156626420896634997
+  (1 row)
+  ```
+
+- hll_hash_integer(integer, int32)
+
+  Description: Hashes data of the integer type and configures a hash seed (that is, changes the hash policy).
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_integer(0, 10);
+  hll_hash_integer
+  --------------------
+  -5035020264353794276
+  (1 row)
+  ```
+
+- hll_hash_bigint(bigint)
+
+  Description: Hashes data of the bigint type.
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_bigint(100::bigint);
+  hll_hash_bigint
+  ---------------------
+  -2401963681423227794
+  (1 row)
+  ```
+
+- hll_hash_bigint(bigint, int32)
+
+  Description: Hashes data of the bigint type and configures a hash seed (that is, changes the hash policy).
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_bigint(100::bigint, 10);
+  hll_hash_bigint
+  ---------------------
+  -2305749404374433531
+  (1 row)
+  ```
+
+- hll_hash_bytea(bytea)
+
+  Description: Hashes data of the bytea type.
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_bytea(E'\\x');
+  hll_hash_bytea
+  ----------------
+  0
+  (1 row)
+  ```
+
+- hll_hash_bytea(bytea, int32)
+
+  Description: Hashes data of the bytea type and configures a hash seed (that is, changes the hash policy).
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_bytea(E'\\x', 10);
+  hll_hash_bytea
+  ---------------------
+  7233188113542599437
+  (1 row)
+  ```
+
+- hll_hash_text(text)
+
+  Description: Hashes data of the text type.
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_text('AB');
+  hll_hash_text
+  ---------------------
+  -5666002586880275174
+  (1 row)
+  ```
+
+- hll_hash_text(text, int32)
+
+  Description: Hashes data of the text type and configures a hash seed (that is, changes the hash policy).
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT hll_hash_text('AB', 10);
+  hll_hash_text
+  ---------------------
+  -2215507121143724132
+  (1 row)
+  ```
+
+- hll_hash_any(anytype)
+
+  Description: Hashes data of any type.
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_hash_any(1);
+  hll_hash_any
+  ----------------------
+  -1316670585935156930
+  (1 row)
+
+  MogDB=# select hll_hash_any('08:00:2b:01:02:03'::macaddr);
+  hll_hash_any
+  ----------------------
+  -3719950434455589360
+  (1 row)
+  ```
+
+- hll_hash_any(anytype, int32)
+
+  Description: Hashes data of any type and configures a hash seed (that is, changes the hash policy).
+
+  Return type: hll_hashval
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_hash_any(1, 10);
+  hll_hash_any
+  ----------------------
+  7048553517657992351
+  (1 row)
+  ```
+
+- hll_hashval_eq(hll_hashval, hll_hashval)
+
+  Description: Compares two pieces of data of the **hll_hashval** type to check whether they are the same.
+
+  Return type: Boolean
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_hashval_eq(hll_hash_integer(1), hll_hash_integer(1));
+  hll_hashval_eq
+  ----------------
+  t
+  (1 row)
+  ```
+
+- hll_hashval_ne(hll_hashval, hll_hashval)
+
+  Description: Compares two pieces of data of the **hll_hashval** type to check whether they are different.
+
+  Return type: Boolean
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_hashval_ne(hll_hash_integer(1), hll_hash_integer(1));
+  hll_hashval_ne
+  ----------------
+  f
+  (1 row)
+  ```
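+
+As the note above indicates, hashing equal values as different data types generally produces different hash values. A small sketch using **hll_hashval_ne** to confirm this (the output shown is illustrative):
+
+```sql
+MogDB=# select hll_hashval_ne(hll_hash_smallint(100::smallint), hll_hash_integer(100));
+ hll_hashval_ne
+----------------
+ t
+(1 row)
+```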
+
+## HLL Functions
+
+There are three HLL modes: explicit, sparse, and full. When the data size is small, the explicit mode is used. In this mode, distinct values are calculated without errors. As the number of distinct values increases, the HLL mode is switched to the sparse and full modes in sequence. The sparse and full modes produce the same calculation result, but vary in the calculation efficiency of HLL functions and in the storage space of HLL objects. The following functions can be used to view some HLL parameters:
+
+- hll_print(hll)
+
+  Description: Prints some debugging parameters of an HLL.
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_print(hll_empty());
+  hll_print
+  -------------------------------------------------------------------------------
+  type=1(HLL_EMPTY), log2m=14, log2explicit=10, log2sparse=12, duplicatecheck=0
+  (1 row)
+  ```
+
+- hll_type(hll)
+
+  Description: Checks the type of the current HLL. The return values are described as follows: **0** indicates **HLL_UNINIT**, an HLL object that is not initialized. **1** indicates **HLL_EMPTY**, an empty HLL object. **2** indicates **HLL_EXPLICIT**, an HLL object in explicit mode. **3** indicates **HLL_SPARSE**, an HLL object in sparse mode. **4** indicates **HLL_FULL**, an HLL object in full mode. **5** indicates **HLL_UNDEFINED**, an invalid HLL object.
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_type(hll_empty());
+  hll_type
+  ----------
+  1
+  (1 row)
+  ```
+
+- hll_log2m(hll)
+
+  Description: Checks the value of **log2m** in the current HLL data structure. **log2m** is the logarithm of the number of buckets. This value affects the error rate of calculating distinct values by HLL: the error rate = ±1.04/√(2^log2m). If the value of **log2m** ranges from 10 to 16, HLL sets the number of buckets to 2^log2m. When the value of **log2m** is explicitly set to **-1**, the built-in default value is used.
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_log2m(hll_empty());
+  hll_log2m
+  -----------
+  14
+  (1 row)
+
+  MogDB=# select hll_log2m(hll_empty(10));
+  hll_log2m
+  -----------
+  10
+  (1 row)
+
+  MogDB=# select hll_log2m(hll_empty(-1));
+  hll_log2m
+  -----------
+  14
+  (1 row)
+  ```
+
+- hll_log2explicit(hll)
+
+  Description: Queries the **log2explicit** value in the current HLL data structure. Generally, the HLL changes from the explicit mode to the sparse mode and then to the full mode. This process is called the promotion hierarchy policy. You can change the value of **log2explicit** to change the policy. For example, if **log2explicit** is set to **0**, an HLL will skip the explicit mode and directly enter the sparse mode. When the value of **log2explicit** is explicitly set to a value ranging from 1 to 12, HLL will switch to the sparse mode when the length of the data segment exceeds 2^log2explicit. When the value of **log2explicit** is explicitly set to **-1**, the built-in default value is used.
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_log2explicit(hll_empty());
+  hll_log2explicit
+  ------------------
+  10
+  (1 row)
+
+  MogDB=# select hll_log2explicit(hll_empty(12, 8));
+  hll_log2explicit
+  ------------------
+  8
+  (1 row)
+
+  MogDB=# select hll_log2explicit(hll_empty(12, -1));
+  hll_log2explicit
+  ------------------
+  10
+  (1 row)
+  ```
+
+- hll_log2sparse(hll)
+
+  Description: Queries the value of **log2sparse** in the current HLL data structure. Generally, the HLL changes from the explicit mode to the sparse mode and then to the full mode. This process is called the promotion hierarchy policy. You can adjust the value of **log2sparse** to change the policy. For example, if the value of **log2sparse** is **0**, the system skips the sparse mode and directly enters the full mode. If the value of **log2sparse** is explicitly set to a value ranging from 1 to 14, HLL will switch to the full mode when the length of the data segment exceeds 2^log2sparse. When the value of **log2sparse** is explicitly set to **-1**, the built-in default value is used.
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_log2sparse(hll_empty());
+  hll_log2sparse
+  ----------------
+  12
+  (1 row)
+
+  MogDB=# select hll_log2sparse(hll_empty(12, 8, 10));
+  hll_log2sparse
+  ----------------
+  10
+  (1 row)
+
+  MogDB=# select hll_log2sparse(hll_empty(12, 8, -1));
+  hll_log2sparse
+  ----------------
+  12
+  (1 row)
+  ```
+
+- hll_duplicatecheck(hll)
+
+  Description: Checks whether duplicate check is enabled. The value **0** indicates that it is disabled and the value **1** indicates that it is enabled. This mode is disabled by default. If there are many duplicate values, you can enable it to improve efficiency. When the value of **duplicatecheck** is explicitly set to **-1**, the built-in default value is used.
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_duplicatecheck(hll_empty());
+  hll_duplicatecheck
+  --------------------
+  0
+  (1 row)
+
+  MogDB=# select hll_duplicatecheck(hll_empty(12, 8, 10, 1));
+  hll_duplicatecheck
+  --------------------
+  1
+  (1 row)
+
+  MogDB=# select hll_duplicatecheck(hll_empty(12, 8, 10, -1));
+  hll_duplicatecheck
+  --------------------
+  0
+  (1 row)
+  ```
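+
+The promotion hierarchy can be observed with **hll_type**. A small sketch (the reported type values follow the list above; outputs are illustrative):
+
+```sql
+-- With default parameters, a single value keeps the HLL in explicit mode (type 2).
+MogDB=# select hll_type(hll_empty() || hll_hash_integer(1));
+ hll_type
+----------
+ 2
+(1 row)
+
+-- With log2explicit set to 0, the explicit mode is skipped and the HLL starts in sparse mode (type 3).
+MogDB=# select hll_type(hll_empty(14, 0) || hll_hash_integer(1));
+ hll_type
+----------
+ 3
+(1 row)
+```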
+
+## Functional Functions
+
+- hll_empty()
+
+  Description: Creates an empty HLL.
+
+  Return type: hll
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_empty();
+  hll_empty
+  ------------------------------------------------------------
+  \x484c4c00000000002b05000000000000000000000000000000000000
+  (1 row)
+  ```
+
+- hll_empty(int32 log2m)
+
+  Description: Creates an empty HLL and sets the **log2m** parameter. The parameter value ranges from 10 to 16. If the input is **-1**, the built-in default value is used.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_empty(10);
+  hll_empty
+  ------------------------------------------------------------
+  \x484c4c00000000002b04000000000000000000000000000000000000
+  (1 row)
+
+  MogDB=# select hll_empty(-1);
+  hll_empty
+  ------------------------------------------------------------
+  \x484c4c00000000002b05000000000000000000000000000000000000
+  (1 row)
+  ```
+
+- hll_empty(int32 log2m, int32 log2explicit)
+
+  Description: Creates an empty HLL and sets the **log2m** and **log2explicit** parameters in sequence. The value of **log2explicit** ranges from 0 to 12. The value **0** indicates that the explicit mode is skipped. This parameter is used to set the threshold of the explicit mode. When the length of the data segment reaches 2^log2explicit, the mode is switched to the sparse or full mode. If the input is **-1**, the built-in default value of **log2explicit** is used.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_empty(10, 4);
+  hll_empty
+  ------------------------------------------------------------
+  \x484c4c00000000001304000000000000000000000000000000000000
+  (1 row)
+
+  MogDB=# select hll_empty(10, -1);
+  hll_empty
+  ------------------------------------------------------------
+  \x484c4c00000000002b04000000000000000000000000000000000000
+  (1 row)
+  ```
+
+- hll_empty(int32 log2m, int32 log2explicit, int64 log2sparse)
+
+  Description: Creates an empty HLL and sets the **log2m**, **log2explicit**, and **log2sparse** parameters in sequence. The value of **log2sparse** ranges from 0 to 14. The value **0** indicates that the sparse mode is skipped. This parameter is used to set the threshold of the sparse mode. When the length of the data segment reaches 2^log2sparse, the mode is switched to the full mode. If the input is **-1**, the built-in default value of **log2sparse** is used.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_empty(10, 4, 8);
+  hll_empty
+  ------------------------------------------------------------
+  \x484c4c00000000001204000000000000000000000000000000000000
+  (1 row)
+
+  MogDB=# select hll_empty(10, 4, -1);
+  hll_empty
+  ------------------------------------------------------------
+  \x484c4c00000000001304000000000000000000000000000000000000
+  (1 row)
+  ```
+
+- hll_empty(int32 log2m, int32 log2explicit, int64 log2sparse, int32 duplicatecheck)
+
+  Description: Creates an empty HLL and sets the **log2m**, **log2explicit**, **log2sparse**, and **duplicatecheck** parameters in sequence. The value of **duplicatecheck** is **0** or **1**, indicating whether the duplicate check mode is enabled. By default, this mode is disabled. If the input is **-1**, the built-in default value of **duplicatecheck** is used.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_empty(10, 4, 8, 0);
+  hll_empty
+  ------------------------------------------------------------
+  \x484c4c00000000001204000000000000000000000000000000000000
+  (1 row)
+
+  MogDB=# select hll_empty(10, 4, 8, -1);
+  hll_empty
+  ------------------------------------------------------------
+  \x484c4c00000000001204000000000000000000000000000000000000
+  (1 row)
+  ```
+
+- hll_add(hll, hll_hashval)
+
+  Description: Adds **hll_hashval** to an HLL.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_add(hll_empty(), hll_hash_integer(1));
+  hll_add
+  ----------------------------------------------------------------------------
+  \x484c4c08000002002b0900000000000000f03f3e2921ff133fbaed3e2921ff133fbaed00
+  (1 row)
+  ```
+
+- hll_add_rev(hll_hashval, hll)
+
+  Description: Adds **hll_hashval** to an HLL. This function works the same as **hll_add**, except that the positions of parameters are switched.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_add_rev(hll_hash_integer(1), hll_empty());
+  hll_add_rev
+  ----------------------------------------------------------------------------
+  \x484c4c08000002002b0900000000000000f03f3e2921ff133fbaed3e2921ff133fbaed00
+  (1 row)
+  ```
+
+- hll_eq(hll, hll)
+
+  Description: Compares two HLLs to check whether they are the same.
+
+  Return type: Boolean
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_eq(hll_add(hll_empty(), hll_hash_integer(1)), hll_add(hll_empty(), hll_hash_integer(2)));
+  hll_eq
+  --------
+  f
+  (1 row)
+  ```
+
+- hll_ne(hll, hll)
+
+  Description: Compares two HLLs to check whether they are different.
+
+  Return type: Boolean
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_ne(hll_add(hll_empty(), hll_hash_integer(1)), hll_add(hll_empty(), hll_hash_integer(2)));
+  hll_ne
+  --------
+  t
+  (1 row)
+  ```
+
+- hll_cardinality(hll)
+
+  Description: Calculates the number of distinct values of an HLL.
+
+  Return type: int
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_cardinality(hll_empty() || hll_hash_integer(1));
+  hll_cardinality
+  -----------------
+  1
+  (1 row)
+  ```
+
+- hll_union(hll, hll)
+
+  Description: Performs a UNION operation on two HLL data structures to obtain one HLL.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_union(hll_add(hll_empty(), hll_hash_integer(1)), hll_add(hll_empty(), hll_hash_integer(2)));
+  hll_union
+  --------------------------------------------------------------------------------------------
+  \x484c4c10002000002b090000000000000000400000000000000000b3ccc49320cca1ae3e2921ff133fbaed00
+  (1 row)
+  ```
+
+## Aggregate Functions
+
+- hll_add_agg(hll_hashval)
+
+  Description: Groups hashed data into an HLL.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  -- Prepare data.
+  MogDB=# create table t_id(id int);
+  MogDB=# insert into t_id values(generate_series(1,500));
+  MogDB=# create table t_data(a int, c text);
+  MogDB=# insert into t_data select mod(id,2), id from t_id;
+
+  -- Create a table and specify an HLL column.
+  MogDB=# create table t_a_c_hll(a int, c hll);
+
+  -- Use GROUP BY on column a to group data, and insert the data to the HLL.
+  MogDB=# insert into t_a_c_hll select a, hll_add_agg(hll_hash_text(c)) from t_data group by a;
+
+  -- Calculate the number of distinct values for each group in the HLL.
+  MogDB=# select a, #c as cardinality from t_a_c_hll order by a;
+  a | cardinality
+  ---+------------------
+  0 | 247.862354346299
+  1 | 250.908710610377
+  (2 rows)
+  ```
+
+- hll_add_agg(hll_hashval, int32 log2m)
+
+  Description: Groups hashed data into an HLL and specifies the **log2m** parameter. The value ranges from 10 to 16. If the input is **-1** or **NULL**, the built-in default value is used.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_cardinality(hll_add_agg(hll_hash_text(c), 12)) from t_data;
+  hll_cardinality
+  ------------------
+  497.965240179228
+  (1 row)
+  ```
+
+- hll_add_agg(hll_hashval, int32 log2m, int32 log2explicit)
+
+  Description: Groups hashed data into an HLL and specifies the **log2m** and **log2explicit** parameters in sequence. The value of **log2explicit** ranges from 0 to 12. The value **0** indicates that the explicit mode is skipped. This parameter is used to set the threshold of the explicit mode. When the length of the data segment reaches 2^log2explicit, the mode is switched to the sparse or full mode. If the input is **-1** or **NULL**, the built-in default value of **log2explicit** is used.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_cardinality(hll_add_agg(hll_hash_text(c), NULL, 1)) from t_data;
+  hll_cardinality
+  ------------------
+  498.496062953313
+  (1 row)
+  ```
+
+- hll_add_agg(hll_hashval, int32 log2m, int32 log2explicit, int64 log2sparse)
+
+  Description: Groups hashed data into an HLL and sets the parameters **log2m**, **log2explicit**, and **log2sparse** in sequence. The value of **log2sparse** ranges from 0 to 14. The value **0** indicates that the sparse mode is skipped. This parameter is used to set the threshold of the sparse mode. When the length of the data segment reaches 2^log2sparse, the mode is switched to the full mode. If the input is **-1** or **NULL**, the built-in default value of **log2sparse** is used.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_cardinality(hll_add_agg(hll_hash_text(c), NULL, 6, 10)) from t_data;
+  hll_cardinality
+  ------------------
+  498.496062953313
+  (1 row)
+  ```
+
+- hll_add_agg(hll_hashval, int32 log2m, int32 log2explicit, int64 log2sparse, int32 duplicatecheck)
+
+  Description: Groups hashed data into an HLL and sets the **log2m**, **log2explicit**, **log2sparse**, and **duplicatecheck** parameters. The value of **duplicatecheck** can be **0** or **1**, indicating whether the duplicate check mode is enabled. By default, this mode is disabled. If the input is **-1** or **NULL**, the built-in default value of **duplicatecheck** is used.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  MogDB=# select hll_cardinality(hll_add_agg(hll_hash_text(c), NULL, 6, 10, -1)) from t_data;
+  hll_cardinality
+  ------------------
+  498.496062953313
+  (1 row)
+  ```
+
+- hll_union_agg(hll)
+
+  Description: Performs a UNION operation on multiple pieces of data of the HLL type to obtain one HLL.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  -- Perform a UNION operation on data of the HLL type in each group to obtain one HLL, and calculate the number of distinct values.
+  MogDB=# select #hll_union_agg(c) as cardinality from t_a_c_hll;
+  cardinality
+  ------------------
+  498.496062953313
+  (1 row)
+  ```
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** To perform a UNION operation on data in multiple HLLs, ensure that the HLLs have the same precision. Otherwise, the UNION operation cannot be performed. This constraint also applies to the **hll_union(hll, hll)** function.
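+
+A small sketch of the precision constraint: HLLs built with the same **log2m** can be unioned, whereas mixing different values (for example, **hll_empty(12)** with **hll_empty(14)**) is rejected (the output shown is illustrative):
+
+```sql
+MogDB=# select #hll_union(hll_empty(12) || hll_hash_integer(1), hll_empty(12) || hll_hash_integer(2));
+ ?column?
+----------
+ 2
+(1 row)
+```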
+
+## Obsolete Functions
+
+Some old HLL functions have been discarded as a result of version upgrades. You can replace them with the similar functions described below.
+
+- hll_schema_version(hll)
+
+  Description: Checks the schema version in the current HLL. In earlier versions, the schema version was fixed at **1** and was used to verify the header of the HLL field. After refactoring, the verification information is carried in the HLL header itself, so the schema version is no longer used.
+
+- hll_regwidth(hll)
+
+  Description: Queries the bucket size in the HLL data structure. In earlier versions, the value of **regwidth** ranged from 1 to 5, which introduced a large error and capped the upper limit of the cardinality estimation. After refactoring, the value of **regwidth** is fixed at **6** and the **regwidth** variable is no longer used.
+
+- hll_expthresh(hll)
+
+  Description: Obtains the **expthresh** value in the current HLL. Use the **hll_log2explicit(hll)** function instead.
+
+- hll_sparseon(hll)
+
+  Description: Specifies whether the sparse mode is enabled; the value **0** indicates that it is disabled. Use the **hll_log2sparse(hll)** function instead.
+
+## Built-in Functions
+
+HyperLogLog (HLL) has a series of built-in functions for internal data processing. Generally, users do not need to know how to use these functions. For details, see Table 1.
+
+**Table 1** Built-in Functions
+
+| Function | Description |
+| :---------------- | :----------------------------------------------------------- |
+| hll_in | Receives hll data in string format. |
+| hll_out | Sends hll data in string format. |
+| hll_recv | Receives hll data in bytea format. |
+| hll_send | Sends hll data in bytea format. |
+| hll_trans_in | Receives hll_trans_type data in string format. |
+| hll_trans_out | Sends hll_trans_type data in string format. |
+| hll_trans_recv | Receives hll_trans_type data in bytea format. |
+| hll_trans_send | Sends hll_trans_type data in bytea format. |
+| hll_typmod_in | Receives typmod data. |
+| hll_typmod_out | Sends typmod data. |
+| hll_hashval_in | Receives hll_hashval data. |
+| hll_hashval_out | Sends hll_hashval data. |
+| hll_add_trans0 | It is similar to **hll_add**. No input parameter is specified during initialization. It is usually used in the first phase of DNs in aggregation operations. |
+| hll_add_trans1 | It is similar to **hll_add**. One input parameter is specified during initialization. It is usually used in the first phase of DNs in aggregation operations. |
+| hll_add_trans2 | It is similar to **hll_add**. Two input parameters are specified during initialization. It is usually used in the first phase of DNs in aggregation operations. |
+| hll_add_trans3 | It is similar to **hll_add**. Three input parameters are specified during initialization. It is usually used in the first phase of DNs in aggregation operations. |
+| hll_add_trans4 | It is similar to **hll_add**. Four input parameters are specified during initialization. It is usually used in the first phase of DNs in aggregation operations. |
+| hll_union_trans | It is similar to **hll_union** and is used in the first phase of DNs in aggregation operations. |
+| hll_union_collect | It is similar to **hll_union** and is used in the second phase of DNs in aggregation operations to summarize the results of each DN. |
+| hll_pack | It is used in the third phase of DNs in aggregation operations to convert a user-defined type hll_trans_type to the hll type. |
+| hll | Converts an HLL type to another HLL type. Input parameters can be specified. |
+| hll_hashval | Converts the bigint type to the **hll_hashval** type. |
+| hll_hashval_int4 | Converts the int4 type to the **hll_hashval** type. |
+
+## Operators
+
+- =
+
+  Description: Compares the values of HLL and **hll_hashval** types to check whether they are the same.
+
+  Return type: Boolean
+
+  Example:
+
+  ```sql
+  --hll
+  MogDB=# select (hll_empty() || hll_hash_integer(1)) = (hll_empty() || hll_hash_integer(1));
+  ?column?
+  ----------
+  t
+  (1 row)
+
+  --hll_hashval
+  MogDB=# select hll_hash_integer(1) = hll_hash_integer(1);
+  ?column?
+  ----------
+  t
+  (1 row)
+  ```
+
+- <> or !=
+
+  Description: Compares the values of HLL and **hll_hashval** types to check whether they are different.
+
+  Return type: Boolean
+
+  Example:
+
+  ```sql
+  --hll
+  MogDB=# select (hll_empty() || hll_hash_integer(1)) <> (hll_empty() || hll_hash_integer(2));
+  ?column?
+  ----------
+  t
+  (1 row)
+
+  --hll_hashval
+  MogDB=# select hll_hash_integer(1) <> hll_hash_integer(2);
+  ?column?
+  ----------
+  t
+  (1 row)
+  ```
+
+- ||
+
+  Description: Performs the function of **hll_add**, **hll_union**, or **hll_add_rev**, depending on the operand types.
+
+  Return type: HLL
+
+  Example:
+
+  ```sql
+  --hll_add
+  MogDB=# select hll_empty() || hll_hash_integer(1);
+  ?column?
+  ----------------------------------------------------------------------------
+  \x484c4c08000002002b0900000000000000f03f3e2921ff133fbaed3e2921ff133fbaed00
+  (1 row)
+
+  --hll_add_rev
+  MogDB=# select hll_hash_integer(1) || hll_empty();
+  ?column?
+  ----------------------------------------------------------------------------
+  \x484c4c08000002002b0900000000000000f03f3e2921ff133fbaed3e2921ff133fbaed00
+  (1 row)
+
+  --hll_union
+  MogDB=# select (hll_empty() || hll_hash_integer(1)) || (hll_empty() || hll_hash_integer(2));
+  ?column?
+  --------------------------------------------------------------------------------------------
+  \x484c4c10002000002b090000000000000000400000000000000000b3ccc49320cca1ae3e2921ff133fbaed00
+  (1 row)
+  ```
+
+- \#
+
+  Description: Calculates the number of distinct values of an HLL. It works the same as the **hll_cardinality** function.
+
+  Return type: int
+
+  Example:
+
+  ```sql
+  MogDB=# select #(hll_empty() || hll_hash_integer(1));
+  ?column?
+ ---------- + 1 + (1 row) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/internal-functions/internal-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/internal-functions/internal-functions.md index b16fbf5e..48b6bf75 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/internal-functions/internal-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/internal-functions/internal-functions.md @@ -1,12 +1,12 @@ ---- -title: Internal Functions -summary: Internal Functions -author: Zhang Cuiping -date: 2021-06-07 ---- - -# Internal Functions - -- **[Internal Functions (1)](internal-functions-1.md)** - +--- +title: Internal Functions +summary: Internal Functions +author: Zhang Cuiping +date: 2021-06-07 +--- + +# Internal Functions + +- **[Internal Functions (1)](internal-functions-1.md)** + - **[Internal Functions (2)](internal-functions-2.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/ledger-database-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/ledger-database-functions.md index bd933970..d849b2de 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/ledger-database-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/ledger-database-functions.md @@ -1,96 +1,96 @@ ---- -title: Ledger Database Functions -summary: Ledger Database Functions -author: Guo Huan -date: 2021-10-28 ---- - -# Ledger Database Functions - -- get_dn_hist_relhash(text, text) - - Description: Returns the hash value of table-level data in a specified tamper-proof user table. This function is used only in distributed mode. - - Parameter type: text - - Return type: hash16 - -- ledger_hist_check(text, text) - - Description: Verifies the consistency between the hash value of table-level data in a specified tamper-proof user table and that in the corresponding history table. - - Parameter type: text - - Return type: Boolean - -- ledger_hist_repair(text, text) - - Description: Restores the hash value of the history table corresponding to the specified tamper-proof user table to be the same as that of the user table, and returns the hash difference. - - Parameter type: text - - Return type: hash16 - -- ledger_hist_archive(text, text) - - Description: Archives the history table corresponding to a specified tamper-proof user table to the **hist_back** folder in the audit log directory. - - Parameter type: text - - Return type: Boolean - -- ledger_gchain_check(text, text) - - Description: Verifies the consistency between the history table hash corresponding to the specified tamper-proof user table and the **relhash** corresponding to the global history table. - - Parameter type: text - - Return type: Boolean - -- ledger_gchain_repair(text, text) - - Description: Restores **relhash** of a specified tamper-proof user table in the global history table so that the hash is the same as that in the history table, and returns the hash difference. - - Parameter type: text - - Return type: hash16 - -- ledger_gchain_archive(void) - - Description: Archives global history tables to the **hist_back** folder in the audit log directory. - - Parameter type: void - - Return type: Boolean - -- hash16in(cstring) - - Description: Converts the input hexadecimal string into the internal hash16 format. 
- - Parameter type: cstring - - Return type: hash16 - -- hash16out(hash16) - - Description: Converts internal hash16 data to hexadecimal cstring data. - - Parameter type: hash16 - - Return type: cstring - -- hash32in(cstring) - - Description: Converts the input hexadecimal string (32 characters) into the internal type hash32. - - Parameter type: cstring - - Return type: hash32 - -- hash32out(hash32) - - Description: Converts internal hash32 data to hexadecimal cstring data. - - Parameter type: cstring - - Return type: hash32 +--- +title: Ledger Database Functions +summary: Ledger Database Functions +author: Guo Huan +date: 2021-10-28 +--- + +# Ledger Database Functions + +- get_dn_hist_relhash(text, text) + + Description: Returns the hash value of table-level data in a specified tamper-proof user table. This function is used only in distributed mode. + + Parameter type: text + + Return type: hash16 + +- ledger_hist_check(text, text) + + Description: Verifies the consistency between the hash value of table-level data in a specified tamper-proof user table and that in the corresponding history table. + + Parameter type: text + + Return type: Boolean + +- ledger_hist_repair(text, text) + + Description: Restores the hash value of the history table corresponding to the specified tamper-proof user table to be the same as that of the user table, and returns the hash difference. + + Parameter type: text + + Return type: hash16 + +- ledger_hist_archive(text, text) + + Description: Archives the history table corresponding to a specified tamper-proof user table to the **hist_back** folder in the audit log directory. + + Parameter type: text + + Return type: Boolean + +- ledger_gchain_check(text, text) + + Description: Verifies the consistency between the history table hash corresponding to the specified tamper-proof user table and the **relhash** corresponding to the global history table. + + Parameter type: text + + Return type: Boolean + +- ledger_gchain_repair(text, text) + + Description: Restores **relhash** of a specified tamper-proof user table in the global history table so that the hash is the same as that in the history table, and returns the hash difference. + + Parameter type: text + + Return type: hash16 + +- ledger_gchain_archive(void) + + Description: Archives global history tables to the **hist_back** folder in the audit log directory. + + Parameter type: void + + Return type: Boolean + +- hash16in(cstring) + + Description: Converts the input hexadecimal string into the internal hash16 format. + + Parameter type: cstring + + Return type: hash16 + +- hash16out(hash16) + + Description: Converts internal hash16 data to hexadecimal cstring data. + + Parameter type: hash16 + + Return type: cstring + +- hash32in(cstring) + + Description: Converts the input hexadecimal string (32 characters) into the internal type hash32. + + Parameter type: cstring + + Return type: hash32 + +- hash32out(hash32) + + Description: Converts internal hash32 data to hexadecimal cstring data. 
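+
+  Parameter type: hash32
+
+  Return type: cstring
+
+A minimal usage sketch of the verification functions; the schema name **ledgernsp** and the table name **usertable** are assumptions, and the target must be an existing tamper-proof user table (the output shown is illustrative):
+
+```sql
+MogDB=# SELECT ledger_hist_check('ledgernsp', 'usertable');
+ ledger_hist_check
+-------------------
+ t
+(1 row)
+```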
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/logical-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/logical-operators.md
index 58501eff..abde7b03 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/logical-operators.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/logical-operators.md
@@ -1,26 +1,26 @@
----
-title: Logical Operators
-summary: Logical Operators
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Logical Operators
-
-The usual logical operators include AND, OR, and NOT. SQL uses a three-valued logical system with true, false, and null, which represents "unknown". Their priorities are NOT > AND > OR.
-
-[Table 1](#Operation rules) lists the calculation rules, where a and b represent logical expressions.
-
-**Table 1** Operation rules
-
-| a | b | a **AND** b Result | a **OR** b Result | **NOT** a Result |
-| :---- | :---- | :----------------- | :---------------- | :--------------- |
-| TRUE | TRUE | TRUE | TRUE | FALSE |
-| TRUE | FALSE | FALSE | TRUE | FALSE |
-| TRUE | NULL | NULL | TRUE | FALSE |
-| FALSE | FALSE | FALSE | FALSE | TRUE |
-| FALSE | NULL | FALSE | NULL | TRUE |
-| NULL | NULL | NULL | NULL | NULL |
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-> The operators AND and OR are commutative, that is, you can switch the left and right operand without affecting the result.
+---
+title: Logical Operators
+summary: Logical Operators
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Logical Operators
+
+The usual logical operators include AND, OR, and NOT. SQL uses a three-valued logical system with true, false, and null, which represents "unknown". Their priorities are NOT > AND > OR.
+
+[Table 1](#Operation rules) lists the calculation rules, where a and b represent logical expressions.
+
+**Table 1** Operation rules
+
+| a | b | a **AND** b Result | a **OR** b Result | **NOT** a Result |
+| :---- | :---- | :----------------- | :---------------- | :--------------- |
+| TRUE | TRUE | TRUE | TRUE | FALSE |
+| TRUE | FALSE | FALSE | TRUE | FALSE |
+| TRUE | NULL | NULL | TRUE | FALSE |
+| FALSE | FALSE | FALSE | FALSE | TRUE |
+| FALSE | NULL | FALSE | NULL | TRUE |
+| NULL | NULL | NULL | NULL | NULL |
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+> The operators AND and OR are commutative, that is, you can switch the left and right operands without affecting the result.
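+
+A short illustration of the three-valued logic; null results display as empty fields:
+
+```sql
+MogDB=# SELECT (TRUE AND NULL) AS and_result, (TRUE OR NULL) AS or_result, (NOT NULL::boolean) AS not_result;
+ and_result | or_result | not_result
+------------+-----------+------------
+            | t         |
+(1 row)
+```
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/network-address-functions-and-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/network-address-functions-and-operators.md
index 164811e7..9d3e1d45 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/network-address-functions-and-operators.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/network-address-functions-and-operators.md
@@ -1,456 +1,456 @@
----
-title: Network Address Functions and Operators
-summary: Network Address Functions and Operators
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Network Address Functions and Operators
-
-## cidr and inet Operators
-
-The operators **<<**, **<<=**, **>>**, and **>>=** test for subnet inclusion.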
They consider only the network parts of the two addresses (ignoring any host part) and determine whether one network is identical to or a subnet of the other. - -- < - - Description: Is less than - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.5' < inet '192.168.1.6' AS RESULT; - result - -------- - t - (1 row) - ``` - -- <= - - Description: Is less than or equals - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.5' <= inet '192.168.1.5' AS RESULT; - result - -------- - t - (1 row) - ``` - -- = - - Description: Equals - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.5' = inet '192.168.1.5' AS RESULT; - result - -------- - t - (1 row) - ``` - -- >= - - Description: Is greater than or equals - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.5' >= inet '192.168.1.5' AS RESULT; - result - -------- - t - (1 row) - ``` - -- > - - Description: Is greater than - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.5' > inet '192.168.1.4' AS RESULT; - result - -------- - t - (1 row) - ``` - -- <> - - Description: Does not equal to - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.5' <> inet '192.168.1.4' AS RESULT; - result - -------- - t - (1 row) - ``` - -- << - - Description: Is contained in - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.5' << inet '192.168.1/24' AS RESULT; - result - -------- - t - (1 row) - ``` - -- <<= - - Description: Is contained in or equals - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1/24' <<= inet '192.168.1/24' AS RESULT; - result - -------- - t - (1 row) - ``` - -- >> - - Description: Contains - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1/24' >> inet '192.168.1.5' AS RESULT; - result - -------- - t - (1 row) - ``` - -- >>= - - Description: Contains or equals - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1/24' >>= inet '192.168.1/24' AS RESULT; - result - -------- - t - (1 row) - ``` - -- ~ - - Description: Bitwise NOT - - Example: - - ```sql - MogDB=# SELECT ~ inet '192.168.1.6' AS RESULT; - result - --------------- - 63.87.254.249 - (1 row) - ``` - -- & - - Description: Performs an AND operation on each bit of the two network addresses. - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.6' & inet '10.0.0.0' AS RESULT; - result - --------- - 0.0.0.0 - (1 row) - ``` - -- | - - Description: Performs an OR operation on each bit of the two network addresses. - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.6' | inet '10.0.0.0' AS RESULT; - result - ------------- - 202.168.1.6 - (1 row) - ``` - -- \+ - - Description: Addition - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.6' + 25 AS RESULT; - result - -------------- - 192.168.1.31 - (1 row) - ``` - -- \- - - Description: Subtraction - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.43' - 36 AS RESULT; - result - ------------- - 192.168.1.7 - (1 row) - ``` - -- \- - - Description: Subtraction - - Example: - - ```sql - MogDB=# SELECT inet '192.168.1.43' - inet '192.168.1.19' AS RESULT; - result - -------- - 24 - (1 row) - ``` - -## cidr and inet Functions - -The **abbrev**, **host**, and **text** functions are primarily intended to offer alternative display formats. 
- -- abbrev(inet) - - Description: Abbreviated display format as text - - Return type: text - - Example: - - ```sql - MogDB=# SELECT abbrev(inet '10.1.0.0/16') AS RESULT; - result - ------------- - 10.1.0.0/16 - (1 row) - ``` - -- abbrev(cidr) - - Description: Abbreviated display format as text - - Return type: text - - Example: - - ```sql - MogDB=# SELECT abbrev(cidr '10.1.0.0/16') AS RESULT; - result - --------- - 10.1/16 - (1 row) - ``` - -- broadcast(inet) - - Description: Broadcast address for networks - - Return type: inet - - Example: - - ```sql - MogDB=# SELECT broadcast('192.168.1.5/24') AS RESULT; - result - ------------------ - 192.168.1.255/24 - (1 row) - ``` - -- family(inet) - - Description: Extracts family of addresses, **4** for IPv4, and **6** for IPv6. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT family('127.0.0.1') AS RESULT; - result - -------- - 4 - (1 row) - ``` - -- host(inet) - - Description: Extracts IP addresses as text. - - Return type: text - - Example: - - ```sql - MogDB=# SELECT host('192.168.1.5/24') AS RESULT; - result - ------------- - 192.168.1.5 - (1 row) - ``` - -- hostmask(inet) - - Description: Constructs the host mask for a network. - - Return type: inet - - Example: - - ```sql - MogDB=# SELECT hostmask('192.168.23.20/30') AS RESULT; - result - --------- - 0.0.0.3 - (1 row) - ``` - -- masklen(inet) - - Description: Extracts subnet mask length. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT masklen('192.168.1.5/24') AS RESULT; - result - -------- - 24 - (1 row) - ``` - -- netmask(inet) - - Description: Constructs the subnet mask for a network. - - Return type: inet - - Example: - - ```sql - MogDB=# SELECT netmask('192.168.1.5/24') AS RESULT; - result - --------------- - 255.255.255.0 - (1 row) - ``` - -- network(inet) - - Description: Extracts the network part of an address. - - Return type: cidr - - Example: - - ```sql - MogDB=# SELECT network('192.168.1.5/24') AS RESULT; - result - ---------------- - 192.168.1.0/24 - (1 row) - ``` - -- set_masklen(inet, int) - - Description: Sets subnet mask length for the **inet** value. - - Return type: inet - - Example: - - ```sql - MogDB=# SELECT set_masklen('192.168.1.5/24', 16) AS RESULT; - result - ---------------- - 192.168.1.5/16 - (1 row) - ``` - -- set_masklen(cidr, int) - - Description: Sets subnet mask length for the **cidr** value. - - Return type: cidr - - Example: - - ```sql - MogDB=# SELECT set_masklen('192.168.1.0/24'::cidr, 16) AS RESULT; - result - ---------------- - 192.168.0.0/16 - (1 row) - ``` - -- text(inet) - - Description: Extracts IP addresses and subnet mask length as text. - - Return type: text - - Example: - - ```sql - MogDB=# SELECT text(inet '192.168.1.5') AS RESULT; - result - ---------------- - 192.168.1.5/32 - (1 row) - ``` - -Any **cidr** value can be cast to **inet** implicitly or explicitly; therefore, the functions shown above as operating on **inet** also work on **cidr** values. An **inet** value can be cast to **cidr**. After the conversion, any bits to the right of the subnet mask are silently zeroed to create a valid **cidr** value. In addition, you can cast a text string to **inet** or **cidr** using normal casting syntax. For example, **inet(expression)** or **colname::cidr**. - -## macaddr Functions - -The function **trunc(macaddr)** returns a MAC address with the last 3 bytes set to zero. - -- trunc(macaddr) - - Description: Sets last 3 bytes to zero. 
- - Return type: macaddr - - Example: - -```sql -MogDB=# SELECT trunc(macaddr '12:34:56:78:90:ab') AS RESULT; - result -------------------- - 12:34:56:00:00:00 -(1 row) -``` - -The **macaddr** type also supports the standard relational operators (such as **>** and **<=**) for lexicographical ordering, and the bitwise arithmetic operators (**~**, **&** and **|**) for NOT, AND and OR. +--- +title: Network Address Functions and Operators +summary: Network Address Functions and Operators +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Network Address Functions and Operators + +## cidr and inet Operators + +The operators **<<**, **<<=**, **>>**, and **>>=** test for subnet inclusion. They consider only the network parts of the two addresses (ignoring any host part) and determine whether one network is identical to or a subnet of the other. + +- < + + Description: Is less than + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.5' < inet '192.168.1.6' AS RESULT; + result + -------- + t + (1 row) + ``` + +- <= + + Description: Is less than or equals + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.5' <= inet '192.168.1.5' AS RESULT; + result + -------- + t + (1 row) + ``` + +- = + + Description: Equals + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.5' = inet '192.168.1.5' AS RESULT; + result + -------- + t + (1 row) + ``` + +- >= + + Description: Is greater than or equals + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.5' >= inet '192.168.1.5' AS RESULT; + result + -------- + t + (1 row) + ``` + +- > + + Description: Is greater than + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.5' > inet '192.168.1.4' AS RESULT; + result + -------- + t + (1 row) + ``` + +- <> + + Description: Does not equal to + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.5' <> inet '192.168.1.4' AS RESULT; + result + -------- + t + (1 row) + ``` + +- << + + Description: Is contained in + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.5' << inet '192.168.1/24' AS RESULT; + result + -------- + t + (1 row) + ``` + +- <<= + + Description: Is contained in or equals + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1/24' <<= inet '192.168.1/24' AS RESULT; + result + -------- + t + (1 row) + ``` + +- >> + + Description: Contains + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1/24' >> inet '192.168.1.5' AS RESULT; + result + -------- + t + (1 row) + ``` + +- >>= + + Description: Contains or equals + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1/24' >>= inet '192.168.1/24' AS RESULT; + result + -------- + t + (1 row) + ``` + +- ~ + + Description: Bitwise NOT + + Example: + + ```sql + MogDB=# SELECT ~ inet '192.168.1.6' AS RESULT; + result + --------------- + 63.87.254.249 + (1 row) + ``` + +- & + + Description: Performs an AND operation on each bit of the two network addresses. + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.6' & inet '10.0.0.0' AS RESULT; + result + --------- + 0.0.0.0 + (1 row) + ``` + +- | + + Description: Performs an OR operation on each bit of the two network addresses. 
+ + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.6' | inet '10.0.0.0' AS RESULT; + result + ------------- + 202.168.1.6 + (1 row) + ``` + +- \+ + + Description: Addition + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.6' + 25 AS RESULT; + result + -------------- + 192.168.1.31 + (1 row) + ``` + +- \- + + Description: Subtraction + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.43' - 36 AS RESULT; + result + ------------- + 192.168.1.7 + (1 row) + ``` + +- \- + + Description: Subtraction + + Example: + + ```sql + MogDB=# SELECT inet '192.168.1.43' - inet '192.168.1.19' AS RESULT; + result + -------- + 24 + (1 row) + ``` + +## cidr and inet Functions + +The **abbrev**, **host**, and **text** functions are primarily intended to offer alternative display formats. + +- abbrev(inet) + + Description: Abbreviated display format as text + + Return type: text + + Example: + + ```sql + MogDB=# SELECT abbrev(inet '10.1.0.0/16') AS RESULT; + result + ------------- + 10.1.0.0/16 + (1 row) + ``` + +- abbrev(cidr) + + Description: Abbreviated display format as text + + Return type: text + + Example: + + ```sql + MogDB=# SELECT abbrev(cidr '10.1.0.0/16') AS RESULT; + result + --------- + 10.1/16 + (1 row) + ``` + +- broadcast(inet) + + Description: Broadcast address for networks + + Return type: inet + + Example: + + ```sql + MogDB=# SELECT broadcast('192.168.1.5/24') AS RESULT; + result + ------------------ + 192.168.1.255/24 + (1 row) + ``` + +- family(inet) + + Description: Extracts family of addresses, **4** for IPv4, and **6** for IPv6. + + Return type: int + + Example: + + ```sql + MogDB=# SELECT family('127.0.0.1') AS RESULT; + result + -------- + 4 + (1 row) + ``` + +- host(inet) + + Description: Extracts IP addresses as text. + + Return type: text + + Example: + + ```sql + MogDB=# SELECT host('192.168.1.5/24') AS RESULT; + result + ------------- + 192.168.1.5 + (1 row) + ``` + +- hostmask(inet) + + Description: Constructs the host mask for a network. + + Return type: inet + + Example: + + ```sql + MogDB=# SELECT hostmask('192.168.23.20/30') AS RESULT; + result + --------- + 0.0.0.3 + (1 row) + ``` + +- masklen(inet) + + Description: Extracts subnet mask length. + + Return type: int + + Example: + + ```sql + MogDB=# SELECT masklen('192.168.1.5/24') AS RESULT; + result + -------- + 24 + (1 row) + ``` + +- netmask(inet) + + Description: Constructs the subnet mask for a network. + + Return type: inet + + Example: + + ```sql + MogDB=# SELECT netmask('192.168.1.5/24') AS RESULT; + result + --------------- + 255.255.255.0 + (1 row) + ``` + +- network(inet) + + Description: Extracts the network part of an address. + + Return type: cidr + + Example: + + ```sql + MogDB=# SELECT network('192.168.1.5/24') AS RESULT; + result + ---------------- + 192.168.1.0/24 + (1 row) + ``` + +- set_masklen(inet, int) + + Description: Sets subnet mask length for the **inet** value. + + Return type: inet + + Example: + + ```sql + MogDB=# SELECT set_masklen('192.168.1.5/24', 16) AS RESULT; + result + ---------------- + 192.168.1.5/16 + (1 row) + ``` + +- set_masklen(cidr, int) + + Description: Sets subnet mask length for the **cidr** value. + + Return type: cidr + + Example: + + ```sql + MogDB=# SELECT set_masklen('192.168.1.0/24'::cidr, 16) AS RESULT; + result + ---------------- + 192.168.0.0/16 + (1 row) + ``` + +- text(inet) + + Description: Extracts IP addresses and subnet mask length as text. 
+ + Return type: text + + Example: + + ```sql + MogDB=# SELECT text(inet '192.168.1.5') AS RESULT; + result + ---------------- + 192.168.1.5/32 + (1 row) + ``` + +Any **cidr** value can be cast to **inet** implicitly or explicitly; therefore, the functions shown above as operating on **inet** also work on **cidr** values. An **inet** value can be cast to **cidr**. After the conversion, any bits to the right of the subnet mask are silently zeroed to create a valid **cidr** value. In addition, you can cast a text string to **inet** or **cidr** using normal casting syntax. For example, **inet(expression)** or **colname::cidr**. + +## macaddr Functions + +The function **trunc(macaddr)** returns a MAC address with the last 3 bytes set to zero. + +- trunc(macaddr) + + Description: Sets last 3 bytes to zero. + + Return type: macaddr + + Example: + +```sql +MogDB=# SELECT trunc(macaddr '12:34:56:78:90:ab') AS RESULT; + result +------------------- + 12:34:56:00:00:00 +(1 row) +``` + +The **macaddr** type also supports the standard relational operators (such as **>** and **<=**) for lexicographical ordering, and the bitwise arithmetic operators (**~**, **&** and **|**) for NOT, AND and OR. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/obsolete-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/obsolete-functions.md index b9c0b934..299597f5 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/obsolete-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/obsolete-functions.md @@ -1,18 +1,18 @@ ---- -title: Obsolete Functions -summary: Obsolete Functions -author: Zhang Cuiping -date: 2021-06-07 ---- - -# Obsolete Functions - -The following functions in MogDB have been discarded in the latest version: - -| gs_wlm_get_session_info | gs_wlm_get_user_session_info | pgxc_get_csn | pgxc_get_stat_dirty_tables | pgxc_get_thread_wait_status | pgxc_gtm_snapshot_status | pgxc_is_committed | -| --------------------------- | ---------------------------------- | ------------------------------------- | ----------------------------------------- | ---------------------------------------- | -------------------------------------- | ---------------------------------- | -| pgxc_lock_for_backup | pgxc_lock_for_sp_database | pgxc_lock_for_transfer | pgxc_log_comm_status | pgxc_max_datanode_size | pgxc_node_str | pgxc_pool_check | -| pgxc_pool_connection_status | pgxc_pool_reload | pgxc_prepared_xact | pgxc_snapshot_status | pgxc_stat_dirty_tables | pgxc_unlock_for_sp_database | pgxc_unlock_for_transfer | -| pgxc_version | array_extend | prepare_statement_status | remote_rto_stat | dbe_perf.global_slow_query_info | dbe_perf.global_slow_query_info_bytime | dbe_perf.global_slow_query_history | -| pg_stat_get_pooler_status | pg_stat_get_wlm_node_resource_info | pg_stat_get_wlm_session_info_internal | DBE_PERF.get_wlm_controlgroup_ng_config() | DBE_PERF.get_wlm_user_resource_runtime() | global_space_shrink | pg_pool_validate | -| gs_stat_ustore | table_skewness(text) | table_skewness(text, text, text) | - | - | - | - | +--- +title: Obsolete Functions +summary: Obsolete Functions +author: Zhang Cuiping +date: 2021-06-07 +--- + +# Obsolete Functions + +The following functions in MogDB have been discarded in the latest version: + +| gs_wlm_get_session_info | gs_wlm_get_user_session_info | pgxc_get_csn | pgxc_get_stat_dirty_tables | pgxc_get_thread_wait_status | pgxc_gtm_snapshot_status | pgxc_is_committed | +| 
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/obsolete-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/obsolete-functions.md
index b9c0b934..299597f5 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/obsolete-functions.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/obsolete-functions.md
@@ -1,18 +1,18 @@
----
-title: Obsolete Functions
-summary: Obsolete Functions
-author: Zhang Cuiping
-date: 2021-06-07
----
-
-# Obsolete Functions
-
-The following functions in MogDB have been discarded in the latest version:
-
-| gs_wlm_get_session_info | gs_wlm_get_user_session_info | pgxc_get_csn | pgxc_get_stat_dirty_tables | pgxc_get_thread_wait_status | pgxc_gtm_snapshot_status | pgxc_is_committed |
-| --------------------------- | ---------------------------------- | ------------------------------------- | ----------------------------------------- | ---------------------------------------- | -------------------------------------- | ---------------------------------- |
-| pgxc_lock_for_backup | pgxc_lock_for_sp_database | pgxc_lock_for_transfer | pgxc_log_comm_status | pgxc_max_datanode_size | pgxc_node_str | pgxc_pool_check |
-| pgxc_pool_connection_status | pgxc_pool_reload | pgxc_prepared_xact | pgxc_snapshot_status | pgxc_stat_dirty_tables | pgxc_unlock_for_sp_database | pgxc_unlock_for_transfer |
-| pgxc_version | array_extend | prepare_statement_status | remote_rto_stat | dbe_perf.global_slow_query_info | dbe_perf.global_slow_query_info_bytime | dbe_perf.global_slow_query_history |
-| pg_stat_get_pooler_status | pg_stat_get_wlm_node_resource_info | pg_stat_get_wlm_session_info_internal | DBE_PERF.get_wlm_controlgroup_ng_config() | DBE_PERF.get_wlm_user_resource_runtime() | global_space_shrink | pg_pool_validate |
-| gs_stat_ustore | table_skewness(text) | table_skewness(text, text, text) | - | - | - | - |
+---
+title: Obsolete Functions
+summary: Obsolete Functions
+author: Zhang Cuiping
+date: 2021-06-07
+---
+
+# Obsolete Functions
+
+The following functions in MogDB are obsolete in the latest version:
+
+| gs_wlm_get_session_info | gs_wlm_get_user_session_info | pgxc_get_csn | pgxc_get_stat_dirty_tables | pgxc_get_thread_wait_status | pgxc_gtm_snapshot_status | pgxc_is_committed |
+| --------------------------- | ---------------------------------- | ------------------------------------- | ----------------------------------------- | ---------------------------------------- | -------------------------------------- | ---------------------------------- |
+| pgxc_lock_for_backup | pgxc_lock_for_sp_database | pgxc_lock_for_transfer | pgxc_log_comm_status | pgxc_max_datanode_size | pgxc_node_str | pgxc_pool_check |
+| pgxc_pool_connection_status | pgxc_pool_reload | pgxc_prepared_xact | pgxc_snapshot_status | pgxc_stat_dirty_tables | pgxc_unlock_for_sp_database | pgxc_unlock_for_transfer |
+| pgxc_version | array_extend | prepare_statement_status | remote_rto_stat | dbe_perf.global_slow_query_info | dbe_perf.global_slow_query_info_bytime | dbe_perf.global_slow_query_history |
+| pg_stat_get_pooler_status | pg_stat_get_wlm_node_resource_info | pg_stat_get_wlm_session_info_internal | DBE_PERF.get_wlm_controlgroup_ng_config() | DBE_PERF.get_wlm_user_resource_runtime() | global_space_shrink | pg_pool_validate |
+| gs_stat_ustore | table_skewness(text) | table_skewness(text, text, text) | - | - | - | - |
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/other-system-functions/other-system-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/other-system-functions/other-system-functions.md
index 88f411b6..adab1d03 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/other-system-functions/other-system-functions.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/other-system-functions/other-system-functions.md
@@ -1,12 +1,12 @@
----
-title: Other System Functions
-summary: Other System Functions
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Other System Functions
-
-- **[Other System Functions (1)](other-system-functions-1.md)**
-
-- **[Other System Functions (2)](other-system-functions-2.md)**
\ No newline at end of file
+---
+title: Other System Functions
+summary: Other System Functions
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Other System Functions
+
+- **[Other System Functions (1)](other-system-functions-1.md)**
+
+- **[Other System Functions (2)](other-system-functions-2.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/prompt-message-function.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/prompt-message-function.md
index 27633ebe..216c79d3 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/prompt-message-function.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/prompt-message-function.md
@@ -1,21 +1,21 @@
----
-title: Prompt Message Function
-summary: Prompt Message Function
-author: Guo Huan
-date: 2021-10-28
----
-
-# Prompt Message Function
-
-- report_application_error
-
-  Description: This function can be used to throw errors during PL execution.
-
-  Return type: void
-
-  **Table 1** report_application_error parameter description
-
-  | Parameter | Type | Description | Mandatory or Not |
-  | :-------- | :--- | :----------------------------------------------------------- | :--------------- |
-  | log | text | Content of an error message. | Yes |
-  | code | int4 | Error code corresponding to an error message. The value ranges from -20999 to -20000.
| No | +--- +title: Prompt Message Function +summary: Prompt Message Function +author: Guo Huan +date: 2021-10-28 +--- + +# Prompt Message Function + +- report_application_error + + Description: This function can be used to throw errors during PL execution. + + Return type: void + + **Table 1** report_application_error parameter description + + | Parameter | Type | Description | Mandatory or Not | + | :-------- | :--- | :----------------------------------------------------------- | :--------------- | + | log | text | Content of an error message. | Yes | + | code | int4 | Error code corresponding to an error message. The value ranges from -20999 to -20000. | No | diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/range-functions-and-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/range-functions-and-operators.md index 37e9bc0a..d8469bbf 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/range-functions-and-operators.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/range-functions-and-operators.md @@ -1,435 +1,435 @@ ---- -title: Range Functions and Operators -summary: Range Functions and Operators -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Range Functions and Operators - -## Range Operators - -- = - - Description: Equals - - Example: - - ```sql - MogDB=# SELECT int4range(1,5) = '[1,4]'::int4range AS RESULT; - result - -------- - t - (1 row) - ``` - -- <> - - Description: Does not equal to - - Example: - - ```sql - MogDB=# SELECT numrange(1.1,2.2) <> numrange(1.1,2.3) AS RESULT; - result - -------- - t - (1 row) - ``` - -- < - - Description: Is less than - - Example: - - ```sql - MogDB=# SELECT int4range(1,10) < int4range(2,3) AS RESULT; - result - -------- - t - (1 row) - ``` - -- > - - Description: Is greater than - - Example: - - ```sql - MogDB=# SELECT int4range(1,10) > int4range(1,5) AS RESULT; - result - -------- - t - (1 row) - ``` - -- <= - - Description: Is less than or equals - - Example: - - ```sql - MogDB=# SELECT numrange(1.1,2.2) <= numrange(1.1,2.2) AS RESULT; - result - -------- - t - (1 row) - ``` - -- >= - - Description: Is greater than or equals - - Example: - - ```sql - MogDB=# SELECT numrange(1.1,2.2) >= numrange(1.1,2.0) AS RESULT; - result - -------- - t - (1 row) - ``` - -- @> - - Description: Contains ranges - - Example: - - ```sql - MogDB=# SELECT int4range(2,4) @> int4range(2,3) AS RESULT; - result - -------- - t - (1 row) - ``` - -- @> - - Description: Contains elements - - Example: - - ```sql - MogDB=# SELECT '[2011-01-01,2011-03-01)'::tsrange @> '2011-01-10'::timestamp AS RESULT; - result - -------- - t - (1 row) - ``` - -- <@ - - Description: Range is contained by - - Example: - - ```sql - MogDB=# SELECT int4range(2,4) <@ int4range(1,7) AS RESULT; - result - -------- - t - (1 row) - ``` - -- <@ - - Description: Element is contained by - - Example: - - ```sql - MogDB=# SELECT 42 <@ int4range(1,7) AS RESULT; - result - -------- - f - (1 row) - ``` - -- && - - Description: Overlap (have points in common) - - Example: - - ```sql - MogDB=# SELECT int8range(3,7) && int8range(4,12) AS RESULT; - result - -------- - t - (1 row) - ``` - -- << - - Description: Strictly left of - - Example: - - ```sql - MogDB=# SELECT int8range(1,10) << int8range(100,110) AS RESULT; - result - -------- - t - (1 row) - ``` - -- >> - - Description: Strictly right of - - Example: - - ```sql - MogDB=# SELECT int8range(50,60) >> int8range(20,30) AS RESULT; - result - 
-------- - t - (1 row) - ``` - -- &< - - Description: Does not extend to the right of - - Example: - - ```sql - MogDB=# SELECT int8range(1,20) &< int8range(18,20) AS RESULT; - result - -------- - t - (1 row) - ``` - -- &> - - Description: Does not extend to the left of - - Example: - - ```sql - MogDB=# SELECT int8range(7,20) &> int8range(5,10) AS RESULT; - result - -------- - t - (1 row) - ``` - -- -|- - - Description: Is adjacent to - - Example: - - ```sql - MogDB=# SELECT numrange(1.1,2.2) -|- numrange(2.2,3.3) AS RESULT; - result - -------- - t - (1 row) - ``` - -- \+ - - Description: Union - - Example: - - ```sql - MogDB=# SELECT numrange(5,15) + numrange(10,20) AS RESULT; - result - -------- - [5,20) - (1 row) - ``` - -- \* - - Description: Intersection - - Example: - - ```sql - MogDB=# SELECT int8range(5,15) * int8range(10,20) AS RESULT; - result - --------- - [10,15) - (1 row) - ``` - -- \- - - Description: Difference - - Example: - - ```sql - MogDB=# SELECT int8range(5,15) - int8range(10,20) AS RESULT; - result - -------- - [5,10) - (1 row) - ``` - -The simple comparison operators **<**, **>**, **<=**, and **>=** compare the lower bounds first, and only if those are equal, compare the upper bounds. - -The **<<**, **>>**, and **-|-** operators always return false when an empty range is involved; that is, an empty range is not considered to be either before or after any other range. - -The union and difference operators will fail if the resulting range would need to contain two disjoint sub-ranges. - -## Range Functions - -- numrange(numeric, numeric, [text]) - - Description: Specifies a range. - - Return type: Range's element type - - Example: - - ``` - MogDB=# SELECT numrange(1.1,2.2) AS RESULT; - result - -------- - [1.1,2.2) - (1 row) - MogDB=# SELECT numrange(1.1,2.2, '()') AS RESULT; - result - -------- - (1.1,2.2) - (1 row) - ``` - -- lower(anyrange) - - Description: Lower bound of a range - - Return type: Range's element type - - Example: - - ```sql - MogDB=# SELECT lower(numrange(1.1,2.2)) AS RESULT; - result - -------- - 1.1 - (1 row) - ``` - -- upper(anyrange) - - Description: Upper bound of a range - - Return type: Range's element type - - Example: - - ```sql - MogDB=# SELECT upper(numrange(1.1,2.2)) AS RESULT; - result - -------- - 2.2 - (1 row) - ``` - -- isempty(anyrange) - - Description: Is the range empty? - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT isempty(numrange(1.1,2.2)) AS RESULT; - result - -------- - f - (1 row) - ``` - -- lower_inc(anyrange) - - Description: Is the lower bound inclusive? - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT lower_inc(numrange(1.1,2.2)) AS RESULT; - result - -------- - t - (1 row) - ``` - -- upper_inc(anyrange) - - Description: Is the upper bound inclusive? - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT upper_inc(numrange(1.1,2.2)) AS RESULT; - result - -------- - f - (1 row) - ``` - -- lower_inf(anyrange) - - Description: Is the lower bound infinite? - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT lower_inf('(,)'::daterange) AS RESULT; - result - -------- - t - (1 row) - ``` - -- upper_inf(anyrange) - - Description: Is the upper bound infinite? - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT upper_inf('(,)'::daterange) AS RESULT; - result - -------- - t - (1 row) - ``` - -The **lower** and **upper** functions return null if the range is empty or the requested bound is infinite. 
The **lower_inc**, **upper_inc**, **lower_inf**, and **upper_inf** functions all return false for an empty range.
-
-- elem_contained_by_range(anyelement, anyrange)
-
-  Description: Determines whether an element is within the range.
-
-  Return type: Boolean
-
-  Example:
-
-  ```
-  MogDB=# SELECT elem_contained_by_range('2', numrange(1.1,2.2));
-   elem_contained_by_range
-  -------------------------
-   t
-  (1 row)
-  ```
+---
+title: Range Functions and Operators
+summary: Range Functions and Operators
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Range Functions and Operators
+
+## Range Operators
+
+- =
+
+  Description: Equals
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT int4range(1,5) = '[1,4]'::int4range AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- <>
+
+  Description: Is not equal to
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT numrange(1.1,2.2) <> numrange(1.1,2.3) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- <
+
+  Description: Is less than
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT int4range(1,10) < int4range(2,3) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- >
+
+  Description: Is greater than
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT int4range(1,10) > int4range(1,5) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- <=
+
+  Description: Is less than or equal to
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT numrange(1.1,2.2) <= numrange(1.1,2.2) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- >=
+
+  Description: Is greater than or equal to
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT numrange(1.1,2.2) >= numrange(1.1,2.0) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- @>
+
+  Description: Contains ranges
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT int4range(2,4) @> int4range(2,3) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- @>
+
+  Description: Contains elements
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT '[2011-01-01,2011-03-01)'::tsrange @> '2011-01-10'::timestamp AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- <@
+
+  Description: Range is contained by
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT int4range(2,4) <@ int4range(1,7) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- <@
+
+  Description: Element is contained by
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT 42 <@ int4range(1,7) AS RESULT;
+   result
+  --------
+   f
+  (1 row)
+  ```
+
+- &&
+
+  Description: Overlap (have points in common)
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT int8range(3,7) && int8range(4,12) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- <<
+
+  Description: Strictly left of
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT int8range(1,10) << int8range(100,110) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- >>
+
+  Description: Strictly right of
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT int8range(50,60) >> int8range(20,30) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- &<
+
+  Description: Does not extend to the right of
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT int8range(1,20) &< int8range(18,20) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- &>
+
+  Description: Does not extend to the left of
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT int8range(7,20) &> int8range(5,10) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- -|-
+
+  Description: Is adjacent to
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT numrange(1.1,2.2) -|- numrange(2.2,3.3) AS RESULT;
+   result
+  --------
+   t
+  (1 row)
+  ```
+
+- \+
+
+  Description: Union
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT numrange(5,15) + numrange(10,20) AS RESULT;
+   result
+  --------
+   [5,20)
+  (1 row)
+  ```
+
+- \* + + Description: Intersection + + Example: + + ```sql + MogDB=# SELECT int8range(5,15) * int8range(10,20) AS RESULT; + result + --------- + [10,15) + (1 row) + ``` + +- \- + + Description: Difference + + Example: + + ```sql + MogDB=# SELECT int8range(5,15) - int8range(10,20) AS RESULT; + result + -------- + [5,10) + (1 row) + ``` + +The simple comparison operators **<**, **>**, **<=**, and **>=** compare the lower bounds first, and only if those are equal, compare the upper bounds. + +The **<<**, **>>**, and **-|-** operators always return false when an empty range is involved; that is, an empty range is not considered to be either before or after any other range. + +The union and difference operators will fail if the resulting range would need to contain two disjoint sub-ranges. + +## Range Functions + +- numrange(numeric, numeric, [text]) + + Description: Specifies a range. + + Return type: Range's element type + + Example: + + ``` + MogDB=# SELECT numrange(1.1,2.2) AS RESULT; + result + -------- + [1.1,2.2) + (1 row) + MogDB=# SELECT numrange(1.1,2.2, '()') AS RESULT; + result + -------- + (1.1,2.2) + (1 row) + ``` + +- lower(anyrange) + + Description: Lower bound of a range + + Return type: Range's element type + + Example: + + ```sql + MogDB=# SELECT lower(numrange(1.1,2.2)) AS RESULT; + result + -------- + 1.1 + (1 row) + ``` + +- upper(anyrange) + + Description: Upper bound of a range + + Return type: Range's element type + + Example: + + ```sql + MogDB=# SELECT upper(numrange(1.1,2.2)) AS RESULT; + result + -------- + 2.2 + (1 row) + ``` + +- isempty(anyrange) + + Description: Is the range empty? + + Return type: Boolean + + Example: + + ```sql + MogDB=# SELECT isempty(numrange(1.1,2.2)) AS RESULT; + result + -------- + f + (1 row) + ``` + +- lower_inc(anyrange) + + Description: Is the lower bound inclusive? + + Return type: Boolean + + Example: + + ```sql + MogDB=# SELECT lower_inc(numrange(1.1,2.2)) AS RESULT; + result + -------- + t + (1 row) + ``` + +- upper_inc(anyrange) + + Description: Is the upper bound inclusive? + + Return type: Boolean + + Example: + + ```sql + MogDB=# SELECT upper_inc(numrange(1.1,2.2)) AS RESULT; + result + -------- + f + (1 row) + ``` + +- lower_inf(anyrange) + + Description: Is the lower bound infinite? + + Return type: Boolean + + Example: + + ```sql + MogDB=# SELECT lower_inf('(,)'::daterange) AS RESULT; + result + -------- + t + (1 row) + ``` + +- upper_inf(anyrange) + + Description: Is the upper bound infinite? + + Return type: Boolean + + Example: + + ```sql + MogDB=# SELECT upper_inf('(,)'::daterange) AS RESULT; + result + -------- + t + (1 row) + ``` + +The **lower** and **upper** functions return null if the range is empty or the requested bound is infinite. The **lower_inc**, **upper_inc**, **lower_inf**, and **upper_inf** functions all return false for an empty range. + +- elem_contained_by_range(anyelement, anyrange) + + Description: Determines whether an element is within the range. 
+ + Return type: Boolean + + Example: + + ``` + MogDB=# SELECT elem_contained_by_range('2', numrange(1.1,2.2)); + elem_contained_by_range + ------------------------- + t + (1 row) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/set-returning-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/set-returning-functions.md index 9ab308c9..731c6982 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/set-returning-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/set-returning-functions.md @@ -1,131 +1,131 @@ ---- -title: Set Returning Functions -summary: Set Returning Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Set Returning Functions - -## Series Generating Functions - -- generate_series(start, stop) - - Description: Generates a series of values, from **start** to **stop** with a step size of one. - - Parameter type: int, bigint, numeric - - Return type: setof int, setof bigint, setof numeric (same as the parameter type) - -- generate_series(start, stop, step) - - Description: Generates a series of values, from **start** to **stop** with a step size of **step**. - - Parameter type: int, bigint, numeric - - Return type: setof int, setof bigint, setof numeric (same as the parameter type) - -- generate_series(start, stop, step interval) - - Description: Generates a series of values, from **start** to **stop** with a step size of **step**. - - Parameter type: timestamp or timestamp with time zone - - Return type: setof timestamp or setof timestamp with time zone (same as parameter type) - -When **step** is positive, zero rows are returned if **start** is greater than **stop**. Conversely, when **step** is negative, zero rows are returned if **start** is less than **stop**. Zero rows are also returned for **NULL** inputs. It is an error for **step** to be zero. - -Example: - -```sql -MogDB=# SELECT * FROM generate_series(2,4); - generate_series ------------------ - 2 - 3 - 4 -(3 rows) - -MogDB=# SELECT * FROM generate_series(5,1,-2); - generate_series ------------------ - 5 - 3 - 1 -(3 rows) - -MogDB=# SELECT * FROM generate_series(4,3); - generate_series ------------------ -(0 rows) - --- This example applies to the date-plus-integer operator. -MogDB=# SELECT current_date + s.a AS dates FROM generate_series(0,14,7) AS s(a); - dates ------------- - 2017-06-02 - 2017-06-09 - 2017-06-16 -(3 rows) - -MogDB=# SELECT * FROM generate_series('2008-03-01 00:00'::timestamp, '2008-03-04 12:00', '10 hours'); - generate_series ---------------------- - 2008-03-01 00:00:00 - 2008-03-01 10:00:00 - 2008-03-01 20:00:00 - 2008-03-02 06:00:00 - 2008-03-02 16:00:00 - 2008-03-03 02:00:00 - 2008-03-03 12:00:00 - 2008-03-03 22:00:00 - 2008-03-04 08:00:00 -(9 rows) -``` - -## Subscript Generating Functions - -- generate_subscripts(array anyarray, dim int) - - Description: Generates a series comprising the given array's subscripts. - - Return type: setof int - -- generate_subscripts(array anyarray, dim int, reverse boolean) - - Description: Generates a series comprising the given array's subscripts. When **reverse** is true, the series is returned in reverse order. - - Return type: setof int - -**generate_subscripts** is a function that generates the set of valid subscripts for the specified dimension of the given array. Zero rows are returned for arrays that do not have the requested dimension, or for NULL arrays (but valid subscripts are returned for NULL array elements). 
Example: - -```sql --- Basic usage -MogDB=# SELECT generate_subscripts('{NULL,1,NULL,2}'::int[], 1) AS s; - s ---- - 1 - 2 - 3 - 4 -(4 rows) --- Unnest a 2D array: -MogDB=# CREATE OR REPLACE FUNCTION unnest2(anyarray) -RETURNS SETOF anyelement AS $$ -SELECT $1[i][j] - FROM generate_subscripts($1,1) g1(i), - generate_subscripts($1,2) g2(j); -$$ LANGUAGE sql IMMUTABLE; - -MogDB=# SELECT * FROM unnest2(ARRAY[[1,2],[3,4]]); - unnest2 ---------- - 1 - 2 - 3 - 4 -(4 rows) - --- Delete the function. -MogDB=# DROP FUNCTION unnest2; -``` +--- +title: Set Returning Functions +summary: Set Returning Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Set Returning Functions + +## Series Generating Functions + +- generate_series(start, stop) + + Description: Generates a series of values, from **start** to **stop** with a step size of one. + + Parameter type: int, bigint, numeric + + Return type: setof int, setof bigint, setof numeric (same as the parameter type) + +- generate_series(start, stop, step) + + Description: Generates a series of values, from **start** to **stop** with a step size of **step**. + + Parameter type: int, bigint, numeric + + Return type: setof int, setof bigint, setof numeric (same as the parameter type) + +- generate_series(start, stop, step interval) + + Description: Generates a series of values, from **start** to **stop** with a step size of **step**. + + Parameter type: timestamp or timestamp with time zone + + Return type: setof timestamp or setof timestamp with time zone (same as parameter type) + +When **step** is positive, zero rows are returned if **start** is greater than **stop**. Conversely, when **step** is negative, zero rows are returned if **start** is less than **stop**. Zero rows are also returned for **NULL** inputs. It is an error for **step** to be zero. + +Example: + +```sql +MogDB=# SELECT * FROM generate_series(2,4); + generate_series +----------------- + 2 + 3 + 4 +(3 rows) + +MogDB=# SELECT * FROM generate_series(5,1,-2); + generate_series +----------------- + 5 + 3 + 1 +(3 rows) + +MogDB=# SELECT * FROM generate_series(4,3); + generate_series +----------------- +(0 rows) + +-- This example applies to the date-plus-integer operator. +MogDB=# SELECT current_date + s.a AS dates FROM generate_series(0,14,7) AS s(a); + dates +------------ + 2017-06-02 + 2017-06-09 + 2017-06-16 +(3 rows) + +MogDB=# SELECT * FROM generate_series('2008-03-01 00:00'::timestamp, '2008-03-04 12:00', '10 hours'); + generate_series +--------------------- + 2008-03-01 00:00:00 + 2008-03-01 10:00:00 + 2008-03-01 20:00:00 + 2008-03-02 06:00:00 + 2008-03-02 16:00:00 + 2008-03-03 02:00:00 + 2008-03-03 12:00:00 + 2008-03-03 22:00:00 + 2008-03-04 08:00:00 +(9 rows) +``` + +## Subscript Generating Functions + +- generate_subscripts(array anyarray, dim int) + + Description: Generates a series comprising the given array's subscripts. + + Return type: setof int + +- generate_subscripts(array anyarray, dim int, reverse boolean) + + Description: Generates a series comprising the given array's subscripts. When **reverse** is true, the series is returned in reverse order. + + Return type: setof int + +**generate_subscripts** is a function that generates the set of valid subscripts for the specified dimension of the given array. Zero rows are returned for arrays that do not have the requested dimension, or for NULL arrays (but valid subscripts are returned for NULL array elements). 
Example: + +```sql +-- Basic usage +MogDB=# SELECT generate_subscripts('{NULL,1,NULL,2}'::int[], 1) AS s; + s +--- + 1 + 2 + 3 + 4 +(4 rows) +-- Unnest a 2D array: +MogDB=# CREATE OR REPLACE FUNCTION unnest2(anyarray) +RETURNS SETOF anyelement AS $$ +SELECT $1[i][j] + FROM generate_subscripts($1,1) g1(i), + generate_subscripts($1,2) g2(j); +$$ LANGUAGE sql IMMUTABLE; + +MogDB=# SELECT * FROM unnest2(ARRAY[[1,2],[3,4]]); + unnest2 +--------- + 1 + 2 + 3 + 4 +(4 rows) + +-- Delete the function. +MogDB=# DROP FUNCTION unnest2; +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-1.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-1.md index dff20a07..a4a3bef8 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-1.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-1.md @@ -1,660 +1,660 @@ ---- -title: Statistics Information Functions -summary: Statistics Information Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Statistics Information Functions(1) - -Statistics information functions are divided into the following two categories: functions that access databases, using the OID of each table or index in a database to mark the database for which statistics are generated; functions that access servers, identified by the server process ID, whose value ranges from 1 to the number of currently active servers. - -- pg_stat_get_db_conflict_tablespace(oid) - - Description: Specifies the number of queries canceled due to a conflict between the restored tablespace and the deleted tablespace in the database. - - Return type: bigint - -- pg_control_group_config - - Description: Prints Cgroup configurations on the current node. - - Return type: record - -- pg_stat_get_db_stat_reset_time(oid) - - Description: Specifies the most recent time when database statistics were reset. It is initialized to the system time during the first connection to each database. The reset time is updated when you call **pg_stat_reset** on the database and execute **pg_stat_reset_single_table_counters** against any table or index in it. - - Return type: timestamptz - -- pg_stat_get_function_total_time(oid) - - Description: Specifies the total wall clock time spent in the function, in microseconds. The time spent on this function calling other functions is included. - - Return type: bigint - -- pg_stat_get_xact_tuples_returned(oid) - - Description: Specifies the number of rows read through sequential scans when the parameter is a table in the current transaction or the number of index entries returned when the parameter is an index. - - Return type: bigint - -- pg_lock_status() - - Description: Queries information about locks held by open transactions. All users can execute this function. - - Return type: For details, see **PG_LOCKS** which is obtained by querying this function. - -- pg_stat_get_xact_numscans(oid) - - Description: Specifies the number of sequential scans performed when the parameter is a table in the current transaction or the number of index scans performed when the parameter is an index. 
- - Return type: bigint - -- pg_stat_get_xact_blocks_fetched(oid) - - Description: Specifies the number of disk block fetch requests for a table or an index in the current transaction. - - Return type: bigint - -- pg_stat_get_xact_blocks_hit(oid) - - Description: Specifies the number of disk block fetch requests for tables or indexes found in cache in the current transaction. - - Return type: bigint - -- pg_stat_get_xact_function_calls(oid) - - Description: Specifies the number of times the function is called in the current transaction. - - Return type: bigint - -- pg_stat_get_xact_function_self_time(oid) - - Description: Specifies the time spent on this function in the current transaction, excluding the time spent on this function internally calling other functions. - - Return type: bigint - -- pg_stat_get_xact_function_total_time(oid) - - Description: Specifies the total wall clock time (in microseconds) spent on the function in the current transaction, including the time spent on this function internally calling other functions. - - Return type: bigint - -- pg_stat_get_wal_senders() - - Description: Queries walsender information on the primary server. - - Return type: setofrecord - - The following table describes return columns. - - **Table 1** Return column description - - | Column | Type | Description | - | :------------------------- | :----------------------- | :------------------------------------------------- | - | pid | bigint | Thread ID of the WAL sender | - | sender_pid | integer | Lightweight thread ID of the WAL sender | - | local_role | text | Type of the primary node | - | peer_role | text | Type of the standby node | - | peer_state | text | Status of the standby node | - | state | text | Status of the WAL sender | - | catchup_start | timestamp with time zone | Startup time of a catchup task | - | catchup_end | timestamp with time zone | End time of a catchup task | - | sender_sent_location | text | Sending position of the primary node | - | sender_write_location | text | Writing position of the primary node | - | sender_flush_location | text | Flushing position of the primary node | - | sender_replay_location | text | Redo position of the primary node | - | receiver_received_location | text | Receiving position of the standby node | - | receiver_write_location | text | Writing position of the standby node | - | receiver_flush_location | text | Flushing position of the standby node | - | receiver_replay_location | text | Redo position of the standby node | - | sync_percent | text | Synchronization percentage | - | sync_state | text | Synchronization status | - | sync_group | text | Group to which the synchronous replication belongs | - | sync_priority | text | Priority of synchronous replication | - | sync_most_available | text | Maximum availability mode | - | channel | text | Channel information of the WAL sender | - -- get_paxos_replication_info() - - Description: Queries the primary/standby replication status in Paxos mode. - - Return type: setofrecord - - The following table describes return columns. 
- - **Table 2** Return column description - - | Column | Type | Description | - | :-------------------- | :--- | :----------------------------------------------------------- | - | paxos_write_location | text | Location of the Xlog that has been written to the Distribute Consensus Framework (DCF) | - | paxos_commit_location | text | Location of the Xlog agreed in the DCF | - | local_write_location | text | Writing position of a node | - | local_flush_location | text | Flushing position of a node | - | local_replay_location | text | Redo position of a node | - | dcf_replication_info | text | DCF module information of a node | - -- pg_stat_get_stream_replications() - - Description: Queries the primary/standby replication status. - - Return type: setofrecord - - The following table describes return values. - - **Table 3** Return value description - - | Return Parameter | Type | Description | - | :----------------- | :------ | :-------------------- | - | local_role | text | Local role | - | static_connections | integer | Connection statistics | - | db_state | text | Database status | - | detail_information | text | Detailed information | - -- pg_stat_get_db_numbackends(oid) - - Description: Specifies the number of active server processes for a database. - - Return type: integer - -- pg_stat_get_db_xact_commit(oid) - - Description: Specifies the number of transactions committed in a database. - - Return type: bigint - -- pg_stat_get_db_xact_rollback(oid) - - Description: Specifies the number of transactions rolled back in a database. - - Return type: bigint - -- pg_stat_get_db_blocks_fetched(oid) - - Description: Specifies the number of disk blocks fetch requests for a database. - - Return type: bigint - -- pg_stat_get_db_blocks_hit(oid) - - Description: Specifies the number of disk block fetch requests found in cache for a database. - - Return type: bigint - -- pg_stat_get_db_tuples_returned(oid) - - Description: Specifies the number of tuples returned for a database. - - Return type: bigint - -- pg_stat_get_db_tuples_fetched(oid) - - Description: Specifies the number of tuples fetched for a database. - - Return type: bigint - -- pg_stat_get_db_tuples_inserted(oid) - - Description: Specifies the number of tuples inserted in a database. - - Return type: bigint - -- pg_stat_get_db_tuples_updated(oid) - - Description: Specifies the number of tuples updated in a database. - - Return type: bigint - -- pg_stat_get_db_tuples_deleted(oid) - - Description: Specifies the number of tuples deleted in a database. - - Return type: bigint - -- pg_stat_get_db_conflict_lock(oid) - - Description: Specifies the number of lock conflicts in a database. - - Return type: bigint - -- pg_stat_get_db_deadlocks(oid) - - Description: Specifies the number of deadlocks in a database. - - Return type: bigint - -- pg_stat_get_numscans(oid) - - Description: Specifies the number of sequential row scans done if parameters are in a table or the number of index scans done if parameters are in an index. - - Return type: bigint - -- pg_stat_get_role_name(oid) - - Description: Obtains the username based on the user OID. Only users with the **sysadmin** or **monitor admin** permission can access the information. 
- - Return type: text - - Example: - - ``` - MogDB=# select pg_stat_get_role_name(10); - pg_stat_get_role_name - ----------------------- - aabbcc - (1 row) - ``` - -- pg_stat_get_tuples_returned(oid) - - Description: Specifies the number of sequential row scans done if parameters are in a table or the number of index scans done if parameters are in an index. - - Return type: bigint - -- pg_stat_get_tuples_fetched(oid) - - Description: Specifies the number of table rows fetched by bitmap scans if parameters are in a table or the number of table rows fetched by simple index scans using the index if parameters are in an index. - - Return type: bigint - -- pg_stat_get_tuples_inserted(oid) - - Description: Specifies the number of rows inserted into a table. - - Return type: bigint - -- pg_stat_get_tuples_updated(oid) - - Description: Specifies the number of rows updated in a table. - - Return type: bigint - -- pg_stat_get_tuples_deleted(oid) - - Description: Specifies the number of rows deleted from a table. - - Return type: bigint - -- pg_stat_get_tuples_changed(oid) - - Description: Specifies the total number of inserted, updated, and deleted rows after a table was last analyzed or autoanalyzed. - - Return type: bigint - -- pg_stat_get_tuples_hot_updated(oid) - - Description: Specifies the number of rows hot updated in a table. - - Return type: bigint - -- pg_stat_get_live_tuples(oid) - - Description: Specifies the number of live rows in a table. - - Return type: bigint - -- pg_stat_get_dead_tuples(oid) - - Description: Specifies the number of dead rows in a table. - - Return type: bigint - -- pg_stat_get_blocks_fetched(oid) - - Description: Specifies the number of disk block fetch requests for a table or an index. - - Return type: bigint - -- pg_stat_get_blocks_hit(oid) - - Description: Specifies the number of disk block requests found in cache for a table or an index. - - Return type: bigint - -- pg_stat_get_partition_tuples_inserted(oid) - - Description: Specifies the number of rows in the corresponding table partition. - - Return type: bigint - -- pg_stat_get_partition_tuples_updated(oid) - - Description: Specifies the number of rows that have been updated in the corresponding table partition. - - Return type: bigint - -- pg_stat_get_partition_tuples_deleted(oid) - - Description: Specifies the number of rows deleted from the corresponding table partition. - - Return type: bigint - -- pg_stat_get_partition_tuples_changed(oid) - - Description: Specifies the total number of inserted, updated, and deleted rows after a table partition was last analyzed or autoanalyzed. - - Return type: bigint - -- pg_stat_get_partition_live_tuples(oid) - - Description: Specifies the number of live rows in a partitioned table. - - Return type: bigint - -- pg_stat_get_partition_dead_tuples(oid) - - Description: Specifies the number of dead rows in a partitioned table. - - Return type: bigint - -- pg_stat_get_xact_tuples_fetched(oid) - - Description: Specifies the number of tuple rows scanned in a transaction. - - Return type: bigint - -- pg_stat_get_xact_tuples_inserted(oid) - - Description: Specifies the number of tuple inserted into the active subtransactions related to a table. - - Return type: bigint - -- pg_stat_get_xact_tuples_deleted(oid) - - Description: Specifies the number of deleted tuples in the active subtransactions related to a table. 
- - Return type: bigint - -- pg_stat_get_xact_tuples_hot_updated(oid) - - Description: Specifies the number of hot updated tuples in the active subtransactions related to a table. - - Return type: bigint - -- pg_stat_get_xact_tuples_updated(oid) - - Description: Specifies the number of updated tuples in the active subtransactions related to a table. - - Return type: bigint - -- pg_stat_get_xact_partition_tuples_inserted(oid) - - Description: Specifies the number of inserted tuples in the active subtransactions related to a table partition. - - Return type: bigint - -- pg_stat_get_xact_partition_tuples_deleted(oid) - - Description: Specifies the number of deleted tuples in the active subtransactions related to a table partition. - - Return type: bigint - -- pg_stat_get_xact_partition_tuples_hot_updated(oid) - - Description: Specifies the number of hot updated tuples in the active subtransactions related to a table partition. - - Return type: bigint - -- pg_stat_get_xact_partition_tuples_updated(oid) - - Description: Specifies the number of updated tuples in the active subtransactions related to a table partition. - - Return type: bigint - -- pg_stat_get_last_vacuum_time(oid) - - Description: Specifies the most recent time when the autovacuum thread is manually started to clear a table. - - Return type: timestamptz - -- pg_stat_get_last_autovacuum_time(oid) - - Description: Specifies the time of the last vacuum initiated by the autovacuum daemon on a table. - - Return type: timestamptz - -- pg_stat_get_vacuum_count(oid) - - Description: Specifies the number of times a table is manually cleared. - - Return type: bigint - -- pg_stat_get_autovacuum_count(oid) - - Description: Specifies the number of times the autovacuum daemon is started to clear a table. - - Return type: bigint - -- pg_stat_get_last_analyze_time(oid) - - Description: Specifies the last time when a table starts to be analyzed manually or by the autovacuum thread. - - Return type: timestamptz - -- pg_stat_get_last_autoanalyze_time(oid) - - Description: Specifies the time when the last analysis initiated by the autovacuum daemon on a table. - - Return type: timestamptz - -- pg_stat_get_analyze_count(oid) - - Description: Specifies the number of times a table is manually analyzed. - - Return type: bigint - -- pg_stat_get_autoanalyze_count(oid) - - Description: Specifies the number of times the autovacuum daemon analyzes a table. - - Return type: bigint - -- pg_total_autovac_tuples(bool) - - Description: Returns tuple records related to the total autovac, such as **nodename**, **nspname**, **relname**, and tuple IUDs. The input parameters specify whether to query **relation** information. - - Return type: setofrecord - - The following table describes return parameters. 
- - **Table 4** Return parameter description - - | Return Parameter | Type | Description | - | :-------------------- | :----- | :------------------------------------------------------ | - | nodename | name | Node name | - | nspname | name | Name of a namespace | - | relname | name | Name of an object, such as a table, an index, or a view | - | partname | name | Partition name | - | n_dead_tuples | bigint | Number of dead rows in a table partition | - | n_live_tuples | bigint | Number of live rows in a table partition | - | changes_since_analyze | bigint | Number of changes generated by ANALYZE | - -- pg_autovac_status(oid) - - Description: Returns autovac information, such as **nodename**, **nspname**, **relname**, **analyze**, **vacuum**, thresholds of **analyze** and **vacuum**, and the number of analyzed or vacuumed tuples. Only users with the **sysadmin** permission can use this function. - - Return type: setofrecord - - The following table describes return parameters. - - **Table 5** Return parameter description - - | Return Parameter | Type | Description | - | :--------------- | :------ | :------------------------------------------------------ | - | nspname | text | Name of a namespace | - | relname | text | Name of an object, such as a table, an index, or a view | - | nodename | text | Node name | - | doanalyze | Boolean | Whether to execute **ANALYZE** | - | anltuples | bigint | Number of ANALYZE tuples | - | anlthresh | bigint | ANALYZE threshold | - | dovacuum | Boolean | Whether to execute **VACUUM** | - | vactuples | bigint | Number of VACUUM tuples | - | vacthresh | bigint | VACUUM threshold | - -- pg_autovac_timeout(oid) - - Description: Returns the number of consecutive timeouts during the autovac operation on a table. If the table information is invalid or the node information is abnormal, **NULL** will be returned. - - Return type: bigint - -- pg_stat_get_last_data_changed_time(oid) - - Description: Returns the time when **INSERT**, **UPDATE**, **DELETE**, or **EXCHANGE**/**TRUNCATE**/**DROP** **PARTITION** was last performed on a table. The data in the **last_data_changed** column of the PG_STAT_ALL_TABLES view is calculated by using this function. The performance of obtaining the last modification time by using the view is poor when the table has a large amount of data. In this case, you are advised to use the function. - - Return type: timestamptz - -- pg_stat_set_last_data_changed_time(oid) - - Description: Manually changes the time when **INSERT**, **UPDATE**, **DELETE**, or **EXCHANGE**/**TRUNCATE**/**DROP** **PARTITION** was last performed. - - Return type: void - -- pg_backend_pid() - - Description: Specifies the thread ID of the server thread attached to the current session. - - Return type: bigint - -- pg_stat_get_activity(integer) - - Description: Returns a record about the backend with the specified PID. A record for each active backend in the system is returned if **NULL** is specified. The returned result does not contain the **connection_info** column. The initial user, system administrators and users with the **monadmin** permission can view all data. Common users can only query their own results. 
- - Example: - - ``` - MogDB=# select * from pg_stat_get_activity(139881386280704); - datid | pid | sessionid | usesysid | application_name | state | query | waiting | xact_start | query_start | - backend_start | state_change | client_addr | client_hostname | client_port | enqueue | query_id | srespool | global_sessionid | unique_sql_id | trace_id - -------+-----------------+-----------+----------+------------------+--------+------------------------------------------------------+---------+-------------------------------+-------------------------------+----- - --------------------------+------------------------------+-------------+-----------------+-------------+---------+-------------------+--------------+------------------+---------------+---------- - 16545 | 139881386280704 | 69 | 10 | gsql | active | select * from pg_stat_get_activity(139881386280704); | f | 2022-01-18 19:43:05.167718+08 | 2022-01-18 19:43:05.167718+08 | 2022 - -01-18 19:42:33.513507+08 | 2022-01-18 19:43:05.16773+08 | | | -1 | | 72620543991624410 | default_pool | 1938253334#69#0 | 3751941862 | - (1 row) - ``` - - Return type: setofrecord - - The following table describes return parameters. - - **Table 6** Return parameter description - - | Return Parameter | Type | Description | - | :--------------- | :----------------------- | :----------------------------------------------------------- | - | datid | oid | OID of the database that the user session connects to in the backend | - | pid | bigint | Backend thread ID | - | sessionid | bigint | Session ID | - | usesysid | oid | OID of the user logged in to the backend | - | application_name | text | Name of the application connected to the backend | - | state | text | Overall status of the backend | - | query | text | Latest query at the backend. If **state** is **active**, this column shows the ongoing query. In all other states, it shows the last query that was executed. | - | waiting | Boolean | Whether the backend is currently waiting on a lock. If yes, the value is **true**. | - | xact_start | timestamp with time zone | Time when current transaction was started (null if no transaction is active).If the current query is the first of its transaction, the value of this column is the same as that of the **query_start** column. | - | query_start | timestamp with time zone | Time when the currently active query was started, or time when the last query was started if **state** is not **active** | - | backend_start | timestamp with time zone | Time when this process was started, that is, when the client connected to the server | - | state_change | timestamp with time zone | Time when **state** was last modified | - | client_addr | inet | IP address of the client connected to the backend. If this column is **NULL**, it indicates either the client is connected via a Unix socket on the server or this is an internal process, such as **AUTOVACUUM**. | - | client_hostname | text | Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This column will be non-null only for IP connections and only when log_hostname is enabled. 
| - | client_port | integer | TCP port number that the client uses for communication with this backend (**–1** if a Unix socket is used) | - | enqueue | text | Unsupported currently | - | query_id | bigint | ID of a query | - | srespool | name | Name of the resource pool | - | global_sessionid | text | Global session ID | - | unique_sql_id | bigint | Unique SQL statement ID | - | trace_id | text | Driver-specific trace ID, which is associated with an application request | - -- pg_stat_get_activity_with_conninfo(integer) - - Description: Returns a record about the backend with the specified PID. A record for each active backend in the system is returned if **NULL** is specified. The initial user, system administrators and users with the **monadmin** permission can view all data. Common users can only query their own results. - - Return type: setofrecord - - The following table describes return values. - - **Table 7** Return value description - - | Return Value | Return Type | Description | - | :--------------- | :----------------------- | :----------------------------------------------------------- | - | datid | oid | OID of the database that the user session connects to in the backend | - | pid | bigint | Backend thread ID | - | sessionid | bigint | Session ID | - | usesysid | oid | OID of the user logged in to the backend | - | application_name | text | Name of the application connected to the backend | - | state | text | Overall status of the backend | - | query | text | Latest query at the backend. If **state** is **active**, this column shows the ongoing query. In all other states, it shows the last query that was executed. | - | waiting | Boolean | Whether the backend is currently waiting on a lock. If yes, the value is **true**. | - | xact_start | timestamp with time zone | Time when current transaction was started (null if no transaction is active). If the current query is the first of its transaction, the value of this column is the same as that of the **query_start** column. | - | query_start | timestamp with time zone | Time when the currently active query was started, or time when the last query was started if **state** is not **active** | - | backend_start | timestamp with time zone | Time when this process was started, that is, when the client connected to the server | - | state_change | timestamp with time zone | Time when **state** was last modified | - | client_addr | inet | IP address of the client connected to the backend If this column is **NULL**, it indicates either the client is connected via a Unix socket on the server or this is an internal process, such as **AUTOVACUUM**. | - | client_hostname | text | Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This column will be non-null only for IP connections and only when log_hostname is enabled. 
| - | client_port | integer | TCP port number that the client uses for communication with this backend (**–1** if a Unix socket is used) | - | enqueue | text | Unsupported currently | - | query_id | bigint | ID of a query | - | connection_info | text | A string in JSON format recording the driver type, driver version, driver deployment path, and process owner of the connected database | - | srespool | name | Name of the resource pool | - | global_sessionid | text | Global session ID | - | unique_sql_id | bigint | Unique SQL statement ID | - | trace_id | text | Driver-specific trace ID, which is associated with an application request | - -- pg_user_iostat(text) - - Description: Displays the I/O load management information about the job currently executed by the user. - - Return type: record - - The following table describes return fields. - - | Name | Type | Description | - | :------------- | :--- | :----------------------------------------------------------- | - | userid | oid | User ID | - | min_curr_iops | int4 | Minimum I/O of the current user across database nodes. The IOPS is counted by ones for column storage and by ten thousands for row storage. | - | max_curr_iops | int4 | Maximum I/O of the current user across database nodes. The IOPS is counted by ones for column storage and by ten thousands for row storage. | - | min_peak_iops | int4 | Minimum peak I/O of the current user across database nodes. The IOPS is counted by ones for column storage and by ten thousands for row storage. | - | max_peak_iops | int4 | Maximum peak I/O of the current user across database nodes. The IOPS is counted by ones for column storage and by ten thousands for row storage. | - | io_limits | int4 | **io_limits** set for the resource pool specified by the user. The IOPS is counted by ones for column storage and by ten thousands for row storage. | - | io_priority | text | **io_priority** set for the user. The IOPS is counted by ones for column storage and by ten thousands for row storage. | - | curr_io_limits | int4 | Real-time **io_limits** value when **io_priority** is used to control I/Os | - -- pg_stat_get_function_calls(oid) - - Description: Specifies the number of times the function has been called. - - Return type: bigint - -- pg_stat_get_function_self_time(oid) - - Description: Specifies the time spent in only this function. The time spent on this function calling other functions is excluded. - - Return type: bigint - -- pg_stat_get_backend_idset() - - Description: Sets the number of currently active server processes (from 1 to the number of active server processes). - - Return type: setofinteger - -- pg_stat_get_backend_pid(integer) - - Description: Specifies the ID of the given server thread. - - Return type: bigint - -- pg_stat_get_backend_dbid(integer) - - Description: Specifies the ID of the database connected to the given server process. - - Return type: oid - -- pg_stat_get_backend_userid(integer) - - Description: Specifies the user ID of the given server process. 
-
-  Return type: oid
-
-- pg_stat_get_backend_activity(integer)
-
-  Description: Active command of the given server process, but only if the current user is a system administrator or the same user as that of the session being queried and **track_activities** is on
-
+---
+title: Statistics Information Functions
+summary: Statistics Information Functions
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Statistics Information Functions (1)
+
+Statistics information functions fall into two categories: functions that access databases, which use the OID of each table or index in a database to identify the database for which statistics are reported, and functions that access servers, which are identified by the server process ID (an integer from 1 to the number of currently active servers).
+
+- pg_stat_get_db_conflict_tablespace(oid)
+
+  Description: Specifies the number of queries canceled due to a conflict between the restored tablespace and the deleted tablespace in the database.
+
+  Return type: bigint
+
+- pg_control_group_config
+
+  Description: Prints Cgroup configurations on the current node.
+
+  Return type: record
+
+- pg_stat_get_db_stat_reset_time(oid)
+
+  Description: Specifies the most recent time when database statistics were reset. It is initialized to the system time during the first connection to each database. The reset time is updated when you call **pg_stat_reset** on the database and execute **pg_stat_reset_single_table_counters** against any table or index in it.
+
+  Return type: timestamptz
+
+- pg_stat_get_function_total_time(oid)
+
+  Description: Specifies the total wall clock time spent in the function, in microseconds. The time spent on this function calling other functions is included.
+
+  Return type: bigint
+
+- pg_stat_get_xact_tuples_returned(oid)
+
+  Description: Specifies the number of rows read through sequential scans when the parameter is a table in the current transaction or the number of index entries returned when the parameter is an index.
+
+  Return type: bigint
+
+- pg_lock_status()
+
+  Description: Queries information about locks held by open transactions. All users can execute this function.
+
+  Return type: For details, see **PG_LOCKS**, which is obtained by querying this function.
+
+- pg_stat_get_xact_numscans(oid)
+
+  Description: Specifies the number of sequential scans performed when the parameter is a table in the current transaction or the number of index scans performed when the parameter is an index.
+
+  Return type: bigint
+
+- pg_stat_get_xact_blocks_fetched(oid)
+
+  Description: Specifies the number of disk block fetch requests for a table or an index in the current transaction.
+
+  Return type: bigint
+
+- pg_stat_get_xact_blocks_hit(oid)
+
+  Description: Specifies the number of disk block fetch requests for tables or indexes found in cache in the current transaction.
+
+  Return type: bigint
+
+- pg_stat_get_xact_function_calls(oid)
+
+  Description: Specifies the number of times the function is called in the current transaction.
+
+  Return type: bigint
+
+- pg_stat_get_xact_function_self_time(oid)
+
+  Description: Specifies the time spent on this function in the current transaction, excluding the time spent on this function internally calling other functions.
+
+  Return type: bigint
+
+- pg_stat_get_xact_function_total_time(oid)
+
+  Description: Specifies the total wall clock time (in microseconds) spent on the function in the current transaction, including the time spent on this function internally calling other functions.
+
+  Return type: bigint
+
+- pg_stat_get_wal_senders()
+
+  Description: Queries walsender information on the primary server.
+
+  Return type: setofrecord
+
+  The following table describes return columns.
+
+  **Table 1** Return column description
+
+  | Column | Type | Description |
+  | :------------------------- | :----------------------- | :------------------------------------------------- |
+  | pid | bigint | Thread ID of the WAL sender |
+  | sender_pid | integer | Lightweight thread ID of the WAL sender |
+  | local_role | text | Type of the primary node |
+  | peer_role | text | Type of the standby node |
+  | peer_state | text | Status of the standby node |
+  | state | text | Status of the WAL sender |
+  | catchup_start | timestamp with time zone | Startup time of a catchup task |
+  | catchup_end | timestamp with time zone | End time of a catchup task |
+  | sender_sent_location | text | Sending position of the primary node |
+  | sender_write_location | text | Writing position of the primary node |
+  | sender_flush_location | text | Flushing position of the primary node |
+  | sender_replay_location | text | Redo position of the primary node |
+  | receiver_received_location | text | Receiving position of the standby node |
+  | receiver_write_location | text | Writing position of the standby node |
+  | receiver_flush_location | text | Flushing position of the standby node |
+  | receiver_replay_location | text | Redo position of the standby node |
+  | sync_percent | text | Synchronization percentage |
+  | sync_state | text | Synchronization status |
+  | sync_group | text | Group to which the synchronous replication belongs |
+  | sync_priority | text | Priority of synchronous replication |
+  | sync_most_available | text | Maximum availability mode |
+  | channel | text | Channel information of the WAL sender |
+
+- get_paxos_replication_info()
+
+  Description: Queries the primary/standby replication status in Paxos mode.
+
+  Return type: setofrecord
+
+  The following table describes return columns.
+
+  **Table 2** Return column description
+
+  | Column | Type | Description |
+  | :-------------------- | :--- | :----------------------------------------------------------- |
+  | paxos_write_location | text | Location of the Xlog that has been written to the Distributed Consensus Framework (DCF) |
+  | paxos_commit_location | text | Location of the Xlog agreed on in the DCF |
+  | local_write_location | text | Writing position of a node |
+  | local_flush_location | text | Flushing position of a node |
+  | local_replay_location | text | Redo position of a node |
+  | dcf_replication_info | text | DCF module information of a node |
+
+- pg_stat_get_stream_replications()
+
+  Description: Queries the primary/standby replication status.
+
+  Return type: setofrecord
+
+  The following table describes return values.
+
+  **Table 3** Return value description
+
+  | Return Parameter | Type | Description |
+  | :----------------- | :------ | :-------------------- |
+  | local_role | text | Local role |
+  | static_connections | integer | Connection statistics |
+  | db_state | text | Database status |
+  | detail_information | text | Detailed information |
+
+- pg_stat_get_db_numbackends(oid)
+
+  Description: Specifies the number of active server processes for a database.
+
+  Return type: integer
+
+- pg_stat_get_db_xact_commit(oid)
+
+  Description: Specifies the number of transactions committed in a database.
+
+  Return type: bigint
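+
+  Example (an illustrative query; the commit count returned depends entirely on the workload of the database being queried):
+
+  ```sql
+  MogDB=# SELECT pg_stat_get_db_xact_commit(oid) FROM pg_database WHERE datname = current_database();
+   pg_stat_get_db_xact_commit
+  ----------------------------
+                          358
+  (1 row)
+  ```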
+ + Return type: bigint + +- pg_stat_get_db_xact_rollback(oid) + + Description: Specifies the number of transactions rolled back in a database. + + Return type: bigint + +- pg_stat_get_db_blocks_fetched(oid) + + Description: Specifies the number of disk blocks fetch requests for a database. + + Return type: bigint + +- pg_stat_get_db_blocks_hit(oid) + + Description: Specifies the number of disk block fetch requests found in cache for a database. + + Return type: bigint + +- pg_stat_get_db_tuples_returned(oid) + + Description: Specifies the number of tuples returned for a database. + + Return type: bigint + +- pg_stat_get_db_tuples_fetched(oid) + + Description: Specifies the number of tuples fetched for a database. + + Return type: bigint + +- pg_stat_get_db_tuples_inserted(oid) + + Description: Specifies the number of tuples inserted in a database. + + Return type: bigint + +- pg_stat_get_db_tuples_updated(oid) + + Description: Specifies the number of tuples updated in a database. + + Return type: bigint + +- pg_stat_get_db_tuples_deleted(oid) + + Description: Specifies the number of tuples deleted in a database. + + Return type: bigint + +- pg_stat_get_db_conflict_lock(oid) + + Description: Specifies the number of lock conflicts in a database. + + Return type: bigint + +- pg_stat_get_db_deadlocks(oid) + + Description: Specifies the number of deadlocks in a database. + + Return type: bigint + +- pg_stat_get_numscans(oid) + + Description: Specifies the number of sequential row scans done if the parameter is a table or the number of index scans done if the parameter is an index. + + Return type: bigint + +- pg_stat_get_role_name(oid) + + Description: Obtains the username based on the user OID. Only users with the **sysadmin** or **monitor admin** permission can access the information. + + Return type: text + + Example: + + ``` + MogDB=# select pg_stat_get_role_name(10); + pg_stat_get_role_name + ----------------------- + aabbcc + (1 row) + ``` + +- pg_stat_get_tuples_returned(oid) + + Description: Specifies the number of rows read through sequential scans if the parameter is a table or the number of index entries returned if the parameter is an index. + + Return type: bigint + +- pg_stat_get_tuples_fetched(oid) + + Description: Specifies the number of table rows fetched by bitmap scans if the parameter is a table or the number of table rows fetched by simple index scans using the index if the parameter is an index. + + Return type: bigint + +- pg_stat_get_tuples_inserted(oid) + + Description: Specifies the number of rows inserted into a table. + + Return type: bigint + +- pg_stat_get_tuples_updated(oid) + + Description: Specifies the number of rows updated in a table. + + Return type: bigint + +- pg_stat_get_tuples_deleted(oid) + + Description: Specifies the number of rows deleted from a table. + + Return type: bigint + +- pg_stat_get_tuples_changed(oid) + + Description: Specifies the total number of inserted, updated, and deleted rows after a table was last analyzed or autoanalyzed. + + Return type: bigint + +- pg_stat_get_tuples_hot_updated(oid) + + Description: Specifies the number of rows hot updated in a table. + + Return type: bigint + +- pg_stat_get_live_tuples(oid) + + Description: Specifies the number of live rows in a table. + + Return type: bigint + +- pg_stat_get_dead_tuples(oid) + + Description: Specifies the number of dead rows in a table.
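+
+  Example: a minimal sketch; **my_table** is a placeholder for an existing table, and the **regclass** cast resolves the table name to its OID. The count grows with update and delete activity until the table is vacuumed.
+
+  ```
+  MogDB=# SELECT pg_stat_get_dead_tuples('my_table'::regclass);
+  ```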
+ + Return type: bigint + +- pg_stat_get_blocks_fetched(oid) + + Description: Specifies the number of disk block fetch requests for a table or an index. + + Return type: bigint + +- pg_stat_get_blocks_hit(oid) + + Description: Specifies the number of disk block requests found in cache for a table or an index. + + Return type: bigint + +- pg_stat_get_partition_tuples_inserted(oid) + + Description: Specifies the number of rows inserted into the corresponding table partition. + + Return type: bigint + +- pg_stat_get_partition_tuples_updated(oid) + + Description: Specifies the number of rows that have been updated in the corresponding table partition. + + Return type: bigint + +- pg_stat_get_partition_tuples_deleted(oid) + + Description: Specifies the number of rows deleted from the corresponding table partition. + + Return type: bigint + +- pg_stat_get_partition_tuples_changed(oid) + + Description: Specifies the total number of inserted, updated, and deleted rows after a table partition was last analyzed or autoanalyzed. + + Return type: bigint + +- pg_stat_get_partition_live_tuples(oid) + + Description: Specifies the number of live rows in a partitioned table. + + Return type: bigint + +- pg_stat_get_partition_dead_tuples(oid) + + Description: Specifies the number of dead rows in a partitioned table. + + Return type: bigint + +- pg_stat_get_xact_tuples_fetched(oid) + + Description: Specifies the number of tuples scanned in a transaction. + + Return type: bigint + +- pg_stat_get_xact_tuples_inserted(oid) + + Description: Specifies the number of tuples inserted in the active subtransactions related to a table. + + Return type: bigint + +- pg_stat_get_xact_tuples_deleted(oid) + + Description: Specifies the number of deleted tuples in the active subtransactions related to a table. + + Return type: bigint + +- pg_stat_get_xact_tuples_hot_updated(oid) + + Description: Specifies the number of hot updated tuples in the active subtransactions related to a table. + + Return type: bigint + +- pg_stat_get_xact_tuples_updated(oid) + + Description: Specifies the number of updated tuples in the active subtransactions related to a table. + + Return type: bigint + +- pg_stat_get_xact_partition_tuples_inserted(oid) + + Description: Specifies the number of inserted tuples in the active subtransactions related to a table partition. + + Return type: bigint + +- pg_stat_get_xact_partition_tuples_deleted(oid) + + Description: Specifies the number of deleted tuples in the active subtransactions related to a table partition. + + Return type: bigint + +- pg_stat_get_xact_partition_tuples_hot_updated(oid) + + Description: Specifies the number of hot updated tuples in the active subtransactions related to a table partition. + + Return type: bigint + +- pg_stat_get_xact_partition_tuples_updated(oid) + + Description: Specifies the number of updated tuples in the active subtransactions related to a table partition. + + Return type: bigint + +- pg_stat_get_last_vacuum_time(oid) + + Description: Specifies the most recent time when a table was manually vacuumed. + + Return type: timestamptz + +- pg_stat_get_last_autovacuum_time(oid) + + Description: Specifies the time of the last vacuum initiated by the autovacuum daemon on a table. + + Return type: timestamptz + +- pg_stat_get_vacuum_count(oid) + + Description: Specifies the number of times a table is manually cleared.
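+
+  Example: a minimal sketch; **my_table** is a placeholder for an existing table. Pairing this function with **pg_stat_get_last_vacuum_time** shows how often and how recently the table was manually vacuumed.
+
+  ```
+  MogDB=# SELECT pg_stat_get_vacuum_count('my_table'::regclass), pg_stat_get_last_vacuum_time('my_table'::regclass);
+  ```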
+ + Return type: bigint + +- pg_stat_get_autovacuum_count(oid) + + Description: Specifies the number of times the autovacuum daemon is started to clear a table. + + Return type: bigint + +- pg_stat_get_last_analyze_time(oid) + + Description: Specifies the last time when a table started to be analyzed, either manually or by the autovacuum thread. + + Return type: timestamptz + +- pg_stat_get_last_autoanalyze_time(oid) + + Description: Specifies the time of the last analysis initiated by the autovacuum daemon on a table. + + Return type: timestamptz + +- pg_stat_get_analyze_count(oid) + + Description: Specifies the number of times a table is manually analyzed. + + Return type: bigint + +- pg_stat_get_autoanalyze_count(oid) + + Description: Specifies the number of times the autovacuum daemon analyzes a table. + + Return type: bigint + +- pg_total_autovac_tuples(bool,bool) + + Description: Returns tuple records related to the total autovac, such as **nodename**, **nspname**, **relname**, and tuple IUDs. The input parameters specify whether to query **relation** and **local** information, respectively. + + Return type: setofrecord + + The following table describes return parameters. + + **Table 4** Return parameter description + + | Return Parameter | Type | Description | + | :-------------------- | :----- | :------------------------------------------------------ | + | nodename | name | Node name | + | nspname | name | Name of a namespace | + | relname | name | Name of an object, such as a table, an index, or a view | + | partname | name | Partition name | + | n_dead_tuples | bigint | Number of dead rows in a table partition | + | n_live_tuples | bigint | Number of live rows in a table partition | + | changes_since_analyze | bigint | Number of changes since the table was last analyzed | + +- pg_autovac_status(oid) + + Description: Returns autovac information, such as **nodename**, **nspname**, **relname**, **analyze**, **vacuum**, thresholds of **analyze** and **vacuum**, and the number of analyzed or vacuumed tuples. Only users with the **sysadmin** permission can use this function. + + Return type: setofrecord + + The following table describes return parameters. + + **Table 5** Return parameter description + + | Return Parameter | Type | Description | + | :--------------- | :------ | :------------------------------------------------------ | + | nspname | text | Name of a namespace | + | relname | text | Name of an object, such as a table, an index, or a view | + | nodename | text | Node name | + | doanalyze | Boolean | Whether to execute **ANALYZE** | + | anltuples | bigint | Number of ANALYZE tuples | + | anlthresh | bigint | ANALYZE threshold | + | dovacuum | Boolean | Whether to execute **VACUUM** | + | vactuples | bigint | Number of VACUUM tuples | + | vacthresh | bigint | VACUUM threshold | + +- pg_autovac_timeout(oid) + + Description: Returns the number of consecutive timeouts during the autovac operation on a table. If the table information is invalid or the node information is abnormal, **NULL** will be returned. + + Return type: bigint + +- pg_stat_get_last_data_changed_time(oid) + + Description: Returns the time when **INSERT**, **UPDATE**, **DELETE**, or **EXCHANGE**/**TRUNCATE**/**DROP** **PARTITION** was last performed on a table. The data in the **last_data_changed** column of the PG_STAT_ALL_TABLES view is calculated by using this function. The performance of obtaining the last modification time by using the view is poor when the table has a large amount of data.
In this case, you are advised to use the function. + + Return type: timestamptz + +- pg_stat_set_last_data_changed_time(oid) + + Description: Manually changes the time when **INSERT**, **UPDATE**, **DELETE**, or **EXCHANGE**/**TRUNCATE**/**DROP** **PARTITION** was last performed. + + Return type: void + +- pg_backend_pid() + + Description: Specifies the thread ID of the server thread attached to the current session. + + Return type: bigint + +- pg_stat_get_activity(integer) + + Description: Returns a record about the backend with the specified PID. A record for each active backend in the system is returned if **NULL** is specified. The returned result does not contain the **connection_info** column. The initial user, system administrators, and users with the **monadmin** permission can view all data. Common users can only query their own results. + + Example: + + ``` + MogDB=# select * from pg_stat_get_activity(139881386280704); + datid | pid | sessionid | usesysid | application_name | state | query | waiting | xact_start | query_start | + backend_start | state_change | client_addr | client_hostname | client_port | enqueue | query_id | srespool | global_sessionid | unique_sql_id | trace_id + -------+-----------------+-----------+----------+------------------+--------+------------------------------------------------------+---------+-------------------------------+-------------------------------+----- + --------------------------+------------------------------+-------------+-----------------+-------------+---------+-------------------+--------------+------------------+---------------+---------- + 16545 | 139881386280704 | 69 | 10 | gsql | active | select * from pg_stat_get_activity(139881386280704); | f | 2022-01-18 19:43:05.167718+08 | 2022-01-18 19:43:05.167718+08 | 2022 + -01-18 19:42:33.513507+08 | 2022-01-18 19:43:05.16773+08 | | | -1 | | 72620543991624410 | default_pool | 1938253334#69#0 | 3751941862 | + (1 row) + ``` + + Return type: setofrecord + + The following table describes return parameters. + + **Table 6** Return parameter description + + | Return Parameter | Type | Description | + | :--------------- | :----------------------- | :----------------------------------------------------------- | + | datid | oid | OID of the database that the user session connects to in the backend | + | pid | bigint | Backend thread ID | + | sessionid | bigint | Session ID | + | usesysid | oid | OID of the user logged in to the backend | + | application_name | text | Name of the application connected to the backend | + | state | text | Overall status of the backend | + | query | text | Latest query at the backend. If **state** is **active**, this column shows the ongoing query. In all other states, it shows the last query that was executed. | + | waiting | Boolean | Whether the backend is currently waiting on a lock. If yes, the value is **true**. | + | xact_start | timestamp with time zone | Time when current transaction was started (null if no transaction is active). If the current query is the first of its transaction, the value of this column is the same as that of the **query_start** column.
| + | query_start | timestamp with time zone | Time when the currently active query was started, or time when the last query was started if **state** is not **active** | + | backend_start | timestamp with time zone | Time when this process was started, that is, when the client connected to the server | + | state_change | timestamp with time zone | Time when **state** was last modified | + | client_addr | inet | IP address of the client connected to the backend. If this column is **NULL**, it indicates either the client is connected via a Unix socket on the server or this is an internal process, such as **AUTOVACUUM**. | + | client_hostname | text | Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This column will be non-null only for IP connections and only when log_hostname is enabled. | + | client_port | integer | TCP port number that the client uses for communication with this backend (**–1** if a Unix socket is used) | + | enqueue | text | Unsupported currently | + | query_id | bigint | ID of a query | + | srespool | name | Name of the resource pool | + | global_sessionid | text | Global session ID | + | unique_sql_id | bigint | Unique SQL statement ID | + | trace_id | text | Driver-specific trace ID, which is associated with an application request | + +- pg_stat_get_activity_with_conninfo(integer) + + Description: Returns a record about the backend with the specified PID. A record for each active backend in the system is returned if **NULL** is specified. The initial user, system administrators, and users with the **monadmin** permission can view all data. Common users can only query their own results. + + Return type: setofrecord + + The following table describes return values. + + **Table 7** Return value description + + | Return Value | Return Type | Description | + | :--------------- | :----------------------- | :----------------------------------------------------------- | + | datid | oid | OID of the database that the user session connects to in the backend | + | pid | bigint | Backend thread ID | + | sessionid | bigint | Session ID | + | usesysid | oid | OID of the user logged in to the backend | + | application_name | text | Name of the application connected to the backend | + | state | text | Overall status of the backend | + | query | text | Latest query at the backend. If **state** is **active**, this column shows the ongoing query. In all other states, it shows the last query that was executed. | + | waiting | Boolean | Whether the backend is currently waiting on a lock. If yes, the value is **true**. | + | xact_start | timestamp with time zone | Time when current transaction was started (null if no transaction is active). If the current query is the first of its transaction, the value of this column is the same as that of the **query_start** column. | + | query_start | timestamp with time zone | Time when the currently active query was started, or time when the last query was started if **state** is not **active** | + | backend_start | timestamp with time zone | Time when this process was started, that is, when the client connected to the server | + | state_change | timestamp with time zone | Time when **state** was last modified | + | client_addr | inet | IP address of the client connected to the backend. If this column is **NULL**, it indicates either the client is connected via a Unix socket on the server or this is an internal process, such as **AUTOVACUUM**.
| + | client_hostname | text | Host name of the connected client, as reported by a reverse DNS lookup of client_addr. This column will be non-null only for IP connections and only when log_hostname is enabled. | + | client_port | integer | TCP port number that the client uses for communication with this backend (**–1** if a Unix socket is used) | + | enqueue | text | Unsupported currently | + | query_id | bigint | ID of a query | + | connection_info | text | A string in JSON format recording the driver type, driver version, driver deployment path, and process owner of the connected database | + | srespool | name | Name of the resource pool | + | global_sessionid | text | Global session ID | + | unique_sql_id | bigint | Unique SQL statement ID | + | trace_id | text | Driver-specific trace ID, which is associated with an application request | + +- pg_user_iostat(text) + + Description: Displays the I/O load management information about the job currently executed by the user. + + Return type: record + + The following table describes return fields. + + | Name | Type | Description | + | :------------- | :--- | :----------------------------------------------------------- | + | userid | oid | User ID | + | min_curr_iops | int4 | Minimum I/O of the current user across database nodes. The IOPS is counted by ones for column storage and by ten thousands for row storage. | + | max_curr_iops | int4 | Maximum I/O of the current user across database nodes. The IOPS is counted by ones for column storage and by ten thousands for row storage. | + | min_peak_iops | int4 | Minimum peak I/O of the current user across database nodes. The IOPS is counted by ones for column storage and by ten thousands for row storage. | + | max_peak_iops | int4 | Maximum peak I/O of the current user across database nodes. The IOPS is counted by ones for column storage and by ten thousands for row storage. | + | io_limits | int4 | **io_limits** set for the resource pool specified by the user. The IOPS is counted by ones for column storage and by ten thousands for row storage. | + | io_priority | text | **io_priority** set for the user. The IOPS is counted by ones for column storage and by ten thousands for row storage. | + | curr_io_limits | int4 | Real-time **io_limits** value when **io_priority** is used to control I/Os | + +- pg_stat_get_function_calls(oid) + + Description: Specifies the number of times the function has been called. + + Return type: bigint + +- pg_stat_get_function_self_time(oid) + + Description: Specifies the time spent in only this function. The time spent on this function calling other functions is excluded. + + Return type: bigint + +- pg_stat_get_backend_idset() + + Description: Returns the set of numbers of currently active server processes (from 1 to the number of active server processes). + + Return type: setofinteger + +- pg_stat_get_backend_pid(integer) + + Description: Specifies the ID of the given server thread. + + Return type: bigint + +- pg_stat_get_backend_dbid(integer) + + Description: Specifies the ID of the database connected to the given server process. + + Return type: oid + +- pg_stat_get_backend_userid(integer) + + Description: Specifies the user ID of the given server process.
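+
+  Example: a minimal sketch that walks the set returned by **pg_stat_get_backend_idset** and resolves the thread ID, database OID, and user OID of each server process; the rows returned depend on the sessions currently connected.
+
+  ```
+  MogDB=# SELECT s.id, pg_stat_get_backend_pid(s.id), pg_stat_get_backend_dbid(s.id), pg_stat_get_backend_userid(s.id) FROM pg_stat_get_backend_idset() AS s(id);
+  ```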
+ + Return type: oid + +- pg_stat_get_backend_activity(integer) + + Description: Active command of the given server process, but only if the current user is a system administrator or the same user as that of the session being queried and **track_activities** is on + Return type: text \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-2.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-2.md index ec61591b..fdd805a9 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-2.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-2.md @@ -1,662 +1,662 @@ ---- -title: Statistics Information Functions -summary: Statistics Information Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Statistics Information Functions (2) - -- pg_stat_get_backend_waiting(integer) - - Description: True if the given server process is waiting for a lock, but only if the current user is a system administrator or the same user as that of the session being queried and **track_activities** is on - - Return type: Boolean - -- pg_stat_get_backend_activity_start(integer) - - Description: Specifies the time when the given server process's currently executing query is started only if the current user is the system administrator or the user of the session being queried and **track_activities** is enabled. - - Return type: timestamp with time zone - -- pg_stat_get_backend_xact_start(integer) - - Description: Specifies the time when the given server process's currently executing transaction is started only if the current user is the system administrator or the user of the session being queried and **track_activities** is enabled. - - Return type: timestamp with time zone - -- pg_stat_get_backend_start(integer) - - Description: Specifies the time when the given server process is started. If the current user is neither the system administrator nor the user of the session being queried, NULL is **returned**. - - Return type: timestamp with time zone - -- pg_stat_get_backend_client_addr(integer) - - Description: Specifies the IP address of the client connected to the given server process. If the connection is over a Unix domain socket, or if the current user is neither a system administrator nor the same user as that of the session being queried, **NULL** will be returned. - - Return type: inet - -- pg_stat_get_backend_client_port(integer) - - Description: Specifies the TCP port number of the client connected to the given server process. If the connection is over a Unix domain socket, **-1** will be returned. If the current user is neither a system administrator nor the same user as that of the session being queried, **NULL** will be returned. - - Return type: integer - -- pg_stat_get_bgwriter_timed_checkpoints() - - Description: Specifies the time when the background writer starts scheduled checkpoints (because the **checkpoint_timeout** time has expired). - - Return type: bigint - -- pg_stat_get_bgwriter_requested_checkpoints() - - Description: Specifies the time when the background writer starts checkpoints based on requests from the backend because **checkpoint_segments** has been exceeded or the **CHECKPOINT** command has been executed. 
- - Return type: bigint - -- pg_stat_get_bgwriter_buf_written_checkpoints() - - Description: Specifies the number of buffers written by the background writer during checkpoints. - - Return type: bigint - -- pg_stat_get_bgwriter_buf_written_clean() - - Description: Specifies the number of buffers written by the background writer for routine cleaning of dirty pages. - - Return type: bigint - -- pg_stat_get_bgwriter_maxwritten_clean() - - Description: Specifies the time when the background writer stops its cleaning scan because it has written more buffers than specified in the **bgwriter_lru_maxpages** parameter. - - Return type: bigint - -- pg_stat_get_buf_written_backend() - - Description: Specifies the number of buffers written by the backend because they need to allocate a new buffer. - - Return type: bigint - -- pg_stat_get_buf_alloc() - - Description: Specifies the total number of the allocated buffers. - - Return type: bigint - -- pg_stat_clear_snapshot() - - Description: Discards the current statistics snapshot. - - Return type: void - -- pg_stat_reset() - - Description: Resets all statistics counters for the current database to zero (requires system administrator permissions). - - Return type: void - -- pg_stat_reset_shared(text) - - Description: Resets all statistics counters for the current database in each node in a shared cluster to zero (requires system administrator permissions). - - Return type: void - -- pg_stat_reset_single_table_counters(oid) - - Description: Resets statistics for a single table or index in the current database to zero (requires system administrator permissions). - - Return type: void - -- pg_stat_reset_single_function_counters(oid) - - Description: Resets statistics for a single function in the current database to zero (requires system administrator permissions). - - Return type: void - -- pg_stat_session_cu(int, int, int) - - Description: Obtains the compression unit (CU) hit statistics of sessions running on the current node. - - Return type: record - -- pg_stat_get_cu_mem_hit(oid) - - Description: Obtains the number of CU memory hits of a column storage table in the current database of the current node. - - Return type: bigint - -- pg_stat_get_cu_hdd_sync(oid) - - Description: Obtains the times CU is synchronously read from a disk by a column storage table in the current database of the current node. - - Return type: bigint - -- pg_stat_get_cu_hdd_asyn(oid) - - Description: Obtains the times CU is asynchronously read from a disk by a column storage table in the current database of the current node. - - Return type: bigint - -- pg_stat_get_db_cu_mem_hit(oid) - - Description: Obtains the CU memory hit in a database of the current node. - - Return type: bigint - -- pg_stat_get_db_cu_hdd_sync(oid) - - Description: Obtains the times CU is synchronously read from a disk by a database of the current node. - - Return type: bigint - -- fenced_udf_process(integer) - - Description: Shows the number of local UDF Master and Work processes. If the input parameter is set to **1**, the number of Master processes is queried. If the input parameter is set to **2**, the number of Worker processes is queried. If the input parameter is set to **3**, all Worker processes are killed. - - Return type: text - -- total_cpu() - - Description: Obtains the CPU time used by the current node, in jiffies. - - Return type: bigint - -- mot_global_memory_detail() - - Description: Checks the size of the MOT global memory, including data and indexes. 
- - Return type: record - -- mot_local_memory_detail() - - Description: Checks the size of the MOT local memory, including data and indexes. - - Return type: record - -- mot_session_memory_detail() - - Description: Checks the MOT memory usage of all sessions. - - Return type: record - -- total_memory() - - Description: Obtains the size of the virtual memory used by the current node, in KB. - - Return type: bigint - -- pg_stat_get_db_cu_hdd_asyn(oid) - - Description: Obtains the times CU is asynchronously read from a disk by a database of the current node. - - Return type: bigint - -- pg_stat_bad_block(text, int, int, int, int, int, timestamp with time zone, timestamp with time zone) - - Description: Obtains damage information about pages or CUs after the current node is started. - - Example: select * from pg_stat_bad_block(); - - Return type: record - -- pg_stat_bad_block_clear() - - Description: Deletes the page and CU damage information that is read and recorded on the node (requires system administrator permissions). - - Return type: void - -- gs_respool_exception_info(pool text) - - Description: Queries the query rule of a specified resource pool. - - Return type: record - -- gs_control_group_info(pool text) - - Description: Queries information about Cgroups associated with a resource pool. - - Return type: record - - The command output is as follows: - - | Attribute | Value | Description | - | :-------- | :------------------ | :------------------------------------------------------ | - | name | class_a:workload_a1 | Class name and workload name | - | class | class_a | Class Cgroup name | - | workload | workload_a1 | Workload Cgroup name | - | type | DEFWD | Cgroup type (Top, CLASS, BAKWD, DEFWD, and TSWD) | - | gid | 87 | Cgroup ID | - | shares | 30 | Percentage of CPU resources to those on the parent node | - | limits | 0 | Percentage of CPU cores to those on the parent node | - | rate | 0 | Allocation ratio in Timeshare | - | cpucores | 0-3 | Number of CPU cores | - -- gs_all_control_group_info() - - Description: Collects information about all Cgroups in the database. - - Return type: record - -- gs_get_control_group_info() - - Description: Collects information about all Cgroups. - - Return type: record - -- get_instr_workload_info(integer) - - Description: Obtains the transaction volume and time information on the primary database node. 
- - Return type: record - - | Attribute | Value | Description | - | :------------------ | :----------- | :----------------------------------------------------------- | - | resourcepool_oid | 10 | OID of the resource pool (the logic is equivalent to the load) | - | commit_counter | 4 | Number of frontend transactions that were committed | - | rollback_counter | 1 | Number of frontend transactions that were rolled back | - | resp_min | 949 | Minimum response time of frontend transactions (unit: μs) | - | resp_max | 201891 | Maximum response time of frontend transactions (unit: μs) | - | resp_avg | 43564 | Average response time of frontend transactions (unit: μs) | - | resp_total | 217822 | Total response time of frontend transactions (unit: μs) | - | bg_commit_counter | 910 | Number of backend transactions that were committed | - | bg_rollback_counter | 0 | Number of backend transactions that were rolled back | - | bg_resp_min | 97 | Minimum response time of backend transactions (unit: μs) | - | bg_resp_max | 678080687 | Maximum response time of backend transactions (unit: μs) | - | bg_resp_avg | 327847884 | Average response time of backend transactions (unit: μs) | - | bg_resp_total | 298341575300 | Total response time of backend transactions (unit: μs) | - -- pv_instance_time() - - Description: Obtains the time consumed in each execution phase on the current node. - - Return type: record - - | Stat_name Attribute | Value | Description | - | :------------------ | :------ | :----------------------------------------------------------- | - | DB_TIME | 1062385 | Total end-to-end wall time consumed by all threads (unit: μs) | - | CPU_TIME | 311777 | Total CPU time consumed by all threads (unit: μs) | - | EXECUTION_TIME | 380037 | Total time consumed on the executor (unit: μs) | - | PARSE_TIME | 6033 | Total time consumed for parsing SQL statements (unit: μs) | - | PLAN_TIME | 173356 | Total time consumed for generating an execution plan (unit: μs) | - | REWRITE_TIME | 2274 | Total time consumed on query rewriting (unit: μs) | - | PL_EXECUTION_TIME | 0 | Total time consumed for executing PL/SQL statements (unit: μs) | - | PL_COMPILATION_TIME | 557 | Total time consumed for SQL compilation (unit: μs) | - | NET_SEND_TIME | 1673 | Total time consumed for sending data over network (unit: μs) | - | DATA_IO_TIME | 426622 | Total time consumed for data read and write (unit: μs) | - -- DBE_PERF.get_global_instance_time() - - Description: Provides the time consumed in each key phase in MogDB. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- get_instr_unique_sql() - - Description: Obtains information about execution statements (normalized SQL statements) on the current node as a user with the **sysadmin** permission. - - Return type: record - -- reset_unique_sql(text, text, bigint) - - Description: Resets information about system execution statements (normalized SQL statements) information as a user with the **sysadmin** permission. The value of the first parameter can be **global** or **local**. **global** indicates that information on all nodes is cleared, and **local** indicates that only information on the current node is cleared. The value of the second parameter can be **ALL**, **BY_USERID**, or **BY_CNID**. **ALL** indicates that all information is cleared. **BY_USERID** indicates that the SQL information of the user specified by **USERID** is cleared. 
**BY_CNID** indicates that the SQL information related to the primary node of the database in the system is cleared. The third parameter indicates **CNID** and **USERID**. If the second parameter is set to **ALL**, the third parameter does not take effect and can be set to any value. - - Return type: Boolean - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function involves distributed nodes. Currently, MogDB is a centralized database, for which the function of the value **global** is the same as that of the value **local** and the second parameter cannot set to be **BY_CNID**. - -- get_instr_wait_event(NULL) - - Description: Obtains the statistics on wait events of the current node. - - Return type: record - -- get_instr_user_login() - - Description: Obtains the number of user login and logout times on the current node. Only users with the **sysadmin** or **monitor admin** permission can execute this function. - - Return type: record - -- get_instr_rt_percentile(integer) - - Description: Obtains the SQL response time P80 and P95 distribution information of the database. - - Return type: record - -- get_node_stat_reset_time() - - Description: Obtains statistics about reset (restart, primary/standby switchover, and database deletion) time of the current node. - - Return type: record - -- DBE_PERF.get_global_os_runtime() - - Description: Displays the running status of the current operating system. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_global_os_threads() - - Description: Provides information about the threads under all normal nodes of MogDB. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_summary_workload_sql_count() - - Description: Provides statistics about the number of SELECT, UPDATE, INSERT, DELETE, DDL, DML, and DCL statements of different service loads in MogDB. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_summary_workload_sql_elapse_time() - - Description: Provides statistics about the number of SELECT, UPDATE, INSERT, and DELETE statements and response time information (TOTAL, AVG, MIN, and MAX) for different loads in MogDB. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_global_workload_transaction() - - Description: Obtains the transaction volume and time information on all nodes of MogDB. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_global_session_stat() - - Description: Obtains the session status information on all nodes of MogDB. To query this function, you must have the **sysadmin** permission. - - Return type: record - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The status information contains the following 17 items: **commit**, **rollback**, **sql**, **table_scan**, **blocks_fetched**, **physical_read_operation**, **shared_blocks_dirtied**, **local_blocks_dirtied**, **shared_blocks_read**, **local_blocks_read**, **blocks_read_time**, **blocks_write_time**, **sort_imemory**, **sort_idisk**, **cu_mem_hit**, **cu_hdd_sync_read**, and **cu_hdd_asyread**. - -- DBE_PERF.get_global_session_time() - - Description: Provides the time consumed in each key phase of each node in MogDB. To query this function, you must have the **sysadmin** permission. 
- - Return type: record - -- DBE_PERF.get_global_session_memory() - - Description: Displays statistics about memory usage at the session level on each node in the unit of MB, including all the memory allocated to Postgres and stream threads on DNs for jobs currently executed by users. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_global_session_memory_detail() - - Description: Displays statistics about thread memory usage on each node by MemoryContext node. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- create_wlm_session_info(int flag) - - Description: Clears top SQL query statement-level statistics recorded in the current memory. Only the administrator can execute this function. - - Return type: int - -- pg_stat_get_wlm_session_info(int flag) - - Description: Obtains top SQL query statement-level statistics recorded in the current memory. If the input parameter is not 0, the information is cleared from the memory. Only users with the **system admin** or **monitor admin** permission can execute this function. - - Return type: record - -- gs_paxos_stat_replication() - - Description: Queries the standby server information on the primary server. Currently, only the centralized DCF mode is supported. - - Return type: setofrecord - - The following table describes return columns. - - | Column | Type | Description | - | ------------------------ | ---- | ------------------------------------------------------------ | - | local_role | text | Role of the node that sends logs | - | peer_role | text | Role of the node that receives logs | - | local_dcf_role | text | DCF role of the node that sends logs | - | peer_dcf_role | text | DCF role of the node that receives logs | - | peer_state | text | Status of the node that receives logs | - | sender_write_location | text | Location in the Xlog buffer where the node that sends logs is written | - | sender_commit_location | text | Consistency point reached for the DCF logs of the node that sends logs | - | sender_flush_location | text | Location in the Xlog disk where the node that sends logs is written | - | sender_replay_location | text | Location where the node that sends logs replays logs | - | receiver_write_location | text | Location in the Xlog buffer where the node that receives logs is written | - | receiver_commit_location | text | Consistency point reached for the DCF logs of the node that receives logs | - | receiver_flush_location | text | Location in the Xlog disk where the node that receives logs is written | - | receiver_replay_location | text | Location where the node that receives logs replays Xlogs | - | sync_percent | text | Synchronization percentage | - | dcf_run_mode | int4 | DCF synchronization mode | - | channel | text | Channel information | - -- gs_wlm_get_resource_pool_info(int) - - Description: Obtains resource usage statistics of all users. The input parameter can be any value of the INT type or be null. - - Return type: record - -- gs_wlm_get_all_user_resource_info() - - Description: Obtains resource usage statistics of all users. - - Return type: record - -- gs_wlm_get_user_info(int) - - Description: Obtains information about all users. The input parameter can be any value of the INT type or be null. Only users with the **sysadmin** permission can execute this function. - - Return type: record - -- gs_wlm_get_workload_records() - - Description: Obtains all job information in dynamic load management. 
This function is valid only when dynamic load management is enabled. - - Return type: record - -- gs_wlm_readjust_user_space() - - Description: Corrects the storage space usage of all users. Only the administrator can execute this function. - - Return type: record - -- gs_wlm_readjust_user_space_through_username(text name) - - Description: Corrects the storage space usage of a specified user. Common users can use this function to modify only their own usage. Only the administrator can modify the usage of all users. If the value of **name** is **0000**, the usage of all users needs to be modified. - - Return type: record - -- gs_wlm_readjust_user_space_with_reset_flag(text name, boolean isfirst) - - Description: Corrects the storage space usage of a specified user. If the input parameter **isfirst** is set to **true**, statistics are collected from 0. Otherwise, statistics are collected from the previous result. Common users can use this function to modify only their own usage. Only the administrator can modify the usage of all users. If the value of **name** is **0000**, the usage of all users needs to be modified. - - Return type: record - -- gs_wlm_session_respool(bigint) - - Description: Obtains the session resource pool information about all backend threads. The input parameter can be any value of the bigint type or can be null. - - Return type: record - -- gs_wlm_get_session_info() - - Description: This API has been discarded and is unavailable currently. - -- gs_wlm_get_user_session_info() - - Description: This API has been discarded and is unavailable currently. - -- gs_io_wait_status() - - Description: This API does not support single-node systems or centralized systems and is unavailable currently. - -- global_stat_get_hotkeys_info() - - Description: Obtains the statistics on hot keys in the entire database instance. This API does not support single-node systems or centralized systems and is unavailable currently. - -- global_stat_clean_hotkeys() - - Description: Clears statistics on hot keys in the entire database instance. This API does not support single-node systems or centralized systems and is unavailable currently. - -- DBE_PERF.get_global_session_stat_activity() - - Description: Displays information about threads that are running on each node in MogDB. To query this function, you must have the **monitoradmin** permission. - - Return type: record - -- DBE_PERF.get_global_thread_wait_status() - - Description: Displays the block waiting status of backend threads and auxiliary threads on all nodes. To query this function, you must have the **sysadmin** or **monitoradmin** permission. - - Return type: record - -- DBE_PERF.get_global_operator_history_table() - - Description: Displays the operator-related records (persistent) generated after jobs are executed on the primary database node of the current user. To query this function, you must have the **sysadmin** and **monitoradmin** permissions. - - Return type: record - -- DBE_PERF.get_global_operator_history() - - Description: Displays the operator-related records generated after jobs are executed on the primary database node of the current user. To query this function, you must have the **sysadmin** and **monitoradmin** permissions. - - Return type: record - -- DBE_PERF.get_global_operator_runtime() - - Description: Displays real-time operator-related records of jobs executed on the primary database node of the current user. To query this function, you must have the **sysadmin** and **monitoradmin** permissions. 
- - Return type: record - -- DBE_PERF.get_global_statement_complex_history() - - Description: Displays the historical records of complex queries on the primary database node of the current user. To query this function, you must have the **monitoradmin** permission. - - Return type: record - -- DBE_PERF.get_global_statement_complex_history_table() - - Description: Displays the historical records (persistent) of complex queries on the primary database node of the current user. To query this function, you must have the **monitoradmin** permission. - - Return type: record - -- DBE_PERF.get_global_statement_complex_runtime() - - Description: Displays the real-time information of complex queries on the primary database node of the current user. To query this function, you must have the **sysadmin** or **monadmin** permission. - - Return type: record - -- DBE_PERF.get_global_memory_node_detail() - - Description: Displays the memory usage of a certain database on all nodes. To query this function, you must have the **monitoradmin** permission. - - Return type: record - -- DBE_PERF.get_global_shared_memory_detail() - - Description: Displays the usage information about all the shared memory contexts of all nodes. To query this function, you must have the **monitoradmin** permission. - - Return type: record - -- DBE_PERF.get_global_statio_all_indexes() - - Description: Displays statistics about each index displayed in a row in the current database, showing I/O statistics about accesses to that specific index. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_summary_stat_all_tables() - - Description: Displays statistics about a row in each table (including the TOAST table) on each node. - - Return type: record - -- DBE_PERF.get_global_stat_all_tables() - - Description: Displays statistics about a row in each table (including the TOAST table) on each node. - - Return type: record - -- DBE_PERF.get_local_toastname_and_toastindexname() - - Description: Provides the mapping between the name and index of the local TOAST table and its associated table. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_summary_statio_all_indexes() - - Description: Collects statistics about each index displayed in a row in the current databases of all nodes and displays the I/O statistics of a specific index. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_global_statio_all_sequences() - - Description: Provides I/O status information about all sequences in the namespace. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_global_statio_all_tables() - - Description: Displays the I/O statistics about each table in databases on each node. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_summary_statio_all_tables() - - Description: Collects I/O statistics about each table in databases in MogDB. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_local_toast_relation() - - Description: Provides the mapping between the name of the local TOAST table and its associated table. To query this function, you must have the **sysadmin** permission. 
- - Return type: record - -- DBE_PERF.get_global_statio_sys_indexes() - - Description: Displays the I/O status information about all system catalog indexes in namespaces on each node. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_summary_statio_sys_indexes() - - Description: Collects the I/O status information about all system catalog indexes in namespaces on each node. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_global_statio_sys_sequences() - - Description: Provides the I/O status information about all the system sequences in the namespace. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_global_statio_sys_tables() - - Description: Provides I/O status information about all system catalogs in namespaces on each node. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_summary_statio_sys_tables() - - Description: Displays the I/O status information of all system catalogs in the namespace in MogDB. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_global_statio_user_indexes() - - Description: Displays the I/O status information about all user relationship table indexes in namespaces on each node. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_summary_statio_user_indexes() - - Description: Displays the I/O status information about all user relationship table indexes in namespaces in MogDB. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_global_statio_user_sequences() - - Description: Displays the I/O status information about all user sequences in the namespace of each node. To query this function, you must have the **sysadmin** permission. - - Return type: record - -- DBE_PERF.get_global_statio_user_tables() - - Description: Displays the I/O status information about all user relationship tables in namespaces on each node. To query this function, you must have the **sysadmin** permission. - +--- +title: Statistics Information Functions +summary: Statistics Information Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Statistics Information Functions (2) + +- pg_stat_get_backend_waiting(integer) + + Description: True if the given server process is waiting for a lock, but only if the current user is a system administrator or the same user as that of the session being queried and **track_activities** is on + + Return type: Boolean + +- pg_stat_get_backend_activity_start(integer) + + Description: Specifies the time when the given server process's currently executing query is started only if the current user is the system administrator or the user of the session being queried and **track_activities** is enabled. + + Return type: timestamp with time zone + +- pg_stat_get_backend_xact_start(integer) + + Description: Specifies the time when the given server process's currently executing transaction is started only if the current user is the system administrator or the user of the session being queried and **track_activities** is enabled. + + Return type: timestamp with time zone + +- pg_stat_get_backend_start(integer) + + Description: Specifies the time when the given server process is started. 
If the current user is neither the system administrator nor the user of the session being queried, **NULL** is returned. + + Return type: timestamp with time zone + +- pg_stat_get_backend_client_addr(integer) + + Description: Specifies the IP address of the client connected to the given server process. If the connection is over a Unix domain socket, or if the current user is neither a system administrator nor the same user as that of the session being queried, **NULL** will be returned. + + Return type: inet + +- pg_stat_get_backend_client_port(integer) + + Description: Specifies the TCP port number of the client connected to the given server process. If the connection is over a Unix domain socket, **-1** will be returned. If the current user is neither a system administrator nor the same user as that of the session being queried, **NULL** will be returned. + + Return type: integer + +- pg_stat_get_bgwriter_timed_checkpoints() + + Description: Specifies the number of times the background writer has started scheduled checkpoints (because the **checkpoint_timeout** time has expired). + + Return type: bigint + +- pg_stat_get_bgwriter_requested_checkpoints() + + Description: Specifies the number of times the background writer has started checkpoints based on requests from the backend because **checkpoint_segments** has been exceeded or the **CHECKPOINT** command has been executed. + + Return type: bigint + +- pg_stat_get_bgwriter_buf_written_checkpoints() + + Description: Specifies the number of buffers written by the background writer during checkpoints. + + Return type: bigint + +- pg_stat_get_bgwriter_buf_written_clean() + + Description: Specifies the number of buffers written by the background writer for routine cleaning of dirty pages. + + Return type: bigint + +- pg_stat_get_bgwriter_maxwritten_clean() + + Description: Specifies the number of times the background writer has stopped its cleaning scan because it has written more buffers than specified in the **bgwriter_lru_maxpages** parameter. + + Return type: bigint + +- pg_stat_get_buf_written_backend() + + Description: Specifies the number of buffers written by the backend because they need to allocate a new buffer. + + Return type: bigint + +- pg_stat_get_buf_alloc() + + Description: Specifies the total number of the allocated buffers. + + Return type: bigint + +- pg_stat_clear_snapshot() + + Description: Discards the current statistics snapshot. + + Return type: void + +- pg_stat_reset() + + Description: Resets all statistics counters for the current database to zero (requires system administrator permissions). + + Return type: void + +- pg_stat_reset_shared(text) + + Description: Resets all statistics counters for the current database in each node in a shared cluster to zero (requires system administrator permissions). + + Return type: void + +- pg_stat_reset_single_table_counters(oid) + + Description: Resets statistics for a single table or index in the current database to zero (requires system administrator permissions). + + Return type: void + +- pg_stat_reset_single_function_counters(oid) + + Description: Resets statistics for a single function in the current database to zero (requires system administrator permissions). + + Return type: void + +- pg_stat_session_cu(int, int, int) + + Description: Obtains the compression unit (CU) hit statistics of sessions running on the current node.
+ + Return type: record + +- pg_stat_get_cu_mem_hit(oid) + + Description: Obtains the number of CU memory hits of a column storage table in the current database of the current node. + + Return type: bigint + +- pg_stat_get_cu_hdd_sync(oid) + + Description: Obtains the times CU is synchronously read from a disk by a column storage table in the current database of the current node. + + Return type: bigint + +- pg_stat_get_cu_hdd_asyn(oid) + + Description: Obtains the times CU is asynchronously read from a disk by a column storage table in the current database of the current node. + + Return type: bigint + +- pg_stat_get_db_cu_mem_hit(oid) + + Description: Obtains the CU memory hit in a database of the current node. + + Return type: bigint + +- pg_stat_get_db_cu_hdd_sync(oid) + + Description: Obtains the times CU is synchronously read from a disk by a database of the current node. + + Return type: bigint + +- fenced_udf_process(integer) + + Description: Shows the number of local UDF Master and Worker processes. If the input parameter is set to **1**, the number of Master processes is queried. If the input parameter is set to **2**, the number of Worker processes is queried. If the input parameter is set to **3**, all Worker processes are killed. + + Return type: text + +- total_cpu() + + Description: Obtains the CPU time used by the current node, in jiffies. + + Return type: bigint + +- mot_global_memory_detail() + + Description: Checks the size of the MOT global memory, including data and indexes. + + Return type: record + +- mot_local_memory_detail() + + Description: Checks the size of the MOT local memory, including data and indexes. + + Return type: record + +- mot_session_memory_detail() + + Description: Checks the MOT memory usage of all sessions. + + Return type: record + +- total_memory() + + Description: Obtains the size of the virtual memory used by the current node, in KB. + + Return type: bigint + +- pg_stat_get_db_cu_hdd_asyn(oid) + + Description: Obtains the times CU is asynchronously read from a disk by a database of the current node. + + Return type: bigint + +- pg_stat_bad_block(text, int, int, int, int, int, timestamp with time zone, timestamp with time zone) + + Description: Obtains damage information about pages or CUs after the current node is started. + + Example: select * from pg_stat_bad_block(); + + Return type: record + +- pg_stat_bad_block_clear() + + Description: Deletes the page and CU damage information that is read and recorded on the node (requires system administrator permissions). + + Return type: void + +- gs_respool_exception_info(pool text) + + Description: Queries the query rule of a specified resource pool. + + Return type: record + +- gs_control_group_info(pool text) + + Description: Queries information about Cgroups associated with a resource pool.
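+
+  Example: a minimal sketch; **default_pool** is the built-in resource pool, and the call assumes that Cgroups have been configured for the installation. The returned attributes are described in the table below.
+
+  ```
+  MogDB=# SELECT * FROM gs_control_group_info('default_pool');
+  ```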
+ + Return type: record + + The command output is as follows: + + | Attribute | Value | Description | + | :-------- | :------------------ | :------------------------------------------------------ | + | name | class_a:workload_a1 | Class name and workload name | + | class | class_a | Class Cgroup name | + | workload | workload_a1 | Workload Cgroup name | + | type | DEFWD | Cgroup type (Top, CLASS, BAKWD, DEFWD, and TSWD) | + | gid | 87 | Cgroup ID | + | shares | 30 | Percentage of CPU resources to those on the parent node | + | limits | 0 | Percentage of CPU cores to those on the parent node | + | rate | 0 | Allocation ratio in Timeshare | + | cpucores | 0-3 | Number of CPU cores | + +- gs_all_control_group_info() + + Description: Collects information about all Cgroups in the database. + + Return type: record + +- gs_get_control_group_info() + + Description: Collects information about all Cgroups. + + Return type: record + +- get_instr_workload_info(integer) + + Description: Obtains the transaction volume and time information on the primary database node. + + Return type: record + + | Attribute | Value | Description | + | :------------------ | :----------- | :----------------------------------------------------------- | + | resourcepool_oid | 10 | OID of the resource pool (the logic is equivalent to the load) | + | commit_counter | 4 | Number of frontend transactions that were committed | + | rollback_counter | 1 | Number of frontend transactions that were rolled back | + | resp_min | 949 | Minimum response time of frontend transactions (unit: μs) | + | resp_max | 201891 | Maximum response time of frontend transactions (unit: μs) | + | resp_avg | 43564 | Average response time of frontend transactions (unit: μs) | + | resp_total | 217822 | Total response time of frontend transactions (unit: μs) | + | bg_commit_counter | 910 | Number of backend transactions that were committed | + | bg_rollback_counter | 0 | Number of backend transactions that were rolled back | + | bg_resp_min | 97 | Minimum response time of backend transactions (unit: μs) | + | bg_resp_max | 678080687 | Maximum response time of backend transactions (unit: μs) | + | bg_resp_avg | 327847884 | Average response time of backend transactions (unit: μs) | + | bg_resp_total | 298341575300 | Total response time of backend transactions (unit: μs) | + +- pv_instance_time() + + Description: Obtains the time consumed in each execution phase on the current node. 
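+
+  Example: a no-argument call that returns one row per time-consumption item on the current node; the items are described in the table below.
+
+  ```
+  MogDB=# SELECT * FROM pv_instance_time();
+  ```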
+
+  Return type: record
+
+  | Stat_name Attribute | Value   | Description                                                      |
+  | :------------------ | :------ | :--------------------------------------------------------------- |
+  | DB_TIME             | 1062385 | Total end-to-end wall time consumed by all threads (unit: μs)    |
+  | CPU_TIME            | 311777  | Total CPU time consumed by all threads (unit: μs)                |
+  | EXECUTION_TIME      | 380037  | Total time consumed on the executor (unit: μs)                   |
+  | PARSE_TIME          | 6033    | Total time consumed for parsing SQL statements (unit: μs)        |
+  | PLAN_TIME           | 173356  | Total time consumed for generating an execution plan (unit: μs)  |
+  | REWRITE_TIME        | 2274    | Total time consumed on query rewriting (unit: μs)                |
+  | PL_EXECUTION_TIME   | 0       | Total time consumed for executing PL/SQL statements (unit: μs)   |
+  | PL_COMPILATION_TIME | 557     | Total time consumed for SQL compilation (unit: μs)               |
+  | NET_SEND_TIME       | 1673    | Total time consumed for sending data over the network (unit: μs) |
+  | DATA_IO_TIME        | 426622  | Total time consumed for data read and write (unit: μs)           |
+
+- DBE_PERF.get_global_instance_time()
+
+  Description: Provides the time consumed in each key phase in MogDB. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- get_instr_unique_sql()
+
+  Description: Obtains information about execution statements (normalized SQL statements) on the current node as a user with the **sysadmin** permission.
+
+  Return type: record
+
+- reset_unique_sql(text, text, bigint)
+
+  Description: Resets information about system execution statements (normalized SQL statements) as a user with the **sysadmin** permission. The value of the first parameter can be **global** or **local**. **global** indicates that information on all nodes is cleared, and **local** indicates that only information on the current node is cleared. The value of the second parameter can be **ALL**, **BY_USERID**, or **BY_CNID**. **ALL** indicates that all information is cleared. **BY_USERID** indicates that the SQL information of the user specified by **USERID** is cleared. **BY_CNID** indicates that the SQL information related to the primary node of the database in the system is cleared. The third parameter indicates **CNID** and **USERID**. If the second parameter is set to **ALL**, the third parameter does not take effect and can be set to any value.
+
+  Return type: Boolean
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This function involves distributed nodes. Currently, MogDB is a centralized database, in which the value **global** has the same effect as the value **local**, and the second parameter cannot be set to **BY_CNID**.
+
+- get_instr_wait_event(NULL)
+
+  Description: Obtains the statistics on wait events of the current node.
+
+  Return type: record
+
+- get_instr_user_login()
+
+  Description: Obtains the numbers of user logins and logouts on the current node. Only users with the **sysadmin** or **monitor admin** permission can execute this function.
+
+  Return type: record
+
+- get_instr_rt_percentile(integer)
+
+  Description: Obtains the SQL response time P80 and P95 distribution information of the database.
+
+  Return type: record
+
+- get_node_stat_reset_time()
+
+  Description: Obtains statistics about the reset (restart, primary/standby switchover, and database deletion) time of the current node.
+
+  Return type: record
+
+- DBE_PERF.get_global_os_runtime()
+
+  Description: Displays the running status of the current operating system.
To query this function, you must have the **sysadmin** permission. + + Return type: record + +- DBE_PERF.get_global_os_threads() + + Description: Provides information about the threads under all normal nodes of MogDB. To query this function, you must have the **sysadmin** permission. + + Return type: record + +- DBE_PERF.get_summary_workload_sql_count() + + Description: Provides statistics about the number of SELECT, UPDATE, INSERT, DELETE, DDL, DML, and DCL statements of different service loads in MogDB. To query this function, you must have the **sysadmin** permission. + + Return type: record + +- DBE_PERF.get_summary_workload_sql_elapse_time() + + Description: Provides statistics about the number of SELECT, UPDATE, INSERT, and DELETE statements and response time information (TOTAL, AVG, MIN, and MAX) for different loads in MogDB. To query this function, you must have the **sysadmin** permission. + + Return type: record + +- DBE_PERF.get_global_workload_transaction() + + Description: Obtains the transaction volume and time information on all nodes of MogDB. To query this function, you must have the **sysadmin** permission. + + Return type: record + +- DBE_PERF.get_global_session_stat() + + Description: Obtains the session status information on all nodes of MogDB. To query this function, you must have the **sysadmin** permission. + + Return type: record + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The status information contains the following 17 items: **commit**, **rollback**, **sql**, **table_scan**, **blocks_fetched**, **physical_read_operation**, **shared_blocks_dirtied**, **local_blocks_dirtied**, **shared_blocks_read**, **local_blocks_read**, **blocks_read_time**, **blocks_write_time**, **sort_imemory**, **sort_idisk**, **cu_mem_hit**, **cu_hdd_sync_read**, and **cu_hdd_asyread**. + +- DBE_PERF.get_global_session_time() + + Description: Provides the time consumed in each key phase of each node in MogDB. To query this function, you must have the **sysadmin** permission. + + Return type: record + +- DBE_PERF.get_global_session_memory() + + Description: Displays statistics about memory usage at the session level on each node in the unit of MB, including all the memory allocated to Postgres and stream threads on DNs for jobs currently executed by users. To query this function, you must have the **sysadmin** permission. + + Return type: record + +- DBE_PERF.get_global_session_memory_detail() + + Description: Displays statistics about thread memory usage on each node by MemoryContext node. To query this function, you must have the **sysadmin** permission. + + Return type: record + +- create_wlm_session_info(int flag) + + Description: Clears top SQL query statement-level statistics recorded in the current memory. Only the administrator can execute this function. + + Return type: int + +- pg_stat_get_wlm_session_info(int flag) + + Description: Obtains top SQL query statement-level statistics recorded in the current memory. If the input parameter is not 0, the information is cleared from the memory. Only users with the **system admin** or **monitor admin** permission can execute this function. + + Return type: record + +- gs_paxos_stat_replication() + + Description: Queries the standby server information on the primary server. Currently, only the centralized DCF mode is supported. + + Return type: setofrecord + + The following table describes return columns. 
+ + | Column | Type | Description | + | ------------------------ | ---- | ------------------------------------------------------------ | + | local_role | text | Role of the node that sends logs | + | peer_role | text | Role of the node that receives logs | + | local_dcf_role | text | DCF role of the node that sends logs | + | peer_dcf_role | text | DCF role of the node that receives logs | + | peer_state | text | Status of the node that receives logs | + | sender_write_location | text | Location in the Xlog buffer where the node that sends logs is written | + | sender_commit_location | text | Consistency point reached for the DCF logs of the node that sends logs | + | sender_flush_location | text | Location in the Xlog disk where the node that sends logs is written | + | sender_replay_location | text | Location where the node that sends logs replays logs | + | receiver_write_location | text | Location in the Xlog buffer where the node that receives logs is written | + | receiver_commit_location | text | Consistency point reached for the DCF logs of the node that receives logs | + | receiver_flush_location | text | Location in the Xlog disk where the node that receives logs is written | + | receiver_replay_location | text | Location where the node that receives logs replays Xlogs | + | sync_percent | text | Synchronization percentage | + | dcf_run_mode | int4 | DCF synchronization mode | + | channel | text | Channel information | + +- gs_wlm_get_resource_pool_info(int) + + Description: Obtains resource usage statistics of all users. The input parameter can be any value of the INT type or be null. + + Return type: record + +- gs_wlm_get_all_user_resource_info() + + Description: Obtains resource usage statistics of all users. + + Return type: record + +- gs_wlm_get_user_info(int) + + Description: Obtains information about all users. The input parameter can be any value of the INT type or be null. Only users with the **sysadmin** permission can execute this function. + + Return type: record + +- gs_wlm_get_workload_records() + + Description: Obtains all job information in dynamic load management. This function is valid only when dynamic load management is enabled. + + Return type: record + +- gs_wlm_readjust_user_space() + + Description: Corrects the storage space usage of all users. Only the administrator can execute this function. + + Return type: record + +- gs_wlm_readjust_user_space_through_username(text name) + + Description: Corrects the storage space usage of a specified user. Common users can use this function to modify only their own usage. Only the administrator can modify the usage of all users. If the value of **name** is **0000**, the usage of all users needs to be modified. + + Return type: record + +- gs_wlm_readjust_user_space_with_reset_flag(text name, boolean isfirst) + + Description: Corrects the storage space usage of a specified user. If the input parameter **isfirst** is set to **true**, statistics are collected from 0. Otherwise, statistics are collected from the previous result. Common users can use this function to modify only their own usage. Only the administrator can modify the usage of all users. If the value of **name** is **0000**, the usage of all users needs to be modified. + + Return type: record + +- gs_wlm_session_respool(bigint) + + Description: Obtains the session resource pool information about all backend threads. The input parameter can be any value of the bigint type or can be null. 
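+
+  Example: a minimal sketch; because any bigint value (or null) is accepted, **0** is passed here:
+
+  ```sql
+  MogDB=# SELECT * FROM gs_wlm_session_respool(0);
+  ```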
+
+  Return type: record
+
+- gs_wlm_get_session_info()
+
+  Description: This API is deprecated and currently unavailable.
+
+- gs_wlm_get_user_session_info()
+
+  Description: This API is deprecated and currently unavailable.
+
+- gs_io_wait_status()
+
+  Description: This API does not support single-node or centralized systems and is currently unavailable.
+
+- global_stat_get_hotkeys_info()
+
+  Description: Obtains the statistics on hot keys in the entire database instance. This API does not support single-node or centralized systems and is currently unavailable.
+
+- global_stat_clean_hotkeys()
+
+  Description: Clears statistics on hot keys in the entire database instance. This API does not support single-node or centralized systems and is currently unavailable.
+
+- DBE_PERF.get_global_session_stat_activity()
+
+  Description: Displays information about threads that are running on each node in MogDB. To query this function, you must have the **monitoradmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_thread_wait_status()
+
+  Description: Displays the block waiting status of backend threads and auxiliary threads on all nodes. To query this function, you must have the **sysadmin** or **monitoradmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_operator_history_table()
+
+  Description: Displays the operator-related records (persistent) generated after jobs are executed on the primary database node of the current user. To query this function, you must have the **sysadmin** and **monitoradmin** permissions.
+
+  Return type: record
+
+- DBE_PERF.get_global_operator_history()
+
+  Description: Displays the operator-related records generated after jobs are executed on the primary database node of the current user. To query this function, you must have the **sysadmin** and **monitoradmin** permissions.
+
+  Return type: record
+
+- DBE_PERF.get_global_operator_runtime()
+
+  Description: Displays real-time operator-related records of jobs executed on the primary database node of the current user. To query this function, you must have the **sysadmin** and **monitoradmin** permissions.
+
+  Return type: record
+
+- DBE_PERF.get_global_statement_complex_history()
+
+  Description: Displays the historical records of complex queries on the primary database node of the current user. To query this function, you must have the **monitoradmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_statement_complex_history_table()
+
+  Description: Displays the historical records (persistent) of complex queries on the primary database node of the current user. To query this function, you must have the **monitoradmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_statement_complex_runtime()
+
+  Description: Displays the real-time information of complex queries on the primary database node of the current user. To query this function, you must have the **sysadmin** or **monadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_memory_node_detail()
+
+  Description: Displays the memory usage of a certain database on all nodes. To query this function, you must have the **monitoradmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_shared_memory_detail()
+
+  Description: Displays the usage information about all the shared memory contexts of all nodes. To query this function, you must have the **monitoradmin** permission.
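+
+  Example: a minimal sketch, run as a user holding the **monitoradmin** permission:
+
+  ```sql
+  MogDB=# SELECT * FROM DBE_PERF.get_global_shared_memory_detail();
+  ```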
+
+  Return type: record
+
+- DBE_PERF.get_global_statio_all_indexes()
+
+  Description: Displays I/O statistics about accesses to each index in the current database, one row per index. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_summary_stat_all_tables()
+
+  Description: Displays statistics about each table (including its TOAST table) on each node, one row per table.
+
+  Return type: record
+
+- DBE_PERF.get_global_stat_all_tables()
+
+  Description: Displays statistics about each table (including its TOAST table) on each node, one row per table.
+
+  Return type: record
+
+- DBE_PERF.get_local_toastname_and_toastindexname()
+
+  Description: Provides the mapping between the name and index of the local TOAST table and its associated table. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_summary_statio_all_indexes()
+
+  Description: Collects I/O statistics about accesses to each index in the current databases of all nodes, one row per index. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_statio_all_sequences()
+
+  Description: Provides I/O status information about all sequences in the namespace. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_statio_all_tables()
+
+  Description: Displays the I/O statistics about each table in databases on each node. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_summary_statio_all_tables()
+
+  Description: Collects I/O statistics about each table in databases in MogDB. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_local_toast_relation()
+
+  Description: Provides the mapping between the name of the local TOAST table and its associated table. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_statio_sys_indexes()
+
+  Description: Displays the I/O status information about all system catalog indexes in namespaces on each node. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_summary_statio_sys_indexes()
+
+  Description: Collects the I/O status information about all system catalog indexes in namespaces on each node. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_statio_sys_sequences()
+
+  Description: Provides the I/O status information about all the system sequences in the namespace. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_statio_sys_tables()
+
+  Description: Provides I/O status information about all system catalogs in namespaces on each node. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_summary_statio_sys_tables()
+
+  Description: Displays the I/O status information of all system catalogs in the namespace in MogDB. To query this function, you must have the **sysadmin** permission.
+
+  Return type: record
+
+- DBE_PERF.get_global_statio_user_indexes()
+
+  Description: Displays the I/O status information about all user relationship table indexes in namespaces on each node.
To query this function, you must have the **sysadmin** permission. + + Return type: record + +- DBE_PERF.get_summary_statio_user_indexes() + + Description: Displays the I/O status information about all user relationship table indexes in namespaces in MogDB. To query this function, you must have the **sysadmin** permission. + + Return type: record + +- DBE_PERF.get_global_statio_user_sequences() + + Description: Displays the I/O status information about all user sequences in the namespace of each node. To query this function, you must have the **sysadmin** permission. + + Return type: record + +- DBE_PERF.get_global_statio_user_tables() + + Description: Displays the I/O status information about all user relationship tables in namespaces on each node. To query this function, you must have the **sysadmin** permission. + Return type: record \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions.md index 161fb4f3..8e16c99c 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions.md @@ -1,14 +1,14 @@ ---- -title: Statistics Information Functions -summary: Statistics Information Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Statistics Information Functions - -- **[Statistics Information Functions (1)](statistics-information-functions-1.md)** - -- **[Statistics Information Functions (2)](statistics-information-functions-2.md)** - +--- +title: Statistics Information Functions +summary: Statistics Information Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Statistics Information Functions + +- **[Statistics Information Functions (1)](statistics-information-functions-1.md)** + +- **[Statistics Information Functions (2)](statistics-information-functions-2.md)** + - **[Statistics Information Functions (3)](statistics-information-functions-3.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-information-functions/guc-value-inquiry-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-information-functions/guc-value-inquiry-functions.md deleted file mode 100644 index fae1561e..00000000 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-information-functions/guc-value-inquiry-functions.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: GUC Value Inquiry Functions -summary: GUC Value Inquiry Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# GUC Value Inquiry Functions - -- show(text) - - Description: returns the current setting of this GUC variable. - - Return value type: SETOF RECORD - - Example: - - ```sql - MogDB=# select SHOW('enable_custom_parser'); - show - ---------------------------------------------------- - name | set | description - ---------------------+-----+---------------------- - enable_custom_parser | off | Enables custom parser - (3 rows) - ``` - -- show_parameter(text) - - Description: Returns the value currently set for this GUC variable. 
- - Return value type: SETOF RECORD - - Example: - - ```sql - MogDB=# select show_parameter('enable_default'); - show_parameter - - -------------------------------------------------------------------------------------- - name | set | description - ---------------------------------+-----+--------------------------------------------- - enable_default_cfunc_libpath | on | Enable check for c function lib path. - enable_default_compression_table | off | Enables create table default compression tab - le or not. - enable_default_ustore_table | off | Creates all user-defined tables with orienta - tion inplace - (5 rows) - ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-information-functions/system-information-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-information-functions/system-information-functions.md index d44cbcdc..f3b2c086 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-information-functions/system-information-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-information-functions/system-information-functions.md @@ -1,17 +1,16 @@ ---- -title: System Information Functions -summary: System Information Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# System Information Functions - -- **[Session Information Functions](session-information-functions.md)** -- **[Access Privilege Inquiry Functions](access-privilege-inquiry-function.md)** -- **[Schema Visibility Inquiry Functions](schema-visibility-inquiry-functions.md)** -- **[System Catalog Information Functions](system-catalog-information-functions.md)** -- **[Comment Information Functions](comment-information-functions.md)** -- **[Transaction IDs and Snapshots](transaction-ids-and-snapshots.md)** -- **[GUC Value Inquiry Functions](guc-value-inquiry-functions.md)** +--- +title: System Information Functions +summary: System Information Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# System Information Functions + +- **[Session Information Functions](session-information-functions.md)** +- **[Access Privilege Inquiry Functions](access-privilege-inquiry-function.md)** +- **[Schema Visibility Inquiry Functions](schema-visibility-inquiry-functions.md)** +- **[System Catalog Information Functions](system-catalog-information-functions.md)** +- **[Comment Information Functions](comment-information-functions.md)** +- **[Transaction IDs and Snapshots](transaction-ids-and-snapshots.md)** - **[Other Function](other-function.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/advisory-lock-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/advisory-lock-functions.md index 7f00229a..c5e13b63 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/advisory-lock-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/advisory-lock-functions.md @@ -1,244 +1,244 @@ ---- -title: Advisory Lock Functions -summary: Advisory Lock Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Advisory Lock Functions - -Advisory lock functions manage advisory locks. - -- pg_advisory_lock(key bigint) - - Description: Obtains an exclusive session-level advisory lock. 
- - Return type: void - - Note:**pg_advisory_lock** locks resources defined by an application. The resources can be identified using a 64-bit or two nonoverlapped 32-bit key values. If another session locks the resources, the function blocks the resources until they can be used. The lock is exclusive. Multiple locking requests are pushed into the stack. Therefore, if the same resource is locked three times, it must be unlocked three times so that it is released to another session. - -- pg_advisory_lock(key1 int, key2 int) - - Description: Obtains an exclusive session-level advisory lock. - - Return type: void - - Note: Only users with the **sysadmin** permission can add session-level exclusive advisory locks to the key-value pair (65535, 65535). - -- pg_advisory_lock(int4, int4, Name) - - Description: Obtains the exclusive advisory lock of a specified database. - - Return type: void - -- pg_advisory_lock_shared(key bigint) - - Description: Obtains a shared session-level advisory lock. - - Return type: void - -- pg_advisory_lock_shared(key1 int, key2 int) - - Description: Obtains a shared session-level advisory lock. - - Return type: void - - Note:**pg_advisory_lock_shared** works in the same way as **pg_advisory_lock**, except the lock can be shared with other sessions requesting shared locks. Only would-be exclusive lockers are locked out. - -- pg_advisory_unlock(key bigint) - - Description: Releases an exclusive session-level advisory lock. - - Return type: Boolean - -- pg_advisory_unlock(key1 int, key2 int) - - Description: Releases an exclusive session-level advisory lock. - - Return type: Boolean - - Note:**pg_advisory_unlock** releases the obtained exclusive advisory lock. If the release is successful, the function returns **true**. If the lock was not held, it will return **false**. In addition, a SQL warning will be reported by the server. - -- pg_advisory_unlock(int4, int4, Name) - - Description: Releases the exclusive advisory lock of a specified database. - - Return type: Boolean - - Note: If the release is successful, **true** is returned. If no lock is held, **false** is returned. - -- pg_advisory_unlock_shared(key bigint) - - Description: Releases a shared session-level advisory lock. - - Return type: Boolean - -- pg_advisory_unlock_shared(key1 int, key2 int) - - Description: Releases a shared session-level advisory lock. - - Return type: Boolean - - Note:**pg_advisory_unlock_shared** works in the same way as **pg_advisory_unlock**, except it releases a shared session-level advisory lock. - -- pg_advisory_unlock_all() - - Description: Releases all advisory locks owned by the current session. - - Return type: void - - Note:**pg_advisory_unlock_all** releases all advisory locks owned by the current session. The function is implicitly invoked when the session ends even if the client is abnormally disconnected. - -- pg_advisory_xact_lock(key bigint) - - Description: Obtains an exclusive transaction-level advisory lock. - - Return type: void - -- pg_advisory_xact_lock(key1 int, key2 int) - - Description: Obtains an exclusive transaction-level advisory lock. - - Return type: void - - Note:**pg_advisory_xact_lock** works in the same way as **pg_advisory_lock**, except the lock is automatically released at the end of the current transaction and cannot be released explicitly. Only users with the **sysadmin** permission can add transaction-level exclusive advisory locks to the key-value pair (65535, 65535). 
- -- pg_advisory_xact_lock_shared(key bigint) - - Description: Obtains a shared transaction-level advisory lock. - - Return type: void - -- pg_advisory_xact_lock_shared(key1 int, key2 int) - - Description: Obtains a shared transaction-level advisory lock. - - Return type: void - - Note:**pg_advisory_xact_lock_shared** works in the same way as **pg_advisory_lock_shared**, except the lock is automatically released at the end of the current transaction and cannot be released explicitly. - -- pg_try_advisory_lock(key bigint) - - Description: Obtains an exclusive session-level advisory lock if available. - - Return type: Boolean - - Note:**pg_try_advisory_lock** is similar to **pg_advisory_lock**, except **pg_try_advisory_lock** does not block the resource until the resource is released. **pg_try_advisory_lock** either immediately obtains the lock and returns **true** or returns **false**, which indicates the lock cannot be performed currently. - -- pg_try_advisory_lock(key1 int, key2 int) - - Description: Obtains an exclusive session-level advisory lock if available. - - Return type: Boolean - - Note: Only users with the **sysadmin** permission can add session-level exclusive advisory locks to the key-value pair (65535, 65535). - -- pg_try_advisory_lock_shared(key bigint) - - Description: Obtains a shared session-level advisory lock if available. - - Return type: Boolean - -- pg_try_advisory_lock_shared(key1 int, key2 int) - - Description: Obtains a shared session-level advisory lock if available. - - Return type: Boolean - - Note:**pg_try_advisory_lock_shared** is similar to **pg_try_advisory_lock**, except **pg_try_advisory_lock_shared** attempts to obtain a shared lock instead of an exclusive lock. - -- pg_try_advisory_xact_lock(key bigint) - - Description: Obtains an exclusive transaction-level advisory lock if available. - - Return type: Boolean - -- pg_try_advisory_xact_lock(key1 int, key2 int) - - Description: Obtains an exclusive transaction-level advisory lock if available. - - Return type: Boolean - - Note:**pg_try_advisory_xact_lock** works in the same way as **pg_try_advisory_lock**, except the lock, if acquired, is automatically released at the end of the current transaction and cannot be released explicitly. Note: Only users with the **sysadmin** permission can add transaction-level exclusive advisory locks to the key-value pair (65535, 65535). - -- pg_try_advisory_xact_lock_shared(key bigint) - - Description: Obtains a shared transaction-level advisory lock if available. - - Return type: Boolean - -- pg_try_advisory_xact_lock_shared(key1 int, key2 int) - - Description: Obtains a shared transaction-level advisory lock if available. - - Return type: Boolean - - Note:**pg_try_advisory_xact_lock_shared** works in the same way as **pg_try_advisory_lock_shared**, except the lock, if acquired, is automatically released at the end of the current transaction and cannot be released explicitly. - -- lock_cluster_ddl() - - Description: Attempts to obtain a session-level exclusive advisory lock for all active primary database nodes in MogDB. - - Return type: Boolean - - Note: Only users with the **sysadmin** permission can call this function. - -- unlock_cluster_ddl() - - Description: Attempts to add a session-level exclusive advisory lock on the primary database node. - - Return type: Boolean - -- get_lock(text,text) - - Description: Adds a user lock to the database with a specified character string. The second parameter is the lock waiting time. 
- - Return type: Int - -- get_lock(text,double) - - Description: Adds a user lock to the database with a specified character string. The second parameter is the lock waiting time. - - Return type: Int - -- get_lock(text) - - Description: Adds a user lock to the database with a specified character string. - - Return type: Int - -- release_lock(text) - - Description: Releases a specified lock. If the lock is successfully released, **1** is returned. If the current session does not hold the specified lock, **0** is returned. If the current lock does not exist (the lock must be held), **NULL** is returned. - - Return type: Int - -- is_free_lock(text) - - Description: Checks whether a string is idle. If the string is not locked, **1** is returned. Otherwise, **0** is returned. If other errors occur during the check, **NULL** is returned. - - Return type: Int - -- is_used_lock(text) - - Description: Checks who holds the lock of a string and returns the session ID of the corresponding user. If the specified lock is not held, **NULL** is returned. - - Return type: Bigint - -- clear_all_invalid_locks() - - Description: Clears information about invalid locks in the lockname hash table and returns the number of cleared locks. - - Return type: Bigint - -- release_all_locks() - - Description: Releases all locks held by the current session and returns the number of release times. If a single string holds multiple locks, the number of release times is calculated based on the corresponding number instead of only once. - - Return type: Bigint - -- get_all_locks() - - Description: Queries all user locks in the current database and returns the names and holders of all user locks in the form of records. - +--- +title: Advisory Lock Functions +summary: Advisory Lock Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Advisory Lock Functions + +Advisory lock functions manage advisory locks. + +- pg_advisory_lock(key bigint) + + Description: Obtains an exclusive session-level advisory lock. + + Return type: void + + Note:**pg_advisory_lock** locks resources defined by an application. The resources can be identified using a 64-bit or two nonoverlapped 32-bit key values. If another session locks the resources, the function blocks the resources until they can be used. The lock is exclusive. Multiple locking requests are pushed into the stack. Therefore, if the same resource is locked three times, it must be unlocked three times so that it is released to another session. + +- pg_advisory_lock(key1 int, key2 int) + + Description: Obtains an exclusive session-level advisory lock. + + Return type: void + + Note: Only users with the **sysadmin** permission can add session-level exclusive advisory locks to the key-value pair (65535, 65535). + +- pg_advisory_lock(int4, int4, Name) + + Description: Obtains the exclusive advisory lock of a specified database. + + Return type: void + +- pg_advisory_lock_shared(key bigint) + + Description: Obtains a shared session-level advisory lock. + + Return type: void + +- pg_advisory_lock_shared(key1 int, key2 int) + + Description: Obtains a shared session-level advisory lock. + + Return type: void + + Note:**pg_advisory_lock_shared** works in the same way as **pg_advisory_lock**, except the lock can be shared with other sessions requesting shared locks. Only would-be exclusive lockers are locked out. + +- pg_advisory_unlock(key bigint) + + Description: Releases an exclusive session-level advisory lock. 
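+
+  Example: a minimal sketch pairing the lock and unlock calls; **12345** is an arbitrary application-defined key:
+
+  ```sql
+  MogDB=# SELECT pg_advisory_lock(12345);
+  MogDB=# SELECT pg_advisory_unlock(12345);
+  ```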
+ + Return type: Boolean + +- pg_advisory_unlock(key1 int, key2 int) + + Description: Releases an exclusive session-level advisory lock. + + Return type: Boolean + + Note:**pg_advisory_unlock** releases the obtained exclusive advisory lock. If the release is successful, the function returns **true**. If the lock was not held, it will return **false**. In addition, a SQL warning will be reported by the server. + +- pg_advisory_unlock(int4, int4, Name) + + Description: Releases the exclusive advisory lock of a specified database. + + Return type: Boolean + + Note: If the release is successful, **true** is returned. If no lock is held, **false** is returned. + +- pg_advisory_unlock_shared(key bigint) + + Description: Releases a shared session-level advisory lock. + + Return type: Boolean + +- pg_advisory_unlock_shared(key1 int, key2 int) + + Description: Releases a shared session-level advisory lock. + + Return type: Boolean + + Note:**pg_advisory_unlock_shared** works in the same way as **pg_advisory_unlock**, except it releases a shared session-level advisory lock. + +- pg_advisory_unlock_all() + + Description: Releases all advisory locks owned by the current session. + + Return type: void + + Note:**pg_advisory_unlock_all** releases all advisory locks owned by the current session. The function is implicitly invoked when the session ends even if the client is abnormally disconnected. + +- pg_advisory_xact_lock(key bigint) + + Description: Obtains an exclusive transaction-level advisory lock. + + Return type: void + +- pg_advisory_xact_lock(key1 int, key2 int) + + Description: Obtains an exclusive transaction-level advisory lock. + + Return type: void + + Note:**pg_advisory_xact_lock** works in the same way as **pg_advisory_lock**, except the lock is automatically released at the end of the current transaction and cannot be released explicitly. Only users with the **sysadmin** permission can add transaction-level exclusive advisory locks to the key-value pair (65535, 65535). + +- pg_advisory_xact_lock_shared(key bigint) + + Description: Obtains a shared transaction-level advisory lock. + + Return type: void + +- pg_advisory_xact_lock_shared(key1 int, key2 int) + + Description: Obtains a shared transaction-level advisory lock. + + Return type: void + + Note:**pg_advisory_xact_lock_shared** works in the same way as **pg_advisory_lock_shared**, except the lock is automatically released at the end of the current transaction and cannot be released explicitly. + +- pg_try_advisory_lock(key bigint) + + Description: Obtains an exclusive session-level advisory lock if available. + + Return type: Boolean + + Note:**pg_try_advisory_lock** is similar to **pg_advisory_lock**, except **pg_try_advisory_lock** does not block the resource until the resource is released. **pg_try_advisory_lock** either immediately obtains the lock and returns **true** or returns **false**, which indicates the lock cannot be performed currently. + +- pg_try_advisory_lock(key1 int, key2 int) + + Description: Obtains an exclusive session-level advisory lock if available. + + Return type: Boolean + + Note: Only users with the **sysadmin** permission can add session-level exclusive advisory locks to the key-value pair (65535, 65535). + +- pg_try_advisory_lock_shared(key bigint) + + Description: Obtains a shared session-level advisory lock if available. + + Return type: Boolean + +- pg_try_advisory_lock_shared(key1 int, key2 int) + + Description: Obtains a shared session-level advisory lock if available. 
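+
+  Example: a minimal sketch; **1** and **2** form an arbitrary two-part key, and the call returns **true** or **false** immediately instead of blocking:
+
+  ```sql
+  MogDB=# SELECT pg_try_advisory_lock_shared(1, 2);
+  ```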
+
+  Return type: Boolean
+
+  Note:**pg_try_advisory_lock_shared** is similar to **pg_try_advisory_lock**, except **pg_try_advisory_lock_shared** attempts to obtain a shared lock instead of an exclusive lock.
+
+- pg_try_advisory_xact_lock(key bigint)
+
+  Description: Obtains an exclusive transaction-level advisory lock if available.
+
+  Return type: Boolean
+
+- pg_try_advisory_xact_lock(key1 int, key2 int)
+
+  Description: Obtains an exclusive transaction-level advisory lock if available.
+
+  Return type: Boolean
+
+  Note:**pg_try_advisory_xact_lock** works in the same way as **pg_try_advisory_lock**, except that the lock, if acquired, is automatically released at the end of the current transaction and cannot be released explicitly. Only users with the **sysadmin** permission can add transaction-level exclusive advisory locks to the key-value pair (65535, 65535).
+
+- pg_try_advisory_xact_lock_shared(key bigint)
+
+  Description: Obtains a shared transaction-level advisory lock if available.
+
+  Return type: Boolean
+
+- pg_try_advisory_xact_lock_shared(key1 int, key2 int)
+
+  Description: Obtains a shared transaction-level advisory lock if available.
+
+  Return type: Boolean
+
+  Note:**pg_try_advisory_xact_lock_shared** works in the same way as **pg_try_advisory_lock_shared**, except that the lock, if acquired, is automatically released at the end of the current transaction and cannot be released explicitly.
+
+- lock_cluster_ddl()
+
+  Description: Attempts to obtain a session-level exclusive advisory lock for all active primary database nodes in MogDB.
+
+  Return type: Boolean
+
+  Note: Only users with the **sysadmin** permission can call this function.
+
+- unlock_cluster_ddl()
+
+  Description: Releases the session-level exclusive advisory lock added by **lock_cluster_ddl** on the primary database node.
+
+  Return type: Boolean
+
+- get_lock(text,text)
+
+  Description: Adds a user lock to the database with a specified character string. The second parameter is the lock waiting time.
+
+  Return type: Int
+
+- get_lock(text,double)
+
+  Description: Adds a user lock to the database with a specified character string. The second parameter is the lock waiting time.
+
+  Return type: Int
+
+- get_lock(text)
+
+  Description: Adds a user lock to the database with a specified character string.
+
+  Return type: Int
+
+- release_lock(text)
+
+  Description: Releases a specified lock. If the lock is successfully released, **1** is returned. If the current session does not hold the specified lock, **0** is returned. If the specified lock does not exist (it has never been held), **NULL** is returned.
+
+  Return type: Int
+
+- is_free_lock(text)
+
+  Description: Checks whether a string is idle. If the string is not locked, **1** is returned. Otherwise, **0** is returned. If other errors occur during the check, **NULL** is returned.
+
+  Return type: Int
+
+- is_used_lock(text)
+
+  Description: Checks who holds the lock of a string and returns the session ID of the corresponding user. If the specified lock is not held, **NULL** is returned.
+
+  Return type: Bigint
+
+- clear_all_invalid_locks()
+
+  Description: Clears information about invalid locks in the lockname hash table and returns the number of cleared locks.
+
+  Return type: Bigint
+
+- release_all_locks()
+
+  Description: Releases all locks held by the current session and returns the number of release times. If a single string is locked multiple times, each acquisition is counted separately rather than only once.
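+
+  Example: a minimal sketch; **my_lock1** and **my_lock2** are hypothetical lock names, each acquired once, so the final call should return **2**:
+
+  ```sql
+  MogDB=# SELECT get_lock('my_lock1');
+  MogDB=# SELECT get_lock('my_lock2');
+  MogDB=# SELECT release_all_locks();
+  ```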
+ + Return type: Bigint + +- get_all_locks() + + Description: Queries all user locks in the current database and returns the names and holders of all user locks in the form of records. + Return type: Record \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/configuration-settings-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/configuration-settings-functions.md index fe2e7d56..bf033f08 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/configuration-settings-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/configuration-settings-functions.md @@ -1,66 +1,66 @@ ---- -title: Configuration Settings Functions -summary: Configuration Settings Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Configuration Settings Functions - -Configuration setting functions are used for querying and modifying configuration parameters during running. - -- current_setting(setting_name) - - Description: Specifies the current setting. - - Return type: text - - Note:**current_setting** obtains the current setting of **setting_name** by query. It is equivalent to the **SHOW** statement. - - Example: - - ```sql - MogDB=# SELECT current_setting('datestyle'); - - current_setting - ----------------- - ISO, MDY - (1 row) - ``` - -- set_working_grand_version_num_manually(tmp_version) - - Description: Upgrades new features of the database by switching the authorization version. - - Return type: void - -- shell_in(type) - - Description: Inputs a route for the shell type that has not yet been filled. - - Return type: void - -- shell_out(type) - - Description: Outputs a route for the shell type that has not yet been filled. - - Return type: void - -- set_config(setting_name, new_value, is_local) - - Description: Sets the parameter and returns a new value. - - Return type: text - - Note: **set_config** sets **setting_name** to **new_value**. If **is_local** is set to **true**, **new_value** applies only to the current transaction. If you want **new_value** to apply for the current session, set the value to **false** instead. The function corresponds to the **SET** statement. - - Example: - - ```sql - MogDB=# SELECT set_config('log_statement_stats', 'off', false); - - set_config - ------------ - off - (1 row) - ``` +--- +title: Configuration Settings Functions +summary: Configuration Settings Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Configuration Settings Functions + +Configuration setting functions are used for querying and modifying configuration parameters during running. + +- current_setting(setting_name) + + Description: Specifies the current setting. + + Return type: text + + Note:**current_setting** obtains the current setting of **setting_name** by query. It is equivalent to the **SHOW** statement. + + Example: + + ```sql + MogDB=# SELECT current_setting('datestyle'); + + current_setting + ----------------- + ISO, MDY + (1 row) + ``` + +- set_working_grand_version_num_manually(tmp_version) + + Description: Upgrades new features of the database by switching the authorization version. + + Return type: void + +- shell_in(type) + + Description: Inputs a route for the shell type that has not yet been filled. + + Return type: void + +- shell_out(type) + + Description: Outputs a route for the shell type that has not yet been filled. 
+ + Return type: void + +- set_config(setting_name, new_value, is_local) + + Description: Sets the parameter and returns a new value. + + Return type: text + + Note: **set_config** sets **setting_name** to **new_value**. If **is_local** is set to **true**, **new_value** applies only to the current transaction. If you want **new_value** to apply for the current session, set the value to **false** instead. The function corresponds to the **SET** statement. + + Example: + + ```sql + MogDB=# SELECT set_config('log_statement_stats', 'off', false); + + set_config + ------------ + off + (1 row) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/database-object-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/database-object-functions.md index 5c141fa7..41e63d1b 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/database-object-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/database-object-functions.md @@ -1,438 +1,438 @@ ---- -title: Database Object Functions -summary: Database Object Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Database Object Functions - -## Database Object Size Functions - -Database object size functions calculate the actual disk space used by database objects. - -- pg_column_size(any) - - Description: Specifies the number of bytes used to store a particular value (possibly compressed) - - Return type: int - - Note: **pg_column_size** displays the space for storing an independent data value. - - ```sql - MogDB=# SELECT pg_column_size(1); - pg_column_size - ---------------- - 4 - (1 row) - ``` - -- pg_database_size(oid) - - Description: Specifies the disk space used by the database with the specified OID. - - Return type: bigint - -- pg_database_size(name) - - Description: Specifies the disk space used by the database with the specified name. - - Return type: bigint - - Note: **pg_database_size** receives the OID or name of a database and returns the disk space used by the corresponding object. - - Example: - - ```sql - MogDB=# SELECT pg_database_size('postgres'); - pg_database_size - ------------------ - 51590112 - (1 row) - ``` - -- pg_relation_size(oid) - - Description: Specifies the disk space used by the table with a specified OID or index. - - Return type: bigint - -- get_db_source_datasize() - - Description: Estimates the total size of non-compressed data in the current database. - - Return type: bigint - - Remarks: (1) Perform an analysis before this function is called. (2) Calculate the total data capacity in the non-compressed state by estimating the compression rate of the column-store tables. - - Example: - - ```sql - MogDB=# analyze; - ANALYZE - MogDB=# select get_db_source_datasize(); - get_db_source_datasize - ------------------------ - 35384925667 - (1 row) - ``` - -- pg_relation_size(text) - - Description: Specifies the disk space used by the table with a specified name or index. The table name can be schema-qualified. - - Return type: bigint - -- pg_relation_size(relation regclass, fork text) - - Description: Specifies the disk space used by the specified bifurcating tree ('main', 'fsm', or 'vm') of a certain table or index. - - Return type: bigint - -- pg_relation_size(relation regclass) - - Description: Is an abbreviation of **pg_relation_size(…, 'main')**. 
- - Return type: bigint - - Note: **pg_relation_size** receives the OID or name of a table, an index, or a compressed table, and returns the size. - -- pg_partition_size(oid,oid) - - Description: Specifies the disk space used by the partition with a specified OID. The first **oid** is the OID of the table and the second **oid** is the OID of the partition. - - Return type: bigint - -- pg_partition_size(text, text) - - Description: Specifies the disk space used by the partition with a specified name. The first **text** is the table name and the second **text** is the partition name. - - Return type: bigint - -- pg_partition_indexes_size(oid,oid) - - Description: Specifies the disk space used by the index of the partition with a specified OID. The first **oid** is the OID of the table and the second **oid** is the OID of the partition. - - Return type: bigint - -- pg_partition_indexes_size(text,text) - - Description: Specifies the disk space used by the index of the partition with a specified name. The first **text** is the table name and the second **text** is the partition name. - - Return type: bigint - -- pg_indexes_size(regclass) - - Description: Specifies the total disk space used by the index appended to the specified table. - - Return type: bigint - -- pg_size_pretty(bigint) - - Description: Converts a size in bytes expressed as a 64-bit integer into a human-readable format with size units. - - Return type: text - -- pg_size_pretty(numeric) - - Description: Converts a size in bytes expressed as a numeric value into a human-readable format with size units. - - Return type: text - - Note: **pg_size_pretty** formats the results of other functions into a human-readable format. KB/MB/GB/TB can be used. - -- pg_table_size(regclass) - - Description: Specifies the disk space used by the specified table, excluding indexes (but including TOAST, free space mapping, and visibility mapping). - - Return type: bigint - -- pg_tablespace_size(oid) - - Description: Specifies the disk space used by the tablespace with a specified OID. - - Return type: bigint - -- pg_tablespace_size(name) - - Description: Specifies the disk space used by the tablespace with a specified name. - - Return type: bigint - - Note: - - **pg_tablespace_size** receives the OID or name of a database and returns the disk space used by the corresponding object. - -- pg_total_relation_size(oid) - - Description: Specifies the disk space used by the table with a specified OID, including the index and the compressed data. - - Return type: bigint - -- pg_total_relation_size(regclass) - - Description: Specifies the total disk space used by the specified table, including all indexes and TOAST data. - - Return type: bigint - -- pg_total_relation_size(text) - - Description: Specifies the disk space used by the table with a specified name, including the index and the compressed data. The table name can be schema-qualified. - - Return type: bigint - - Note: **pg_total_relation_size** receives the OID or name of a table or a compressed table, and returns the sizes of the data, related indexes, and the compressed table in bytes. - -- datalength(any) - - Description: Specifies the number of bytes used by an expression of a specified data type (data management space, data compression, or data type conversion is not considered). - - Return type: int - - Note: **datalength** is used to calculate the space of an independent data value. 
- - Example: - - ```sql - MogDB=# SELECT datalength(1); - datalength - ------------ - 4 - (1 row) - ``` - - The following table lists the supported data types and calculation methods. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Data TypeStorage Space
Numeric data typesInteger typesTINYINT1
SMALLINT2
INTEGER4
BINARY_INTEGER4
BIGINT8
Arbitrary precision typesDECIMALEvery four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately.
NUMERICEvery four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately.
NUMBEREvery four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately.
Sequence integerSMALLSERIAL2
SERIAL4
LARGESERIA8
BIGSERIALEvery four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately.
Floating point typesFLOAT44
DOUBLE PRECISION8
FLOAT88
BINARY_DOUBLE8
FLOAT[(p)]Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately.
DEC[(p[,s])]Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately.
INTEGER[(p[,s])]Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately.
Boolean data typesBoolean typesBOOLEAN1
Character data typesCharacter typesCHARn
CHAR(n)n
CHARACTER(n)n
NCHAR(n)n
VARCHAR(n)n
CHARACTERActual number of bytes of a character
VARYING(n)Actual number of bytes of a character
VARCHAR2(n)Actual number of bytes of a character
NVARCHAR(n)Actual number of bytes of a character
NVARCHAR2(n)Actual number of bytes of a character
TEXTActual number of bytes of a character
CLOBActual number of bytes of a character
Time data typesTime typesDATE8
TIME8
TIMEZ12
TIMESTAMP8
TIMESTAMPZ8
SMALLDATETIME8
INTERVAL DAY TO SECOND16
INTERVAL16
RELTIME4
ABSTIME4
TINTERVAL12
- -## Database Object Position Functions - -- pg_relation_filenode(relation regclass) - - Description: Specifies the ID of a filenode with the specified relationship. - - Return type: oid - - Description: **pg_relation_filenode** receives the OID or name of a table, an index, a sequence, or a compressed table, and returns the number of **filenode** allocated to it. **filenode** is the basic component of the file name used by the relationship. For most tables, the result is the same as that of **pg_class.relfilenode**. For a specified system directory, **relfilenode** is set to **0** and this function must be used to obtain the correct value. If a relationship that is not stored is transmitted, such as a view, this function returns **NULL**. - -- pg_relation_filepath(relation regclass) - - Description: Specifies the name of a file path with the specified relationship. - - Return type: text - - Description: **pg_relation_filepath** is similar to **pg_relation_filenode**, except that **pg_relation_filepath** returns the whole file path name for the relationship (relative to the data directory **PGDATA** of MogDB). - -- pg_filenode_relation(tablespace oid, filenode oid) - - Description: Obtains the table names corresponding to the tablespace and relfilenode. - - Return type: regclass - -- pg_partition_filenode(partition_oid) - - Description: Obtains **filenode** corresponding to the OID lock of a specified partitioned table. - - Return type: oid - -- pg_partition_filepath(partition_oid) - - Description: Specifies the file path name of a partition. - - Return type: text - -## Recycle Bin Object Functions - -- gs_is_recycle_object(classid, objid, objname) - - Description: Determines whether an object is in the recycle bin. - - Return type: Boolean +--- +title: Database Object Functions +summary: Database Object Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Database Object Functions + +## Database Object Size Functions + +Database object size functions calculate the actual disk space used by database objects. + +- pg_column_size(any) + + Description: Specifies the number of bytes used to store a particular value (possibly compressed) + + Return type: int + + Note: **pg_column_size** displays the space for storing an independent data value. + + ```sql + MogDB=# SELECT pg_column_size(1); + pg_column_size + ---------------- + 4 + (1 row) + ``` + +- pg_database_size(oid) + + Description: Specifies the disk space used by the database with the specified OID. + + Return type: bigint + +- pg_database_size(name) + + Description: Specifies the disk space used by the database with the specified name. + + Return type: bigint + + Note: **pg_database_size** receives the OID or name of a database and returns the disk space used by the corresponding object. + + Example: + + ```sql + MogDB=# SELECT pg_database_size('postgres'); + pg_database_size + ------------------ + 51590112 + (1 row) + ``` + +- pg_relation_size(oid) + + Description: Specifies the disk space used by the table with a specified OID or index. + + Return type: bigint + +- get_db_source_datasize() + + Description: Estimates the total size of non-compressed data in the current database. + + Return type: bigint + + Remarks: (1) Perform an analysis before this function is called. (2) Calculate the total data capacity in the non-compressed state by estimating the compression rate of the column-store tables. 
+ + Example: + + ```sql + MogDB=# analyze; + ANALYZE + MogDB=# select get_db_source_datasize(); + get_db_source_datasize + ------------------------ + 35384925667 + (1 row) + ``` + +- pg_relation_size(text) + + Description: Specifies the disk space used by the table or index with a specified name. The table name can be schema-qualified. + + Return type: bigint + +- pg_relation_size(relation regclass, fork text) + + Description: Specifies the disk space used by the specified fork ('main', 'fsm', or 'vm') of a certain table or index. + + Return type: bigint + +- pg_relation_size(relation regclass) + + Description: Is an abbreviation of **pg_relation_size(…, 'main')**. + + Return type: bigint + + Note: **pg_relation_size** receives the OID or name of a table, an index, or a compressed table, and returns the size. + +- pg_partition_size(oid,oid) + + Description: Specifies the disk space used by the partition with a specified OID. The first **oid** is the OID of the table and the second **oid** is the OID of the partition. + + Return type: bigint + +- pg_partition_size(text, text) + + Description: Specifies the disk space used by the partition with a specified name. The first **text** is the table name and the second **text** is the partition name. + + Return type: bigint + +- pg_partition_indexes_size(oid,oid) + + Description: Specifies the disk space used by the index of the partition with a specified OID. The first **oid** is the OID of the table and the second **oid** is the OID of the partition. + + Return type: bigint + +- pg_partition_indexes_size(text,text) + + Description: Specifies the disk space used by the index of the partition with a specified name. The first **text** is the table name and the second **text** is the partition name. + + Return type: bigint + +- pg_indexes_size(regclass) + + Description: Specifies the total disk space used by the indexes attached to the specified table. + + Return type: bigint + +- pg_size_pretty(bigint) + + Description: Converts a size in bytes expressed as a 64-bit integer into a human-readable format with size units. + + Return type: text + +- pg_size_pretty(numeric) + + Description: Converts a size in bytes expressed as a numeric value into a human-readable format with size units. + + Return type: text + + Note: **pg_size_pretty** formats the results of other functions into a human-readable format. KB/MB/GB/TB can be used. + +- pg_table_size(regclass) + + Description: Specifies the disk space used by the specified table, excluding indexes (but including TOAST, free space mapping, and visibility mapping). + + Return type: bigint + +- pg_tablespace_size(oid) + + Description: Specifies the disk space used by the tablespace with a specified OID. + + Return type: bigint + +- pg_tablespace_size(name) + + Description: Specifies the disk space used by the tablespace with a specified name. + + Return type: bigint + + Note: + + **pg_tablespace_size** receives the OID or name of a tablespace and returns the disk space used by the corresponding object. + +- pg_total_relation_size(oid) + + Description: Specifies the disk space used by the table with a specified OID, including the index and the compressed data. + + Return type: bigint + +- pg_total_relation_size(regclass) + + Description: Specifies the total disk space used by the specified table, including all indexes and TOAST data.
+ + Return type: bigint + +- pg_total_relation_size(text) + + Description: Specifies the disk space used by the table with a specified name, including the index and the compressed data. The table name can be schema-qualified. + + Return type: bigint + + Note: **pg_total_relation_size** receives the OID or name of a table or a compressed table, and returns the sizes of the data, related indexes, and the compressed table in bytes. + +- datalength(any) + + Description: Specifies the number of bytes used by an expression of a specified data type (data management space, data compression, or data type conversion is not considered). + + Return type: int + + Note: **datalength** is used to calculate the space of an independent data value. + + Example: + + ```sql + MogDB=# SELECT datalength(1); + datalength + ------------ + 4 + (1 row) + ``` + + The following table lists the supported data types and calculation methods. +
+ | Data Type | | | Storage Space |
+ | :--- | :--- | :--- | :--- |
+ | Numeric data types | Integer types | TINYINT | 1 |
+ | | | SMALLINT | 2 |
+ | | | INTEGER | 4 |
+ | | | BINARY_INTEGER | 4 |
+ | | | BIGINT | 8 |
+ | | Arbitrary precision types | DECIMAL | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
+ | | | NUMERIC | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
+ | | | NUMBER | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
+ | | Sequence integer | SMALLSERIAL | 2 |
+ | | | SERIAL | 4 |
+ | | | LARGESERIAL | 8 |
+ | | | BIGSERIAL | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
+ | | Floating point types | FLOAT4 | 4 |
+ | | | DOUBLE PRECISION | 8 |
+ | | | FLOAT8 | 8 |
+ | | | BINARY_DOUBLE | 8 |
+ | | | FLOAT[(p)] | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
+ | | | DEC[(p[,s])] | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
+ | | | INTEGER[(p[,s])] | Every four decimal digits occupy two bytes. The digits before and after the decimal point are calculated separately. |
+ | Boolean data types | Boolean types | BOOLEAN | 1 |
+ | Character data types | Character types | CHAR | n |
+ | | | CHAR(n) | n |
+ | | | CHARACTER(n) | n |
+ | | | NCHAR(n) | n |
+ | | | VARCHAR(n) | n |
+ | | | CHARACTER VARYING(n) | Actual number of bytes of a character |
+ | | | VARCHAR2(n) | Actual number of bytes of a character |
+ | | | NVARCHAR(n) | Actual number of bytes of a character |
+ | | | NVARCHAR2(n) | Actual number of bytes of a character |
+ | | | TEXT | Actual number of bytes of a character |
+ | | | CLOB | Actual number of bytes of a character |
+ | Time data types | Time types | DATE | 8 |
+ | | | TIME | 8 |
+ | | | TIMETZ | 12 |
+ | | | TIMESTAMP | 8 |
+ | | | TIMESTAMPTZ | 8 |
+ | | | SMALLDATETIME | 8 |
+ | | | INTERVAL DAY TO SECOND | 16 |
+ | | | INTERVAL | 16 |
+ | | | RELTIME | 4 |
+ | | | ABSTIME | 4 |
+ | | | TINTERVAL | 12 |
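+
+ The size functions above combine naturally with **pg_size_pretty**. A minimal sketch, assuming a regular table **public.t1** already exists; the sizes shown are illustrative only:
+
+ ```sql
+ MogDB=# SELECT pg_size_pretty(pg_relation_size('public.t1')) AS heap_size,
+ MogDB-#        pg_size_pretty(pg_indexes_size('public.t1')) AS index_size,
+ MogDB-#        pg_size_pretty(pg_total_relation_size('public.t1')) AS total_size;
+  heap_size  | index_size | total_size
+ ------------+------------+------------
+  8192 bytes | 16 kB      | 24 kB
+ (1 row)
+ ```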
+ +## Database Object Position Functions + +- pg_relation_filenode(relation regclass) + + Description: Specifies the ID of a filenode with the specified relationship. + + Return type: oid + + Description: **pg_relation_filenode** receives the OID or name of a table, an index, a sequence, or a compressed table, and returns the number of **filenode** allocated to it. **filenode** is the basic component of the file name used by the relationship. For most tables, the result is the same as that of **pg_class.relfilenode**. For a specified system directory, **relfilenode** is set to **0** and this function must be used to obtain the correct value. If a relationship that is not stored is transmitted, such as a view, this function returns **NULL**. + +- pg_relation_filepath(relation regclass) + + Description: Specifies the name of a file path with the specified relationship. + + Return type: text + + Description: **pg_relation_filepath** is similar to **pg_relation_filenode**, except that **pg_relation_filepath** returns the whole file path name for the relationship (relative to the data directory **PGDATA** of MogDB). + +- pg_filenode_relation(tablespace oid, filenode oid) + + Description: Obtains the table names corresponding to the tablespace and relfilenode. + + Return type: regclass + +- pg_partition_filenode(partition_oid) + + Description: Obtains **filenode** corresponding to the OID lock of a specified partitioned table. + + Return type: oid + +- pg_partition_filepath(partition_oid) + + Description: Specifies the file path name of a partition. + + Return type: text + +## Recycle Bin Object Functions + +- gs_is_recycle_object(classid, objid, objname) + + Description: Determines whether an object is in the recycle bin. + + Return type: Boolean diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/logical-replication-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/logical-replication-functions.md index d5e66208..a6588f15 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/logical-replication-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/logical-replication-functions.md @@ -1,593 +1,593 @@ ---- -title: Logical Replication Functions -summary: Logical Replication Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Logical Replication Functions - -- pg_create_logical_replication_slot('slot_name', 'plugin_name') - - Description: Creates a logical replication slot. - - Parameter description: - - - slot_name - - Indicates the name of the streaming replication slot. - - Value range: a string of characters, which can contain only lowercase letters, digits, underscores (_), question marks (?), hyphens (-), and periods (.). The '.' or '..' cannot be used as a replication slot name. - - - plugin_name - - Indicates the name of the plugin. - - Value range: a string, supporting **mppdb_decoding** - - Return type: name, text - - Note: The first return value is the slot name, and the second is the start LSN position for decoding in the logical replication slot. Users who invoke this function must have the **SYSADMIN** permission, the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. Currently, this function can be invoked only on the host. 
- -- pg_create_physical_replication_slot('slot_name', 'isDummyStandby') - - Description: Creates a physical replication slot. - - Parameter description: - - - slot_name - - Indicates the name of the streaming replication slot. - - Value range: a string of characters, which can contain only lowercase letters, digits, underscores (_), question marks (?), hyphens (-), and periods (.). The '.' or '..' cannot be used as a replication slot name. - - - isDummyStandby - - Specifies whether the replication slot is created by connecting the secondary server to the primary server. - - Type: bool - - Return type: name, text - - Note: Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. Currently, primary/standby/secondary deployment is not supported by default. - -- pg_drop_replication_slot('slot_name') - - Description: Deletes a streaming replication slot. - - Parameter description: - - - slot_name - - Indicates the name of the streaming replication slot. - - Value range: a string of characters, which can contain only lowercase letters, digits, underscores (_), question marks (?), hyphens (-), and periods (.). The '.' or '..' cannot be used as a replication slot name. - - Return type: void - - Note: Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. Currently, this function can be invoked only on the host. - -- pg_logical_slot_peek_changes('slot_name', 'LSN', upto_nchanges, 'options_name', 'options_value') - - Description: Performs decoding but does not go to the next streaming replication slot. (The decoding result will be returned again on future calls.) - - Parameter description: - - - slot_name - - Indicates the name of the streaming replication slot. - - Value range: a string of characters, which can contain only lowercase letters, digits, underscores (_), question marks (?), hyphens (-), and periods (.). The '.' or '..' cannot be used as a replication slot name. - - - LSN - - Indicates a target LSN. Decoding is performed only when an LSN is less than or equal to this value. - - Value range: a string, in the format of *xlogid*/*xrecoff*, for example, 1/2AAFC60 (If this parameter is set to **NULL**, the target LSN indicating the end position of decoding is not specified.) - - - upto_nchanges - - Indicates the number of decoded records (including the **begin** and **commit** timestamps). Assume that there are three transactions, which involve 3, 5, and 7 records, respectively. If **upto_nchanges** is set to **4**, 8 records of the first two transactions will be decoded. Specifically, decoding is stopped when the number of decoded records exceeds the value of **upto_nchanges** after decoding in the first two transactions is finished. - - Value range: a non-negative integer - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If any of the **LSN** and **upto_nchanges** values are reached, decoding ends. - - - **options**: Specifies optional parameters, consisting of **options_name** and **options_value**. - - - include-xids - - Indicates whether the decoded **data** column contains XID information. - - Valid value: **0** and **1**. The default value is **1**. - - - **0**: The decoded **data** column does not contain XID information. - - **1**: The decoded **data** column contains XID information. 
- - - skip-empty-xacts - - Indicates whether to ignore empty transaction information during decoding. - - Valid value: **0** and **1**. The default value is **0**. - - - **0**: The empty transaction information is not ignored during decoding. - - **1**: The empty transaction information is ignored during decoding. - - - include-timestamp - - Indicates whether decoded information contains the **commit** timestamp. - - Valid value: **0** and **1**. The default value is **0**. - - - **0**: The decoded information does not contain the **commit** timestamp. - - **1**: The decoded information contains the **commit** timestamp. - - - only-local - - Specifies whether to decode only local logs. - - Value range: **0** and **1**. The default value is **1**. - - - **0**: Non-local logs and local logs are decoded. - - **1**: Only local logs are decoded. - - - force-binary - - Specifies whether to output the decoding result in binary format. - - Value range: **0** - - - **0**: The decoding result is output in text format. - - - white-table-list - - Whitelist parameter, including the schemas and table names to be decoded. - - Value range: a string that contains table names in the whitelist. Different tables are separated by commas (,). An asterisk (*) is used to fuzzily match all tables. Schema names and table names are separated by periods (.). No space character is allowed. Example: **select \* from pg_logical_slot_peek_changes('slot1', NULL, 4096, 'white-table-list', 'public.t1,public.t2');** - - Return type: text, xid, text - - Note: The function returns the decoding result. Each decoding result contains three columns, corresponding to the above return types and indicating the LSN position, XID, and decoded content, respectively. - - Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. - -- pg_logical_slot_get_changes('slot_name', 'LSN', upto_nchanges, 'options_name', 'options_value') - - Description: Performs decoding and goes to the next streaming replication slot. - - Parameter: This function has the same parameters as **pg_logical_slot_peek_changes**. - - Note: Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. Currently, this function can be invoked only on the host. - -- pg_logical_slot_peek_binary_changes('slot_name', 'LSN', upto_nchanges, 'options_name', 'options_value') - - Description: Performs decoding in binary mode and does not go to the next streaming replication slot. (The decoded data can be obtained again during the next decoding.) - - Parameter description: - - - slot_name - - Indicates the name of the streaming replication slot. - - Value range: a string, supporting only letters, digits, and the following special characters: _?-. - - - LSN - - Indicates a target LSN. Decoding is performed only when an LSN is less than or equal to this value. - - Value range: a string, in the format of *xlogid*/*xrecoff*, for example, 1/2AAFC60 (If this parameter is set to **NULL**, the target LSN indicating the end position of decoding is not specified.) - - - upto_nchanges - - Indicates the number of decoded records (including the **begin** and **commit** timestamps). Assume that there are three transactions, which involve 3, 5, and 7 records, respectively. If **upto_nchanges** is set to **4**, 8 records of the first two transactions will be decoded. 
Specifically, decoding is stopped when the number of decoded records exceeds the value of **upto_nchanges** after decoding in the first two transactions is finished. - - Value range: a non-negative integer - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If any of the **LSN** and **upto_nchanges** values are reached, decoding ends. - - - **options**: Specifies optional parameters, consisting of **options_name** and **options_value**. - - - include-xids - - Indicates whether the decoded **data** column contains XID information. - - Valid value: **0** and **1**. The default value is **1**. - - - **0**: The decoded **data** column does not contain XID information. - - **1**: The decoded **data** column contains XID information. - - - skip-empty-xacts - - Indicates whether to ignore empty transaction information during decoding. - - Valid value: **0** and **1**. The default value is **0**. - - - **0**: The empty transaction information is not ignored during decoding. - - **1**: The empty transaction information is ignored during decoding. - - - include-timestamp - - Indicates whether decoded information contains the **commit** timestamp. - - Valid value: **0** and **1**. The default value is **0**. - - - **0**: The decoded information does not contain the **commit** timestamp. - - **1**: The decoded information contains the **commit** timestamp. - - - only-local - - Specifies whether to decode only local logs. - - Value range: **0** and **1**. The default value is **1**. - - - **0**: Non-local logs and local logs are decoded. - - **1**: Only local logs are decoded. - - - force-binary - - Specifies whether to output the decoding result in binary format. - - Value range: **0** or **1**. The default value is **0**. The result is output in binary format. - - - white-table-list - - Whitelist parameter, including the schema and table name to be decoded. - - Value range: a string that contains table names in the whitelist. Different tables are separated by commas (,). An asterisk (*) is used to fuzzily match all tables. Schema names and table names are separated by periods (.). No space character is allowed. Example: **select \* from pg_logical_slot_peek_binary_changes('slot1', NULL, 4096, 'white-table-list', 'public.t1,public.t2');** - - Return type: text, xid, bytea - - Note: The function returns the decoding result. Each decoding result contains three columns, corresponding to the above return types and indicating the LSN position, XID, and decoded content in binary format, respectively. Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. - -- pg_logical_slot_get_binary_changes('slot_name', 'LSN', upto_nchanges, 'options_name', 'options_value') - - Description: Performs decoding in binary mode and does not go to the next streaming replication slot. - - Parameter: This function has the same parameters as **pg_logical_slot_peek_binary_changes**. - - Note: Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. - -- pg_replication_slot_advance ('slot_name', 'LSN') - - Description: Directly goes to the streaming replication slot for a specified LSN, without outputting any decoding result. - - Parameter description: - - - slot_name - - Indicates the name of the streaming replication slot. 
- - Value range: a string of characters, which can contain only lowercase letters, digits, underscores (_), question marks (?), hyphens (-), and periods (.). The '.' or '..' cannot be used as a replication slot name. - - - LSN - - Indicates a target LSN. Next decoding will be performed only in transactions whose commission position is greater than this value. If an input LSN is smaller than the position recorded in the current streaming replication slot, the function directly returns. If the input LSN is greater than the LSN of the current physical log, the latter LSN will be directly used for decoding. - - Value range: a string, in the format of *xlogid*/*xrecoff* - - Return type: name, text - - Note: A return result contains the slot name and LSN that is actually used for decoding. Users who invoke this function must have the **SYSADMIN** permission, the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. Currently, this function can be invoked only on the host. - -- pg_logical_get_area_changes('LSN_start', 'LSN_end', upto_nchanges, 'decoding_plugin', 'xlog_path', 'options_name', 'options_value') - - Description: Specifies an LSN range or an Xlog file for decoding when no DDL is available. - - The constraints are as follows: - - 1. When the API is called, only when **wal_level** is set to **logical**, the generated log files can be parsed. If the used Xlog file is not of the logical level, the decoded content does not have the corresponding value and type, and there is no other impact. - 2. The Xlog file can be parsed only by a copy of a fully homogeneous DN to ensure that the metadata corresponding to the data can be found and no DDL or VACUUM FULL operation is performed. - 3. You can find the XLog to be parsed. - 4. Do not read too many Xlog files at a time. You are advised to read one Xlog file at a time. It is estimated that the memory occupied by one Xlog file is two to three times the size of the Xlog file. - 5. The Xlog file before scale-out cannot be decoded. - - Parameter description: - - - LSN_start - - Specifies the LSN at the start of decoding. - - Value range: a string, in the format of *xlogid*/*xrecoff*, for example, 1/2AAFC60 (If this parameter is set to **NULL**, the target LSN indicating the end position of decoding is not specified.) - - - LSN_end - - Specifies the LSN at the end of decoding. - - Value range: a string, in the format of *xlogid*/*xrecoff*, for example, 1/2AAFC60 (If this parameter is set to **NULL**, the target LSN indicating the end position of decoding is not specified.) - - - upto_nchanges - - Indicates the number of decoded records (including the **begin** and **commit** timestamps). Assume that there are three transactions, which involve 3, 5, and 7 records, respectively. If **upto_nchanges** is set to **4**, 8 records of the first two transactions will be decoded. Specifically, decoding is stopped when the number of decoded records exceeds the value of **upto_nchanges** after decoding in the first two transactions is finished. - - Value range: a non-negative integer - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If any of the **LSN** and **upto_nchanges** values are reached, decoding ends. - - - decoding_plugin - - Decoding plug-in, which is a .so plug-in that specifies the output format of the decoded content. - - Value range: **mppdb_decoding** and **sql_decoding**. 
- - - xlog_path - - Decoding plug-in, which specifies the Xlog absolute path and file level of the decoding file. - - Value range: **NULL** or a character string of the absolute path of the Xlog file. - - - **options**: This parameter is optional and consists of a series of **options_name** and **options_value**. You can retain the default value. For details, see **pg_logical_slot_peek_changes**. - - Example: - - ``` - MogDB=# SELECT pg_current_xlog_location(); - pg_current_xlog_location - -------------------------- - 0/E62E238 - (1 row) - - MogDB=# create table t1 (a int primary key,b int,c int); - NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1" - CREATE TABLE - MogDB=# insert into t1 values(1,1,1); - INSERT 0 1 - MogDB=# insert into t1 values(2,2,2); - INSERT 0 1 - - MogDB=# select data from pg_logical_get_area_changes('0/E62E238',NULL,NULL,'sql_decoding',NULL); - location | xid | data - -----------+-------+--------------------------------------------------- - 0/E62E8D0 | 27213 | COMMIT (at 2022-01-26 15:08:03.349057+08) 3020226 - 0/E6325F0 | 27214 | COMMIT (at 2022-01-26 15:08:07.309869+08) 3020234 - ...... - ``` - -- pg_get_replication_slots() - - Description: Obtains the replication slot list. - - Return type: text, text, text, oid, boolean, xid, xid, text, boolean - - Example: - - ``` - MogDB=# select * from pg_get_replication_slots(); - slot_name | plugin | slot_type | datoid | active | xmin | catalog_xmin | restart_lsn | dummy_standby - -----------+----------------+-----------+--------+--------+------+--------------+-------------+--------------- - wkl001 | mppdb_decoding | logical | 15914 | f | | 2079556 | 4/1B81D920 | f - dn_6002 | | physical | 0 | t | | | 8/7CB63BD8 | f - dn_6004 | | physical | 0 | t | | | 8/7CB63BD8 | f - dn_6003 | | physical | 0 | t | | | 8/7CB63BD8 | f - gfslot001 | mppdb_decoding | logical | 15914 | f | | 2412553 | 4/A54B2428 | f - (5 rows) - ``` - -- gs_get_parallel_decode_status() - - Description: Monitors the length of the read log queue and decoding result queue of each decoding thread to locate the concurrent decoding performance bottleneck. - - Return type: text, int, text, text - - Example: - - ``` - MogDB=# select * from gs_get_parallel_decode_status(); - slot_name | parallel_decode_num | read_change_queue_length | decode_change_queue_length - -----------+---------------------+------------------------------------------------------------------+------------------------------------------------------------------ - slot1 | 3 | queue0: 33, queue1: 36, queue2: 1017 | queue0: 1011, queue1: 1008, queue2: 27 - slot2 | 5 | queue0: 452, queue1: 1017, queue2: 233, queue3: 585, queue4: 183 | queue0: 754, queue1: 188, queue2: 972, queue3: 620, queue4: 1022 - (2 rows) - ``` - - Note: In the returned value, **slot_name** indicates the replication slot name, **parallel_decode_num** indicates the number of parallel decoding threads in the replication slot, **read_change_queue_length** indicates the current length of the log queue read by each decoding thread, and **decode_change_queue_length** indicates the current length of the decoding result queue of each decoding thread. - -- pg_replication_origin_create (node_name) - - Description: Creates a replication source with a given external name and returns the internal ID assigned to it. - - Note: The user who calls this function must have the **SYSADMIN** permission. - - Parameter description: - - - node_name - - Name of the replication source to be created. 
- - Value range: a string, supporting only letters, digits, and the following special characters: _?-. - - Return type: oid - -- pg_replication_origin_drop (node_name) - - Description: Deletes a previously created replication source, including any associated replay progress. - - Note: The user who calls this function must have the **SYSADMIN** permission. - - Parameter description: - - - node_name - - Name of the replication source to be deleted. - - Value range: a string, supporting only letters, digits, and the following special characters: _?-. - -- pg_replication_origin_oid (node_name) - - Description: Searches for a replication source by name and returns the internal ID. If no such replication source is found, an error is thrown. - - Note: The user who calls this function must have the **SYSADMIN** permission. - - Parameter description: - - - node_name - - Specifies the name of the replication source to be queried. - - Value range: a string, supporting only letters, digits, and the following special characters: _?-. - - Return type: oid - -- pg_replication_origin_session_setup (node_name) - - Description: Marks the current session for replaying from a given origin, allowing you to track replay progress. This parameter can be used only when no origin is selected. Run the **pg_replication_origin_session_reset** command to cancel the configuration. - - Note: The user who calls this function must have the **SYSADMIN** permission. - - Parameter description: - - - node_name - - Name of the replication source. - - Value range: a string, supporting only letters, digits, and the following special characters: _?-. - -- pg_replication_origin_session_reset () - - Description: Cancels the **pg_replication_origin_session_setup()** effect. - - Note: The user who calls this function must have the **SYSADMIN** permission. - -- pg_replication_origin_session_is_setup () - - Description: Returns a true value if a replication source is selected in the current session. - - Note: The user who calls this function must have the **SYSADMIN** permission. - - Return type: Boolean - -- pg_replication_origin_session_progress (flush) - - Description: Returns the replay position of the replication source selected in the current session. - - Note: The user who calls this function must have the **SYSADMIN** permission. - - Parameter description: - - - flush - - Determines whether the corresponding local transaction has been flushed to disk. - - Value range: Boolean - - Return type: LSN - -- pg_replication_origin_xact_setup (origin_lsn, origin_timestamp) - - Description: Marks the current transaction as recommitted at a given LSN and timestamp. This function can be called only when **pg_replication_origin_session_setup** is used to select a replication source. - - Note: The user who calls this function must have the **SYSADMIN** permission. - - Parameter description: - - - origin_lsn - - Position for replaying the replication source. - - Value range: LSN - - - origin_timestamp - - Time point when a transaction is committed - - Value range: timestamp with time zone - -- pg_replication_origin_xact_reset () - - Description: Cancels the **pg_replication_origin_xact_setup()** effect. - - Note: The user who calls this function must have the **SYSADMIN** permission. - -- pg_replication_origin_advance (node_name, lsn) - - Description: - - Sets the replication progress of a given node to a given position. This is primarily used to set the initial position, or to set a new position after a configuration change or similar change. 
- - Note: Improper use of this function may cause inconsistent replication data. - - Note: The user who calls this function must have the **SYSADMIN** permission. - - Parameter description: - - - node_name - - Name of an existing replication source. - - Value range: a string, supporting only letters, digits, and the following special characters: _?-. - - - lsn - - Position for replaying the replication source. - - Value range: LSN - -- pg_replication_origin_progress (node_name, flush) - - Description: Returns the position for replaying the given replication source. - - Note: The user who calls this function must have the **SYSADMIN** permission. - - Parameter description: - - - node_name - - Name of the replication source. - - Value range: a string, supporting only letters, digits, and the following special characters: _?-. - - - flush - - Determines whether the corresponding local transaction has been flushed to disk. - - Value range: Boolean - -- pg_show_replication_origin_status() - - Description: Displays the replication status of the replication source. - - Note: The user who calls this function must have the **SYSADMIN** permission. - - Return type: - - - **local_id**: OID, which specifies the ID of the replication source. - - **external_id**: text, which specifies the name of the replication source. - - **remote_lsn**: LSN of the replication source. - - **local_lsn**: local LSN. - -- pg_get_publication_tables(pub_name) - - Description: Returns the relid list of tables to be published based on the publication name. - - Parameter description: - - - pub_name - - Existing publication name. - - Value range: a string, supporting only letters, digits, and the following special characters: _?-. - - Return type: relid list - -- pg_stat_get_subscription(sub_oid oid) → record - - Description: - - Rerurns the subscription status information after a subscription OID is entered. - - Parameter description: - -- subid - - Subscription OID. - - Value range: oid - - Return type: - - - **relid**: OID of the table. - - **pid**: thread_id, which indicates the thread ID of the background apply/sync thread. - - **received_lsn**: pg_lsn, which indicates the latest LSN received from the publication side. - - **last_msg_send_time**: timestamp, which indicates the time when the last message is sent from the publication side. - - **last_msg_receipt_time**: timestamp, which indicates the time when the last message is received by the subscription side. - - **latest_end_lsn**: pg_lsn, which indicates the LSN of the publication side when the last keepalive message is received. - - **latest_end_time**: timstamp, which indicates the time when the last keepalive message is received. +--- +title: Logical Replication Functions +summary: Logical Replication Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Logical Replication Functions + +- pg_create_logical_replication_slot('slot_name', 'plugin_name') + + Description: Creates a logical replication slot. + + Parameter description: + + - slot_name + + Indicates the name of the streaming replication slot. + + Value range: a string of characters, which can contain only lowercase letters, digits, underscores (_), question marks (?), hyphens (-), and periods (.). The '.' or '..' cannot be used as a replication slot name. + + - plugin_name + + Indicates the name of the plugin. 
+ + Value range: a string, supporting **mppdb_decoding** + + Return type: name, text + + Note: The first return value is the slot name, and the second is the start LSN position for decoding in the logical replication slot. Users who invoke this function must have the **SYSADMIN** permission, the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. Currently, this function can be invoked only on the host. + +- pg_create_physical_replication_slot('slot_name', 'isDummyStandby') + + Description: Creates a physical replication slot. + + Parameter description: + + - slot_name + + Indicates the name of the streaming replication slot. + + Value range: a string of characters, which can contain only lowercase letters, digits, underscores (_), question marks (?), hyphens (-), and periods (.). The '.' or '..' cannot be used as a replication slot name. + + - isDummyStandby + + Specifies whether the replication slot is created by connecting the secondary server to the primary server. + + Type: bool + + Return type: name, text + + Note: Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. Currently, primary/standby/secondary deployment is not supported by default. + +- pg_drop_replication_slot('slot_name') + + Description: Deletes a streaming replication slot. + + Parameter description: + + - slot_name + + Indicates the name of the streaming replication slot. + + Value range: a string of characters, which can contain only lowercase letters, digits, underscores (_), question marks (?), hyphens (-), and periods (.). The '.' or '..' cannot be used as a replication slot name. + + Return type: void + + Note: Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. Currently, this function can be invoked only on the host. + +- pg_logical_slot_peek_changes('slot_name', 'LSN', upto_nchanges, 'options_name', 'options_value') + + Description: Performs decoding but does not go to the next streaming replication slot. (The decoding result will be returned again on future calls.) + + Parameter description: + + - slot_name + + Indicates the name of the streaming replication slot. + + Value range: a string of characters, which can contain only lowercase letters, digits, underscores (_), question marks (?), hyphens (-), and periods (.). The '.' or '..' cannot be used as a replication slot name. + + - LSN + + Indicates a target LSN. Decoding is performed only when an LSN is less than or equal to this value. + + Value range: a string, in the format of *xlogid*/*xrecoff*, for example, 1/2AAFC60 (If this parameter is set to **NULL**, the target LSN indicating the end position of decoding is not specified.) + + - upto_nchanges + + Indicates the number of decoded records (including the **begin** and **commit** timestamps). Assume that there are three transactions, which involve 3, 5, and 7 records, respectively. If **upto_nchanges** is set to **4**, 8 records of the first two transactions will be decoded. Specifically, decoding is stopped when the number of decoded records exceeds the value of **upto_nchanges** after decoding in the first two transactions is finished. 
+ + Value range: a non-negative integer + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If any of the **LSN** and **upto_nchanges** values are reached, decoding ends. + + - **options**: Specifies optional parameters, consisting of **options_name** and **options_value**. + + - include-xids + + Indicates whether the decoded **data** column contains XID information. + + Valid value: **0** and **1**. The default value is **1**. + + - **0**: The decoded **data** column does not contain XID information. + - **1**: The decoded **data** column contains XID information. + + - skip-empty-xacts + + Indicates whether to ignore empty transaction information during decoding. + + Valid value: **0** and **1**. The default value is **0**. + + - **0**: The empty transaction information is not ignored during decoding. + - **1**: The empty transaction information is ignored during decoding. + + - include-timestamp + + Indicates whether decoded information contains the **commit** timestamp. + + Valid value: **0** and **1**. The default value is **0**. + + - **0**: The decoded information does not contain the **commit** timestamp. + - **1**: The decoded information contains the **commit** timestamp. + + - only-local + + Specifies whether to decode only local logs. + + Value range: **0** and **1**. The default value is **1**. + + - **0**: Non-local logs and local logs are decoded. + - **1**: Only local logs are decoded. + + - force-binary + + Specifies whether to output the decoding result in binary format. + + Value range: **0** + + - **0**: The decoding result is output in text format. + + - white-table-list + + Whitelist parameter, including the schemas and table names to be decoded. + + Value range: a string that contains table names in the whitelist. Different tables are separated by commas (,). An asterisk (*) is used to fuzzily match all tables. Schema names and table names are separated by periods (.). No space character is allowed. Example: **select \* from pg_logical_slot_peek_changes('slot1', NULL, 4096, 'white-table-list', 'public.t1,public.t2');** + + Return type: text, xid, text + + Note: The function returns the decoding result. Each decoding result contains three columns, corresponding to the above return types and indicating the LSN position, XID, and decoded content, respectively. + + Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. + +- pg_logical_slot_get_changes('slot_name', 'LSN', upto_nchanges, 'options_name', 'options_value') + + Description: Performs decoding and goes to the next streaming replication slot. + + Parameter: This function has the same parameters as **pg_logical_slot_peek_changes**. + + Note: Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. Currently, this function can be invoked only on the host. + +- pg_logical_slot_peek_binary_changes('slot_name', 'LSN', upto_nchanges, 'options_name', 'options_value') + + Description: Performs decoding in binary mode and does not go to the next streaming replication slot. (The decoded data can be obtained again during the next decoding.) + + Parameter description: + + - slot_name + + Indicates the name of the streaming replication slot. + + Value range: a string, supporting only letters, digits, and the following special characters: _?-. 
+ + - LSN + + Indicates a target LSN. Decoding is performed only when an LSN is less than or equal to this value. + + Value range: a string, in the format of *xlogid*/*xrecoff*, for example, 1/2AAFC60 (If this parameter is set to **NULL**, the target LSN indicating the end position of decoding is not specified.) + + - upto_nchanges + + Indicates the number of decoded records (including the **begin** and **commit** timestamps). Assume that there are three transactions, which involve 3, 5, and 7 records, respectively. If **upto_nchanges** is set to **4**, 8 records of the first two transactions will be decoded. Specifically, decoding is stopped when the number of decoded records exceeds the value of **upto_nchanges** after decoding in the first two transactions is finished. + + Value range: a non-negative integer + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If any of the **LSN** and **upto_nchanges** values are reached, decoding ends. + + - **options**: Specifies optional parameters, consisting of **options_name** and **options_value**. + + - include-xids + + Indicates whether the decoded **data** column contains XID information. + + Valid value: **0** and **1**. The default value is **1**. + + - **0**: The decoded **data** column does not contain XID information. + - **1**: The decoded **data** column contains XID information. + + - skip-empty-xacts + + Indicates whether to ignore empty transaction information during decoding. + + Valid value: **0** and **1**. The default value is **0**. + + - **0**: The empty transaction information is not ignored during decoding. + - **1**: The empty transaction information is ignored during decoding. + + - include-timestamp + + Indicates whether decoded information contains the **commit** timestamp. + + Valid value: **0** and **1**. The default value is **0**. + + - **0**: The decoded information does not contain the **commit** timestamp. + - **1**: The decoded information contains the **commit** timestamp. + + - only-local + + Specifies whether to decode only local logs. + + Value range: **0** and **1**. The default value is **1**. + + - **0**: Non-local logs and local logs are decoded. + - **1**: Only local logs are decoded. + + - force-binary + + Specifies whether to output the decoding result in binary format. + + Value range: **0** or **1**. The default value is **0**. The result is output in binary format. + + - white-table-list + + Whitelist parameter, including the schema and table name to be decoded. + + Value range: a string that contains table names in the whitelist. Different tables are separated by commas (,). An asterisk (*) is used to fuzzily match all tables. Schema names and table names are separated by periods (.). No space character is allowed. Example: **select \* from pg_logical_slot_peek_binary_changes('slot1', NULL, 4096, 'white-table-list', 'public.t1,public.t2');** + + Return type: text, xid, bytea + + Note: The function returns the decoding result. Each decoding result contains three columns, corresponding to the above return types and indicating the LSN position, XID, and decoded content in binary format, respectively. Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. 
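+
+ A minimal sketch of the slot lifecycle built from the functions above: create a logical replication slot, peek at changes without consuming them, and drop the slot. It assumes **wal_level** is set to **logical**, the invoker holds the required replication permission, and the slot name **demo_slot** is free; the output is abridged and illustrative only:
+
+ ```
+ MogDB=# SELECT * FROM pg_create_logical_replication_slot('demo_slot', 'mppdb_decoding');
+  slotname  | xlog_position
+ -----------+---------------
+  demo_slot | 0/E62E238
+ (1 row)
+
+ MogDB=# SELECT * FROM pg_logical_slot_peek_changes('demo_slot', NULL, 4096, 'skip-empty-xacts', '1');
+ ......
+
+ MogDB=# SELECT pg_drop_replication_slot('demo_slot');
+ ```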
+ +- pg_logical_slot_get_binary_changes('slot_name', 'LSN', upto_nchanges, 'options_name', 'options_value') + + Description: Performs decoding in binary mode and goes to the next streaming replication slot. + + Parameter: This function has the same parameters as **pg_logical_slot_peek_binary_changes**. + + Note: Users who invoke this function must have the **SYSADMIN** permission or the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. + +- pg_replication_slot_advance ('slot_name', 'LSN') + + Description: Directly goes to the streaming replication slot for a specified LSN, without outputting any decoding result. + + Parameter description: + + - slot_name + + Indicates the name of the streaming replication slot. + + Value range: a string of characters, which can contain only lowercase letters, digits, underscores (_), question marks (?), hyphens (-), and periods (.). The '.' or '..' cannot be used as a replication slot name. + + - LSN + + Indicates a target LSN. Next decoding will be performed only in transactions whose commit position is greater than this value. If an input LSN is smaller than the position recorded in the current streaming replication slot, the function directly returns. If the input LSN is greater than the LSN of the current physical log, the latter LSN will be directly used for decoding. + + Value range: a string, in the format of *xlogid*/*xrecoff* + + Return type: name, text + + Note: A return result contains the slot name and LSN that is actually used for decoding. Users who invoke this function must have the **SYSADMIN** permission, the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. Currently, this function can be invoked only on the host. + +- pg_logical_get_area_changes('LSN_start', 'LSN_end', upto_nchanges, 'decoding_plugin', 'xlog_path', 'options_name', 'options_value') + + Description: Specifies an LSN range or an Xlog file for decoding when no DDL is available. + + The constraints are as follows: + + 1. When the API is called, only log files generated while **wal_level** is set to **logical** can be parsed. If the used Xlog file is not of the logical level, the decoded content does not have the corresponding value and type, and there is no other impact. + 2. The Xlog file can be parsed only by a copy of a fully homogeneous DN to ensure that the metadata corresponding to the data can be found and no DDL or VACUUM FULL operation is performed. + 3. The Xlog file to be parsed must be locatable. + 4. Do not read too many Xlog files at a time. You are advised to read one Xlog file at a time. It is estimated that the memory occupied by one Xlog file is two to three times the size of the Xlog file. + 5. The Xlog file before scale-out cannot be decoded. + + Parameter description: + + - LSN_start + + Specifies the LSN at the start of decoding. + + Value range: a string, in the format of *xlogid*/*xrecoff*, for example, 1/2AAFC60 (If this parameter is set to **NULL**, the target LSN indicating the start position of decoding is not specified.) + + - LSN_end + + Specifies the LSN at the end of decoding. + + Value range: a string, in the format of *xlogid*/*xrecoff*, for example, 1/2AAFC60 (If this parameter is set to **NULL**, the target LSN indicating the end position of decoding is not specified.) + + - upto_nchanges + + Indicates the number of decoded records (including the **begin** and **commit** timestamps).
Assume that there are three transactions, which involve 3, 5, and 7 records, respectively. If **upto_nchanges** is set to **4**, 8 records of the first two transactions will be decoded. Specifically, decoding is stopped when the number of decoded records exceeds the value of **upto_nchanges** after decoding in the first two transactions is finished. + + Value range: a non-negative integer + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If any of the **LSN** and **upto_nchanges** values are reached, decoding ends. + + - decoding_plugin + + Decoding plug-in, which is a .so plug-in that specifies the output format of the decoded content. + + Value range: **mppdb_decoding** and **sql_decoding**. + + - xlog_path + + Specifies the absolute path of the Xlog file to be decoded. + + Value range: **NULL** or a character string of the absolute path of the Xlog file. + + - **options**: This parameter is optional and consists of a series of **options_name** and **options_value**. You can retain the default value. For details, see **pg_logical_slot_peek_changes**. + + Example: + + ``` + MogDB=# SELECT pg_current_xlog_location(); + pg_current_xlog_location + -------------------------- + 0/E62E238 + (1 row) + + MogDB=# create table t1 (a int primary key,b int,c int); + NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1" + CREATE TABLE + MogDB=# insert into t1 values(1,1,1); + INSERT 0 1 + MogDB=# insert into t1 values(2,2,2); + INSERT 0 1 + + MogDB=# select data from pg_logical_get_area_changes('0/E62E238',NULL,NULL,'sql_decoding',NULL); + location | xid | data + -----------+-------+--------------------------------------------------- + 0/E62E8D0 | 27213 | COMMIT (at 2022-01-26 15:08:03.349057+08) 3020226 + 0/E6325F0 | 27214 | COMMIT (at 2022-01-26 15:08:07.309869+08) 3020234 + ...... + ``` + +- pg_get_replication_slots() + + Description: Obtains the replication slot list. + + Return type: text, text, text, oid, boolean, xid, xid, text, boolean + + Example: + + ``` + MogDB=# select * from pg_get_replication_slots(); + slot_name | plugin | slot_type | datoid | active | xmin | catalog_xmin | restart_lsn | dummy_standby + -----------+----------------+-----------+--------+--------+------+--------------+-------------+--------------- + wkl001 | mppdb_decoding | logical | 15914 | f | | 2079556 | 4/1B81D920 | f + dn_6002 | | physical | 0 | t | | | 8/7CB63BD8 | f + dn_6004 | | physical | 0 | t | | | 8/7CB63BD8 | f + dn_6003 | | physical | 0 | t | | | 8/7CB63BD8 | f + gfslot001 | mppdb_decoding | logical | 15914 | f | | 2412553 | 4/A54B2428 | f + (5 rows) + ``` + +- gs_get_parallel_decode_status() + + Description: Monitors the length of the read log queue and decoding result queue of each decoding thread to locate the concurrent decoding performance bottleneck.
+ + Return type: text, int, text, text + + Example: + + ``` + MogDB=# select * from gs_get_parallel_decode_status(); + slot_name | parallel_decode_num | read_change_queue_length | decode_change_queue_length + -----------+---------------------+------------------------------------------------------------------+------------------------------------------------------------------ + slot1 | 3 | queue0: 33, queue1: 36, queue2: 1017 | queue0: 1011, queue1: 1008, queue2: 27 + slot2 | 5 | queue0: 452, queue1: 1017, queue2: 233, queue3: 585, queue4: 183 | queue0: 754, queue1: 188, queue2: 972, queue3: 620, queue4: 1022 + (2 rows) + ``` + + Note: In the returned value, **slot_name** indicates the replication slot name, **parallel_decode_num** indicates the number of parallel decoding threads in the replication slot, **read_change_queue_length** indicates the current length of the log queue read by each decoding thread, and **decode_change_queue_length** indicates the current length of the decoding result queue of each decoding thread. + +- pg_replication_origin_create (node_name) + + Description: Creates a replication source with a given external name and returns the internal ID assigned to it. + + Note: The user who calls this function must have the **SYSADMIN** permission. + + Parameter description: + + - node_name + + Name of the replication source to be created. + + Value range: a string, supporting only letters, digits, and the following special characters: _?-. + + Return type: oid + +- pg_replication_origin_drop (node_name) + + Description: Deletes a previously created replication source, including any associated replay progress. + + Note: The user who calls this function must have the **SYSADMIN** permission. + + Parameter description: + + - node_name + + Name of the replication source to be deleted. + + Value range: a string, supporting only letters, digits, and the following special characters: _?-. + +- pg_replication_origin_oid (node_name) + + Description: Searches for a replication source by name and returns the internal ID. If no such replication source is found, an error is thrown. + + Note: The user who calls this function must have the **SYSADMIN** permission. + + Parameter description: + + - node_name + + Specifies the name of the replication source to be queried. + + Value range: a string, supporting only letters, digits, and the following special characters: _?-. + + Return type: oid + +- pg_replication_origin_session_setup (node_name) + + Description: Marks the current session for replaying from a given origin, allowing you to track replay progress. This function can be used only when no origin is selected in the current session. Run the **pg_replication_origin_session_reset** command to cancel the configuration. + + Note: The user who calls this function must have the **SYSADMIN** permission. + + Parameter description: + + - node_name + + Name of the replication source. + + Value range: a string, supporting only letters, digits, and the following special characters: _?-. + +- pg_replication_origin_session_reset () + + Description: Cancels the **pg_replication_origin_session_setup()** effect. + + Note: The user who calls this function must have the **SYSADMIN** permission. + +- pg_replication_origin_session_is_setup () + + Description: Returns a true value if a replication source is selected in the current session. + + Note: The user who calls this function must have the **SYSADMIN** permission.
+ + Return type: Boolean + +- pg_replication_origin_session_progress (flush) + + Description: Returns the replay position of the replication source selected in the current session. + + Note: The user who calls this function must have the **SYSADMIN** permission. + + Parameter description: + + - flush + + Determines whether the corresponding local transaction has been flushed to disk. + + Value range: Boolean + + Return type: LSN + +- pg_replication_origin_xact_setup (origin_lsn, origin_timestamp) + + Description: Marks the current transaction as recommitted at a given LSN and timestamp. This function can be called only when **pg_replication_origin_session_setup** is used to select a replication source. + + Note: The user who calls this function must have the **SYSADMIN** permission. + + Parameter description: + + - origin_lsn + + Position for replaying the replication source. + + Value range: LSN + + - origin_timestamp + + Time point when a transaction is committed. + + Value range: timestamp with time zone + +- pg_replication_origin_xact_reset () + + Description: Cancels the **pg_replication_origin_xact_setup()** effect. + + Note: The user who calls this function must have the **SYSADMIN** permission. + +- pg_replication_origin_advance (node_name, lsn) + + Description: + + Sets the replication progress of a given node to a given position. This is primarily used to set the initial position, or to set a new position after a configuration change or similar change. + + Note: Improper use of this function may cause inconsistent replication data. + + Note: The user who calls this function must have the **SYSADMIN** permission. + + Parameter description: + + - node_name + + Name of an existing replication source. + + Value range: a string, supporting only letters, digits, and the following special characters: _?-. + + - lsn + + Position for replaying the replication source. + + Value range: LSN + +- pg_replication_origin_progress (node_name, flush) + + Description: Returns the position for replaying the given replication source. + + Note: The user who calls this function must have the **SYSADMIN** permission. + + Parameter description: + + - node_name + + Name of the replication source. + + Value range: a string, supporting only letters, digits, and the following special characters: _?-. + + - flush + + Determines whether the corresponding local transaction has been flushed to disk. + + Value range: Boolean + +- pg_show_replication_origin_status() + + Description: Displays the replication status of the replication source. + + Note: The user who calls this function must have the **SYSADMIN** permission. + + Return type: + + - **local_id**: OID, which specifies the ID of the replication source. + - **external_id**: text, which specifies the name of the replication source. + - **remote_lsn**: LSN of the replication source. + - **local_lsn**: local LSN.
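+
+ A minimal sketch of origin tracking built from the functions above, assuming the origin name **origin_demo** is free and using an illustrative LSN (output omitted): create an origin, advance its progress to a known position, read the progress back, and drop it:
+
+ ```
+ MogDB=# SELECT pg_replication_origin_create('origin_demo');
+ MogDB=# SELECT pg_replication_origin_advance('origin_demo', '0/E62E238');
+ MogDB=# SELECT pg_replication_origin_progress('origin_demo', true);
+ MogDB=# SELECT pg_replication_origin_drop('origin_demo');
+ ```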
+
+- pg_stat_get_subscription(sub_oid oid) → record
+
+  Description: Returns the subscription status information after a subscription OID is entered.
+
+  Parameter description:
+
+  - subid
+
+    Subscription OID.
+
+    Value range: oid
+
+  Return type:
+
+  - **relid**: OID of the table.
+  - **pid**: thread_id, which indicates the thread ID of the background apply/sync thread.
+  - **received_lsn**: pg_lsn, which indicates the latest LSN received from the publication side.
+  - **last_msg_send_time**: timestamp, which indicates the time when the last message is sent from the publication side.
+  - **last_msg_receipt_time**: timestamp, which indicates the time when the last message is received by the subscription side.
+  - **latest_end_lsn**: pg_lsn, which indicates the LSN of the publication side when the last keepalive message is received.
+  - **latest_end_time**: timestamp, which indicates the time when the last keepalive message is received.
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/other-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/other-functions.md
index 6451fef5..ba53f71b 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/other-functions.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/other-functions.md
@@ -1,660 +1,660 @@
----
-title: Other Functions
-summary: Other Functions
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Other Functions
-
-- plan_seed()
-
-  Description: Obtains the seed value of the previous query statement (internal use).
-
-  Return type: int
-
-- pg_stat_get_env()
-
-  Description: Obtains the environment variable information of the current node. Only users with the **sysadmin** or **monitor admin** permission can access the environment variable information.
-
-  Return type: record
-
-  Example:
-
-  ```sql
-  MogDB=# select pg_stat_get_env();
-   pg_stat_get_env
-  ---------------------------------------------------------------------------------------------------------------------------------------
-   (coordinator1,localhost,144773,49100,/data1/GaussDB_Kernel_TRUNK/install,/data1/GaussDB_Kernel_TRUNK/install/data/coordinator1,pg_log)
-  (1 row)
-  ```
-
-- pg_catalog.plancache_clean()
-
-  Description: Clears the global plan cache that is not used on nodes.
-
-  Return type: Boolean
-
-- pg_catalog.plancache_status()
-
-  Description: Displays information about the global plan cache on nodes. The information returned by the function is the same as that in GLOBAL_PLANCACHE_STATUS.
-
-  Return type: record
-
-- textlen(text)
-
-  Description: Provides the method of querying the logical length of text.
-
-  Return type: int
-
-- threadpool_status()
-
-  Description: Displays the status of worker threads and sessions in the thread pool.
-
-  Return type: record
-
-- get_local_active_session()
-
-  Description: Provides sampling records of the historical active sessions stored in the memory of the current node.
-
-  Return type: record
-
-- pg_stat_get_thread()
-
-  Description: Provides status information about all threads on the current node. Users with the **sysadmin** or **monitor admin** permission can view information about all threads, and common users can view only their own thread information.
-
-  Return type: record
-
-- pg_stat_get_sql_count()
-
-  Description: Provides the counts of the **SELECT**, **UPDATE**, **INSERT**, **DELETE**, and **MERGE INTO** statements executed on the current node. Users with the **sysadmin** or **monitor admin** permission can view information about all users, and common users can view only their own statistics.
- - Return type: record - -- pg_stat_get_data_senders() - - Description: Provides detailed information about the data-copy sending thread active at the moment. - - Return type: record - -- get_wait_event_info() - - Description: Provides detailed information about the wait event. - - Return type: record - -- generate_wdr_report(begin_snap_id bigint, end_snap_id bigint, report_type cstring, report_scope cstring, node_name cstring) - - Description: Generates system diagnosis reports based on two snapshots. You need to run the command in the **postgres** database. By default, the initial user or the user with the **monadmin** permission can access the database. The result can be queried only in the system database but cannot be queried in the user database. - - Return type: record - - **Table 1** generate_wdr_report parameter description - - | Parameter | Description | Range | - | ------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | - | begin_snap_id | Snapshot ID that starts the diagnosis report period. | N/A | - | end_snap_id | Snapshot ID that ends the diagnosis report period. By default, the value of **end_snap_id** is greater than that of **begin_snap_id**. | N/A | - | report_type | Specifies the type of the generated report. | - **summary**
- **detail**
- **all**: Both **summary** and **detail** types are included. | - | report_scope | Specifies the scope for a report to be generated. | - **cluster**: database-level information
- **node**: node-level information | - | node_name | When **report\_scope** is set to **node**, set this parameter to the name of the corresponding node. (You can run the **select \* from pg_node_env;** command to query the node name.)If **report\_scope** is set to **cluster**, this parameter can be omitted, left blank, or set to **NULL**. | - **cluster**: This value is omitted, left blank or set to **NULL**.
- **node**: indicates the node name in MogDB. | - -- create_wdr_snapshot() - - Description: Manually generates system diagnosis snapshots. This function requires the **sysadmin** permission. - - Return type: text - -- kill_snapshot() - - Description: Kills the WDR snapshot backend thread. Users who invoke this function must have the **SYSADMIN** permission, the **REPLICATION** permission, or inherit the **gs_role_replication** permission of the built-in role. - - Return type: void - -- capture_view_to_json(text,integer) - - Description: Saves the view result to the directory specified by GUC: **perf_directory**. If **is_crossdb** is set to **1**, the view is accessed once for all databases. If the value of **is_crossdb** is **0**, the current database is accessed only once. Only users with the **sysadmin** or **monitor admin** permission can execute this function. - - Return type: int - -- reset_unique_sql - - Description: Clears the unique SQL statements in the memory of the database node. (The **sysadmin** permission is required.) - - Return type: Boolean - - **Table 2** reset_unique_sql parameter description - - | Parameter | Type | Description | - | :---------- | :--- | :----------------------------------------------------------- | - | scope | text | Clearance scope type. The options are as follows:
- **GLOBAL**: Clears all nodes. If the value is **GLOBAL**, this function can be executed only on the primary node.
- **LOCAL**: Clears the current node. | - | clean_type | text | - BY_USERID: Unique SQL statements are cleared based on user IDs.
- BY_CNID: Unique SQL statements are cleared based on primary node IDs.
- **ALL**: All data is cleared. | - | clean_value | int8 | Clearance value corresponding to the clearance type. | - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** - > - > - The values **GLOBAL** and **LOCAL** of **scope** apply to distributed databases. For MogDB, the values have the same meaning, indicating that the local node is cleared. - > - The value **BY_CNID** of **clean_type** applies only to distributed databases and is invalid for MogDB. - -- wdr_xdb_query(db_name_str text, query text) - - Description: Provides the capability of executing local cross-database queries. For example, when connecting to the Postgres database, access tables in the **test** database. - - ```sql - select col1 from wdr_xdb_query('dbname=test','select col1 from t1') as dd(col1 int); - ``` - - Return type: record - -- pg_wlm_jump_queue(pid int) - - Description: Moves a task to the top of the queue of the primary node of the database. - - Return type: Boolean - - - **true**: success - - **false**: failure - -- gs_wlm_switch_cgroup(pid int, cgroup text) - - Description: Moves a job to another Cgroup to change the job priority. - - Return type: Boolean - - - **true**: success - - **false**: failure - -- pv_session_memctx_detail(threadid tid, MemoryContextName text) - - Description: Records information about the memory context **MemoryContextName** of the thread **tid** into the *threadid***_timestamp.log** file in the *$GAUSSLOG***/pg_log/***${node_name}***/dumpmem** directory. *threadid* can be obtained from *sessid* in the **GS_SESSION_MEMORY_DETAIL** view. In the officially released version, only the **MemoryContextName** that is an empty string (two single quotation marks indicate that the input is an empty string) is accepted. In this case, all memory context information is recorded. Otherwise, no operation is performed. For the DEBUG version for internal development and test personnel to debug, you can specify the **MemoryContextName** to be counted. In this case, all the memory usage of the context is recorded in the specified file. Only the administrator can execute this function. - - Return type: Boolean - - - **true**: success - - **false**: failure - -- pg_shared_memctx_detail(MemoryContextName text) - - Description: Records information about the memory context **MemoryContextName** into the *threadid***_timestamp.log** file in the *$GAUSSLOG***/pg_log/***${node_name}***/dumpmem** directory. This function is provided only for internal development and test personnel to debug in the DEBUG version. Calling this function in the officially released version does not involve any operation. Only the administrator can execute this function. - - Return type: Boolean - - - **true**: success - - **false**: failure - -- local_bgwriter_stat() - - Description: Displays the information about pages flushed by the bgwriter thread of this instance, number of pages in the candidate buffer chain, and buffer elimination information. - - Return type: record - -- local_candidate_stat() - - Description: Displays the number of pages in the candidate buffer chain of this instance and buffer elimination information, including the normal buffer pool and segment buffer pool. - - Return type: record - -- local_ckpt_stat() - - Description: Displays the information about checkpoints and flushing pages of the current instance. - - Return type: record - -- local_double_write_stat() - - Description: Displays the doublewrite file status of the current instance. 
- - Return type: record - - **Table 3** local_double_write_stat parameters - - | Parameter | Type | Description | - | :-------------------- | :--- | :----------------------------------------------------------- | - | node_name | text | Instance name | - | curr_dwn | int8 | Sequence number of the doublewrite file | - | curr_start_page | int8 | Start page for restoring the doublewrite file | - | file_trunc_num | int8 | Number of times that the doublewrite file is reused | - | file_reset_num | int8 | Number of reset times after the doublewrite file is full | - | total_writes | int8 | Total number of I/Os of the doublewrite file | - | low_threshold_writes | int8 | Number of I/Os for writing doublewrite files with low efficiency (the number of I/O flushing pages at a time is less than 16) | - | high_threshold_writes | int8 | Number of I/Os for writing doublewrite files with high efficiency (the number of I/O flushing pages at a time is more than 421) | - | total_pages | int8 | Total number of pages that are flushed to the doublewrite file area | - | low_threshold_pages | int8 | Number of pages that are flushed with low efficiency | - | high_threshold_pages | int8 | Number of pages that are flushed with high efficiency | - | file_id | int8 | ID of the current doublewrite file | - -- local_single_flush_dw_stat() - - Description: Displays the elimination of dual-write files on a single page in the instance. - - Return type: record - -- local_pagewriter_stat() - - Description: Displays the page flushing information and checkpoint information of the current instance. - - Return type: record - -- local_redo_stat() - - Description: Displays the replay status of the current standby instance. - - Return type: record - - Note: The returned replay status includes the current replay position and the replay position of the minimum restoration point. - -- local_recovery_status() - - Description: Displays log flow control information about the primary and standby nodes. - - Return type: record - -- gs_wlm_node_recover(boolean isForce) - - Description: Obtains top SQL query statement-level statistics recorded in the current memory. If the input parameter is not **0**, the information is cleared from the memory. - - Return type: record - -- gs_wlm_node_clean(cstring nodename) - - Description: Clears data after the dynamic load management node is faulty. Only administrators can execute this function. This function is called by the database instance management module. You are not advised to directly call this function. This view is not supported in a centralized or standalone system. - - Return type: Boolean - -- gs_cgroup_map_ng_conf(group name) - - Description: Reads the Cgroup configuration file of a specified logical database. - - Return type: record - -- gs_wlm_switch_cgroup(sess_id int8, cgroup name) - - Description: Switches the Cgroup of a specified session. - - Return type: record - -- comm_client_info() - - Description: Queries information about active client connections of a single node. - - Return type: SETOF record - -- pg_sync_cstore_delta(text) - - Description: Synchronizes the delta table structure of a specified column-store table with that of the column-store primary table. - - Return type: bigint - -- pg_sync_cstore_delta() - - Description: Synchronizes the delta table structure of all column-store tables with that of the column-store primary table. - - Return type: bigint - -- pg_get_flush_lsn() - - Description: Returns the position of the Xlog flushed from the current node. 
- - Return type: text - -- pg_get_sync_flush_lsn() - - Description: Returns the position of the Xlog flushed by the majority on the current node. - - Return type: text - -- gs_create_log_tables() - - Description: Creates foreign tables and views for run logs and performance logs. - - Return type: void - - Example: - - ``` - MogDB=# select gs_create_log_tables(); - gs_create_log_tables - ---------------------- - - (1 row) - ``` - -- dbe_perf.get_global_full_sql_by_timestamp(start_timestamp timestamp with time zone, end_timestamp timestamp with time zone) - - Description: Obtains full SQL information at the database level. The result can be queried only in the system database but cannot be queried in the user database. - - Return type: record - - **Table 4** dbe_perf.get_global_full_sql_by_timestamp parameter description - - | Parameter | Type | Description | - | :-------------- | :----------------------- | :--------------------------------------- | - | start_timestamp | timestamp with time zone | Start point of the SQL start time range. | - | end_timestamp | timestamp with time zone | End point of the SQL start time range. | - -- dbe_perf.get_global_slow_sql_by_timestamp(start_timestamp timestamp with time zone, end_timestamp timestamp with time zone) - - Description: Obtains slow SQL information at the database level. The result can be queried only in the system database but cannot be queried in the user database. - - Return type: record - - **Table 5** dbe_perf.get_global_slow_sql_by_timestamp parameter description - - | Parameter | Type | Description | - | :-------------- | :----------------------- | :--------------------------------------- | - | start_timestamp | timestamp with time zone | Start point of the SQL start time range. | - | end_timestamp | timestamp with time zone | End point of the SQL start time range. | - -- statement_detail_decode(detail text, format text, pretty boolean) - - Description: Parses the **details** column in a full or slow SQL statement. The result can be queried only in the system database but cannot be queried in the user database. - - Return type: text - - **Table 6** statement_detail_decode parameter description - - | Parameter | Type | Description | - | :--------- | :------ | :----------------------------------------------------------- | - | **detail** | text | Set of events generated by the SQL statement (unreadable). | - | format | text | Parsing output format. The value is **plaintext**. | - | pretty | boolean | Whether to display the text in pretty format when **format** is set to **plaintext**. The options are as follows:The value **true** indicates that events are separated by **\n**.The value **false** indicates that events are separated by commas (,). | - -- get_prepared_pending_xid - - Description: Returns nextxid when restoration is complete. - - Parameter: nan - - Return type: text - -- pg_clean_region_info - - Description: Clears the regionmap. - - Parameter: nan - - Return type: character varying - -- pg_get_delta_info - - Description: Obtains delta information from a single DN. - - Parameter: rel text, schema_name text - - Return type: part_name text, live_tuple bigint, data_size bigint, and blocknum bigint - -- pg_get_replication_slot_name - - Description: Obtains the slot name. - - Parameter: nan - - Return type: text - -- pg_get_running_xacts - - Description: Obtains running xact. 
- - Parameter: **nan** - - Return type: handle integer, gxid xid, state tinyint, node text, xmin xid, vacuum boolean, timeline bigint, prepare_xid xid, pid bigint, and next_xid xid - -- pg_get_variable_info - - Description: Obtains the shared memory variable *cache*. - - Parameter: nan - - Return type: node_name text, nextOid oid, nextXid xid, oldestXid xid, xidVacLimit xid, oldestXidDB oid, lastExtendCSNLogpage xid, startExtendCSNLogpage xid, nextCommitSeqNo xid, latestCompletedXid xid, and startupMaxXid xid - -- pg_get_xidlimit - - Description: Obtains transaction ID information from the shared memory. - - Parameter: nan - - Return type: nextXid xid, oldestXid xid, xidVacLimit xid, xidWarnLimit xid, xidStopLimit xid, xidWrapLimit xid, and oldestXidDB oid - -- get_global_user_transaction() - - Description: Returns transaction information about each user on all nodes. - - Return type: node_name name, usename name, commit_counter bigint, rollback_counter bigint, resp_min bigint, resp_max bigint, resp_avg bigint, resp_total bigint, bg_commit_counter bigint, bg_rollback_counter bigint, bg_resp_min bigint, bg_resp_max bigint, bg_resp_avg bigint, and bg_resp_total bigint - -- pg_collation_for - - Description: Returns the sorting rule corresponding to the input parameter string. - - Parameter: any (Explicit type conversion is required for constants.) - - Return type: text - -- pgxc_unlock_for_sp_database(name Name) - - Description: Releases a specified database lock. - - Parameter: database name - - Return type: Boolean - -- pgxc_lock_for_sp_database(name Name) - - Description: Locks a specified database. - - Parameter: database name - - Return type: Boolean - -- copy_error_log_create() - - Description: Creates the error table (**public.pgxc_copy_error_log**) required for creating the **COPY FROM** error tolerance mechanism. - - Return type: Boolean - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - This function attempts to create the **public.pgxc_copy_error_log** table. For details about the table, see Table 7. - > - Create the B-tree index on the **relname** column and execute **REVOKE ALL on public.pgxc_copy_error_log FROM public** to manage permissions for the error table (the permissions are the same as those of the **COPY** statement). - > - **public.pgxc_copy_error_log** is a row-store table. Therefore, this function can be executed and **COPY** error tolerance is available only when row-store tables can be created in the database instance. Note that after the GUC parameter **enable_hadoop_env** is enabled, row-store tables cannot be created in the database instance (the default value is **off** for MogDB). - > - Same as the error table and the **COPY** statement, the function requires **sysadmin** or higher permissions. - > - If the **public.pgxc_copy_error_log** table or the **copy_error_log_relname_idx** index exists before the function creates it, the function will report an error and roll back. 
- - **Table 7** Error table public.pgxc_copy_error_log - - | Column | Type | Description | - | :--------- | :----------------------- | :----------------------------------------------------------- | - | relname | character varying | Table name in the form of *Schema name***.***Table name* | - | begintime | timestamp with time zone | Time when a data format error was reported | - | filename | character varying | Name of the source data file where a data format error occurs | - | lineno | bigint | Number of the row where a data format error occurs in a source data file | - | rawrecord | text | Raw record of a data format error in the source data file | - | **detail** | text | Error details | - -- dynamic_func_control(scope text, function_name text, action text, “{params}” text[]) - - Description: Dynamically enables built-in functions. Currently, only full SQL statements can be dynamically enabled. - - Return type: record - - **Table 8** Parameter description of dynamic_func_control - - | Parameter | Type | Description | - | :------------ | :----- | :----------------------------------------------------------- | - | scope | text | Scope where the function is to be dynamically enabled. Currently, only **LOCAL** is supported. | - | function_name | text | Function name. Currently, only **STMT** is supported. | - | action | text | When **function_name** is set to **STMT**, the value of **action** can only be **TRACK**, **UNTRACK**, **LIST**, or **CLEAN**.
- **TRACK**: records the full SQL information of normalized SQL statements.
- **UNTRACK**: cancels the recording of full SQL information of normalized SQL statements.
- **LIST**: lists normalized SQL information that is recorded in the current track.
- **CLEAN**: cleans normalized SQL information that is recorded in the current track. | - | params | text[] | When **function_name** is set to **STMT**, the parameters corresponding to different actions are set as follows:
- **TRACK**: **'{“Normalized SQLID”, “L0/L1/L2”}'**
- **UNTRACK**: **'{“Normalized SQLID”}'**
- LIST - '{}'
- CLEAN - '{}' | - -- gs_parse_page_bypath(path text, blocknum bigint, relation_type text, read_memory boolean) - - Description: Parses a specified table page and returns the path for storing the parsed content. - - Return type: text - - Note: Only the system administrator or O&M administrator can execute this function. - - **Table 9** gs_parse_page_bypath parameters - - | Parameter | Type | Description | - | :------------ | :------ | :----------------------------------------------------------- | - | path | text | - For an ordinary table or segment-page table, the relative path is *Tablespace name***/***Database OID***/***Relfilenode of the table (physical file name)*. For example, **base/16603/16394**.
- You can run the **pg_relation_filepath(table_name text)** command to query the relative path of the table file.
- Valid path formats are as follows:
- global/relNode
- base/dbNode/relNode
- pg_tblspc/spcNode/version_dir/dbNode/relNode | - | blocknum | bigint | - **-1**: Information about all blocks
- **0-***MaxBlockNumber*: Information about the corresponding block | - | relation_type | text | - **heap**: Astore table
- **uheap**: Ustore table
- btree_index: B-tree index
- ubtree_index: UBTree index
- **segment**: Segment-page | - | read_memory | boolean | - **false**: The system parses the page from the disk file.
- **true**: The system attempts to parse the page from the shared buffer. If the page does not exist in the shared buffer, the system parses the page from the disk file. | - -- gs_xlogdump_lsn(start_lsn text, end_lsn text) - - Description: Parses Xlogs within the specified LSN range and returns the path for storing the parsed content. You can use **pg_current_xlog_location()** to obtain the current Xlog position. - - Return type: text - - Parameters: LSN start position and LSN end position - - Note: Only the system administrator or O&M administrator can execute this function. - -- gs_xlogdump_xid(c_xid xid) - - Description: Parses Xlogs of a specified XID and returns the path for storing the parsed content. You can use **txid_current()** to obtain the current XID. - - Parameter: XID - - Return type: text - - Note: Only the system administrator or O&M administrator can execute this function. - -- gs_xlogdump_tablepath(path text, blocknum bigint, relation_type text) - - Description: Parses logs corresponding to a specified table page and returns the path for storing the parsed content. - - Return type: text - - Note: Only the system administrator or O&M administrator can execute this function. - - **Table 10** gs_xlogdump_tablepath parameters - - | Parameter | Type | Description | - | :------------ | :----- | :----------------------------------------------------------- | - | path | text | - For an ordinary table or segment-page table, the relative path is *Tablespace name***/***Database OID***/***Relfilenode of the table (physical file name)*. For example, **base/16603/16394**.
- You can run the **pg_relation_filepath(table_name text)** command to query the relative path of the table file.
- Valid path formats are as follows:
- global/relNode
- base/dbNode/relNode
- pg_tblspc/spcNode/version_dir/dbNode/relNode | - | blocknum | bigint | - **-1**: Information about all blocks
- **0-***MaxBlockNumber*: Information about the corresponding block | - | relation_type | text | - **heap**: Astore table
- **uheap**: Ustore table
- btree_index: B-tree index
- **ubtree_index**: UBTree index
- **segment**: Segment-page | - -- gs_xlogdump_parsepage_tablepath(path text, blocknum bigint, relation_type text, read_memory boolean) - - Description: Parses the specified table page and logs corresponding to the table page and returns the path for storing the parsed content. It can be regarded as one execution of **gs_parse_page_bypath** and **gs_xlogdump_tablepath**. The prerequisite for executing this function is that the table file exists. To view logs of deleted tables, call **gs_xlogdump_tablepath**. - - Return type: text - - Note: Only the system administrator or O&M administrator can execute this function. - - **Table 11** gs_xlogdump_parsepage_tablepath parameters - - | Parameter | Type | Description | - | :------------ | :------ | :----------------------------------------------------------- | - | path | text | - For an ordinary table or segment-page table, the relative path is *Tablespace name***/***Database OID***/***Relfilenode of the table (physical file name)*. For example, **base/16603/16394**.
- You can run the **pg_relation_filepath(table_name text)** command to query the relative path of the table file.
- Valid path formats are as follows:
- global/relNode
- base/dbNode/relNode
- pg_tblspc/spcNode/version_dir/dbNode/relNode | - | blocknum | bigint | - **-1**: Information about all blocks
- **0-***MaxBlockNumber*: Information about the corresponding block | - | relation_type | text | - **heap**: Astore table
- **uheap**: Ustore table
- btree_index: B-tree index
- ubtree_index: UBTree index
- **segment**: Segment-page | - | read_memory | boolean | - **false**: The system parses the page from the disk file.
- **true**: The system attempts to parse the page from the shared buffer. If the page does not exist in the shared buffer, the system parses the page from the disk file. | - -- gs_index_verify(Oid oid, uint32:wq blkno) - - Description: Checks whether the sequence of keys on the UBtree index page or index tree is correct. - - Return type: record - - **Table 12** gs_index_verify parameters - - | Parameter | Type | Description | - | :-------- | :----- | :----------------------------------------------------------- | - | oid | Oid | - Index file relfilenode, which can be queried using **select relfilenode from pg_class where relname='***Index file name***'**. | - | blkno | uint32 | - **0**: indicates that all pages in the index tree are checked.
- If the value is greater than 0, the index page whose page code is equal to the value of **blkno** is checked. | - -- gs_index_recycle_queue(Oid oid, int type, uint32 blkno) - - Description: Parses the UBtree index recycling queue information. - - Return type: record - - **Table 13** gs_index_recycle_queue parameters - - | Parameter | Type | Description | - | :-------- | :----- | :----------------------------------------------------------- | - | oid | Oid | - Index file relfilenode, which can be queried using **select relfilenode from pg_class where relname='***Index file name***'**. | - | type | int | - **0**: indicates that the entire queue to be recycled is parsed.
- **1**: indicates that the entire empty page queue is parsed.
- **2**: indicates that a single page is parsed. | - | blkno | uint32 | - ID of the recycling queue page. This parameter is valid only when **type** is set to **2**. The value of **blkno** ranges from 1 to 4294967294. | - -- gs_stat_wal_entrytable(int64 idx) - - Description: Exports the content of the write-ahead log insertion status table in the Xlog. - - Return type: record - - **Table 14** gs_stat_wal_entrytable parameters - - | Category | Parameter Name | Type | Description | - | :--------------- | :------------- | :----- | :----------------------------------------------------------- | - | Input parameter | idx | int64 | - **-1**: queries all elements in an array.
- **0-***Maximum value*: content of a specific array element. | - | Output parameter | idx | uint64 | Records the subscripts in the corresponding array. | - | Output parameter | endlsn | uint64 | Records the LSN label. | - | Output parameter | lrc | int32 | Records the corresponding LRC. | - | Output parameter | status | uint32 | Determines whether the Xlog corresponding to the current entry has been completely copied to the WAL buffer:
- **0**: Not copied.
- **1**: Copied | - -- gs_walwriter_flush_position() - - Description: Outputs the refresh position of write-ahead logs. - - Return type: record - - **Table 15** gs_walwriter_flush_position parameters - - | Category | Parameter Name | Type | Description | - | :--------------- | :---------------------- | :----- | :----------------------------------------------------------- | - | Output parameter | last_flush_status_entry | int32 | Subscript index obtained after the Xlog flushes the tblEntry of the last flushed disk. | - | Output parameter | last_scanned_lrc | int32 | LRC obtained after the Xlog flushes the last tblEntry scanned last time. | - | Output parameter | curr_lrc | int32 | Latest LRC usage in the WALInsertStatusEntry status table. The LRC indicates the LRC value corresponding to the WALInsertStatusEntry when the next Xlog record is written. | - | Output parameter | curr_byte_pos | uint64 | The latest Xlog position after the Xlog is written to the WAL file, which is also the next Xlog insertion point. | - | Output parameter | prev_byte_size | uint32 | Length of the previous Xlog record. | - | Output parameter | flush_result | uint64 | Position of the current global Xlog flush. | - | Output parameter | send_result | uint64 | Xlog sending position on the current host. | - | Output parameter | shm_rqst_write_pos | uint64 | The write position of the LogwrtRqst request in the XLogCtl recorded in the shared memory. | - | Output parameter | shm_rqst_flush_pos | uint64 | The flush position of the LogwrtRqst request in the XLogCtl recorded in the shared memory. | - | Output parameter | shm_result_write_pos | uint64 | The write position of the LogwrtResult request in the XLogCtl recorded in the shared memory. | - | Output parameter | shm_result_flush_pos | uint64 | The flush position of the LogwrtResult request in the XLogCtl recorded in the shared memory. | - | Output parameter | curr_time | text | Current time. | - -- gs_walwriter_flush_stat(int operation) - - Description: Collects statistics on the frequency of writing and synchronizing write-ahead logs, data volume, and Xlog file information. - - Return type: record - - **Table 16** gs_walwriter_flush_stat parameters - - | Category | Parameter Name | Type | Description | - | :--------------- | :--------------------------- | :----- | :----------------------------------------------------------- | - | Input parameter | operation | int | - **-1**: Disable the statistics function. (Default value)
- **0**: Enable the statistics function.
- **1**: Query statistics.
- **2**: Reset statistics. | - | Output parameter | write_times | uint64 | Number of times that the Xlog calls the **write** API. | - | Output parameter | sync_times | uint64 | Number of times that the Xlog calls the **sync** API. | - | Output parameter | total_xlog_sync_bytes | uint64 | Total number of backend thread requests for writing data to Xlogs. | - | Output parameter | total_actual_xlog_sync_bytes | uint64 | Total number of Xlogs that call the **sync** API for disk flushing. | - | Output parameter | avg_write_bytes | uint32 | Number of Xlogs written each time the **XLogWrite** API is called. | - | Output parameter | avg_actual_write_bytes | uint32 | Number of Xlogs written each time the **write** API is called. | - | Output parameter | avg_sync_bytes | uint32 | Average number of Xlogs for each synchronization request. | - | Output parameter | avg_actual_sync_bytes | uint32 | Actual Xlog amount of disk flushing by calling **sync** each time. | - | Output parameter | total_write_time | uint64 | Total time of calling the write operation (unit: μs). | - | Output parameter | total_sync_time | uint64 | Total time of calling the sync operation (unit: μs). | - | Output parameter | avg_write_time | uint32 | Average time for calling the **write** API each time (unit: μs). | - | Output parameter | avg_sync_time | uint32 | Average time for calling the **sync** API each time (unit: μs). | - | Output parameter | curr_init_xlog_segno | uint64 | ID of the latest Xlog segment file. | - | Output parameter | curr_open_xlog_segno | uint64 | ID of the Xlog segment file that is being written. | - | Output parameter | last_reset_time | text | Time when statistics were last collected. | - | Output parameter | curr_time | text | Current time. | - -- gs_comm_proxy_thread_status() - - Description: Collects statistics on data packets sent and received by the proxy communication library **comm_proxy** when a user-mode network is configured for the database instance. - - Parameter: nan - - Return type: record - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The query result of this function is displayed only when the user-mode network is deployed in a centralized environment and **enable_dfx in comm_proxy_attr** is set to **true**. In other scenarios, an error message is displayed, indicating that queries are not supported. +--- +title: Other Functions +summary: Other Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Other Functions + +- plan_seed() + + Description: Obtains the seed value of the previous query statement (internal use). + + Return type: int + +- pg_stat_get_env() + + Description: Obtains the environment variable information of the current node. Only users with the **sysadmin** or **monitor admin** permission can access the environment variable information. + + Return type: record + + Example: + + ```sql + MogDB=# select pg_stat_get_env(); + pg_stat_get_env + --------------------------------------------------------------------------------------------------------------------------------------- + (coordinator1,localhost,144773,49100,/data1/GaussDB_Kernel_TRUNK/install,/data1/GaussDB_Kernel_TRUNK/install/data/coordinator1,pg_log) + (1 row) + ``` + +- pg_catalog.plancache_clean() + + Description: Clears the global plan cache that is not used on nodes. + + Return type: Boolean + +- pg_catalog.plancache_status() + + Description: Displays information about the global plan cache on nodes. 
The information returned by the function is the same as that in GLOBAL_PLANCACHE_STATUS.
+
+  Return type: record
+
+- textlen(text)
+
+  Description: Returns the logical length of the given text.
+
+  Return type: int
+
+- threadpool_status()
+
+  Description: Displays the status of worker threads and sessions in the thread pool.
+
+  Return type: record
+
+- get_local_active_session()
+
+  Description: Provides sampling records of the historical active sessions stored in the memory of the current node.
+
+  Return type: record
+
+- pg_stat_get_thread()
+
+  Description: Provides status information about all threads on the current node. Users with the **sysadmin** or **monitor admin** permission can view information about all threads, and common users can view only their own thread information.
+
+  Return type: record
+
+- pg_stat_get_sql_count()
+
+  Description: Provides the counts of the **SELECT**, **UPDATE**, **INSERT**, **DELETE**, and **MERGE INTO** statements executed on the current node. Users with the **sysadmin** or **monitor admin** permission can view information about all users, and common users can view only their own statistics.
+
+  Return type: record
+
+- pg_stat_get_data_senders()
+
+  Description: Provides detailed information about the data-copy sending threads that are currently active.
+
+  Return type: record
+
+- get_wait_event_info()
+
+  Description: Provides detailed information about wait events.
+
+  Return type: record
+
+- generate_wdr_report(begin_snap_id bigint, end_snap_id bigint, report_type cstring, report_scope cstring, node_name cstring)
+
+  Description: Generates system diagnosis reports based on two snapshots. You need to run the command in the **postgres** database. By default, the initial user or the user with the **monadmin** permission can access the database. The result can be queried only in the system database but cannot be queried in the user database.
+
+  Return type: record
+
+  **Table 1** generate_wdr_report parameter description
+
+  | Parameter | Description | Range |
+  | ------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
+  | begin_snap_id | Snapshot ID that starts the diagnosis report period. | N/A |
+  | end_snap_id | Snapshot ID that ends the diagnosis report period. The value of **end_snap_id** must be greater than that of **begin_snap_id**. | N/A |
+  | report_type | Specifies the type of the generated report. | - **summary**
- **detail**
- **all**: Both **summary** and **detail** types are included. | + | report_scope | Specifies the scope for a report to be generated. | - **cluster**: database-level information
- **node**: node-level information |
+  | node_name | When **report\_scope** is set to **node**, set this parameter to the name of the corresponding node. (You can run the **select \* from pg_node_env;** command to query the node name.) If **report\_scope** is set to **cluster**, this parameter can be omitted, left blank, or set to **NULL**. | - **cluster**: This value is omitted, left blank, or set to **NULL**.
- **node**: indicates the node name in MogDB. |
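+
+  For example, a hypothetical call that generates a full node-level report between snapshots 1 and 2 (the snapshot IDs and the node name **dn_6001** are illustrative):
+
+  ```sql
+  MogDB=# select generate_wdr_report(1, 2, 'all', 'node', 'dn_6001');
+  ```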
+
+- create_wdr_snapshot()
+
+  Description: Manually generates system diagnosis snapshots. This function requires the **sysadmin** permission.
+
+  Return type: text
+
+- kill_snapshot()
+
+  Description: Kills the WDR snapshot backend thread. Users who invoke this function must have the **SYSADMIN** or **REPLICATION** permission, or inherit the permissions of the built-in role **gs_role_replication**.
+
+  Return type: void
+
+- capture_view_to_json(text,integer)
+
+  Description: Saves the view result to the directory specified by the GUC parameter **perf_directory**. If **is_crossdb** is set to **1**, the view is accessed once for all databases. If the value of **is_crossdb** is **0**, the current database is accessed only once. Only users with the **sysadmin** or **monitor admin** permission can execute this function.
+
+  Return type: int
+
+- reset_unique_sql
+
+  Description: Clears the unique SQL statements in the memory of the database node. (The **sysadmin** permission is required.)
+
+  Return type: Boolean
+
+  **Table 2** reset_unique_sql parameter description
+
+  | Parameter | Type | Description |
+  | :---------- | :--- | :----------------------------------------------------------- |
+  | scope | text | Clearance scope type. The options are as follows:
- **GLOBAL**: Clears all nodes. If the value is **GLOBAL**, this function can be executed only on the primary node.
- **LOCAL**: Clears the current node. |
+  | clean_type | text | - **BY_USERID**: Unique SQL statements are cleared based on user IDs.
- **BY_CNID**: Unique SQL statements are cleared based on primary node IDs.
- **ALL**: All data is cleared. | + | clean_value | int8 | Clearance value corresponding to the clearance type. | + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** + > + > - The values **GLOBAL** and **LOCAL** of **scope** apply to distributed databases. For MogDB, the values have the same meaning, indicating that the local node is cleared. + > - The value **BY_CNID** of **clean_type** applies only to distributed databases and is invalid for MogDB. + +- wdr_xdb_query(db_name_str text, query text) + + Description: Provides the capability of executing local cross-database queries. For example, when connecting to the Postgres database, access tables in the **test** database. + + ```sql + select col1 from wdr_xdb_query('dbname=test','select col1 from t1') as dd(col1 int); + ``` + + Return type: record + +- pg_wlm_jump_queue(pid int) + + Description: Moves a task to the top of the queue of the primary node of the database. + + Return type: Boolean + + - **true**: success + - **false**: failure + +- gs_wlm_switch_cgroup(pid int, cgroup text) + + Description: Moves a job to another Cgroup to change the job priority. + + Return type: Boolean + + - **true**: success + - **false**: failure + +- pv_session_memctx_detail(threadid tid, MemoryContextName text) + + Description: Records information about the memory context **MemoryContextName** of the thread **tid** into the *threadid***_timestamp.log** file in the *$GAUSSLOG***/pg_log/***${node_name}***/dumpmem** directory. *threadid* can be obtained from *sessid* in the **GS_SESSION_MEMORY_DETAIL** view. In the officially released version, only the **MemoryContextName** that is an empty string (two single quotation marks indicate that the input is an empty string) is accepted. In this case, all memory context information is recorded. Otherwise, no operation is performed. For the DEBUG version for internal development and test personnel to debug, you can specify the **MemoryContextName** to be counted. In this case, all the memory usage of the context is recorded in the specified file. Only the administrator can execute this function. + + Return type: Boolean + + - **true**: success + - **false**: failure + +- pg_shared_memctx_detail(MemoryContextName text) + + Description: Records information about the memory context **MemoryContextName** into the *threadid***_timestamp.log** file in the *$GAUSSLOG***/pg_log/***${node_name}***/dumpmem** directory. This function is provided only for internal development and test personnel to debug in the DEBUG version. Calling this function in the officially released version does not involve any operation. Only the administrator can execute this function. + + Return type: Boolean + + - **true**: success + - **false**: failure + +- local_bgwriter_stat() + + Description: Displays the information about pages flushed by the bgwriter thread of this instance, number of pages in the candidate buffer chain, and buffer elimination information. + + Return type: record + +- local_candidate_stat() + + Description: Displays the number of pages in the candidate buffer chain of this instance and buffer elimination information, including the normal buffer pool and segment buffer pool. + + Return type: record + +- local_ckpt_stat() + + Description: Displays the information about checkpoints and flushing pages of the current instance. + + Return type: record + +- local_double_write_stat() + + Description: Displays the doublewrite file status of the current instance. 
+ + Return type: record + + **Table 3** local_double_write_stat parameters + + | Parameter | Type | Description | + | :-------------------- | :--- | :----------------------------------------------------------- | + | node_name | text | Instance name | + | curr_dwn | int8 | Sequence number of the doublewrite file | + | curr_start_page | int8 | Start page for restoring the doublewrite file | + | file_trunc_num | int8 | Number of times that the doublewrite file is reused | + | file_reset_num | int8 | Number of reset times after the doublewrite file is full | + | total_writes | int8 | Total number of I/Os of the doublewrite file | + | low_threshold_writes | int8 | Number of I/Os for writing doublewrite files with low efficiency (the number of I/O flushing pages at a time is less than 16) | + | high_threshold_writes | int8 | Number of I/Os for writing doublewrite files with high efficiency (the number of I/O flushing pages at a time is more than 421) | + | total_pages | int8 | Total number of pages that are flushed to the doublewrite file area | + | low_threshold_pages | int8 | Number of pages that are flushed with low efficiency | + | high_threshold_pages | int8 | Number of pages that are flushed with high efficiency | + | file_id | int8 | ID of the current doublewrite file | + +- local_single_flush_dw_stat() + + Description: Displays the elimination of dual-write files on a single page in the instance. + + Return type: record + +- local_pagewriter_stat() + + Description: Displays the page flushing information and checkpoint information of the current instance. + + Return type: record + +- local_redo_stat() + + Description: Displays the replay status of the current standby instance. + + Return type: record + + Note: The returned replay status includes the current replay position and the replay position of the minimum restoration point. + +- local_recovery_status() + + Description: Displays log flow control information about the primary and standby nodes. + + Return type: record + +- gs_wlm_node_recover(boolean isForce) + + Description: Obtains top SQL query statement-level statistics recorded in the current memory. If the input parameter is not **0**, the information is cleared from the memory. + + Return type: record + +- gs_wlm_node_clean(cstring nodename) + + Description: Clears data after the dynamic load management node is faulty. Only administrators can execute this function. This function is called by the database instance management module. You are not advised to directly call this function. This view is not supported in a centralized or standalone system. + + Return type: Boolean + +- gs_cgroup_map_ng_conf(group name) + + Description: Reads the Cgroup configuration file of a specified logical database. + + Return type: record + +- gs_wlm_switch_cgroup(sess_id int8, cgroup name) + + Description: Switches the Cgroup of a specified session. + + Return type: record + +- comm_client_info() + + Description: Queries information about active client connections of a single node. + + Return type: SETOF record + +- pg_sync_cstore_delta(text) + + Description: Synchronizes the delta table structure of a specified column-store table with that of the column-store primary table. + + Return type: bigint + +- pg_sync_cstore_delta() + + Description: Synchronizes the delta table structure of all column-store tables with that of the column-store primary table. + + Return type: bigint + +- pg_get_flush_lsn() + + Description: Returns the position of the Xlog flushed from the current node. 
+ + Return type: text + +- pg_get_sync_flush_lsn() + + Description: Returns the position of the Xlog flushed by the majority on the current node. + + Return type: text + +- gs_create_log_tables() + + Description: Creates foreign tables and views for run logs and performance logs. + + Return type: void + + Example: + + ``` + MogDB=# select gs_create_log_tables(); + gs_create_log_tables + ---------------------- + + (1 row) + ``` + +- dbe_perf.get_global_full_sql_by_timestamp(start_timestamp timestamp with time zone, end_timestamp timestamp with time zone) + + Description: Obtains full SQL information at the database level. The result can be queried only in the system database but cannot be queried in the user database. + + Return type: record + + **Table 4** dbe_perf.get_global_full_sql_by_timestamp parameter description + + | Parameter | Type | Description | + | :-------------- | :----------------------- | :--------------------------------------- | + | start_timestamp | timestamp with time zone | Start point of the SQL start time range. | + | end_timestamp | timestamp with time zone | End point of the SQL start time range. | + +- dbe_perf.get_global_slow_sql_by_timestamp(start_timestamp timestamp with time zone, end_timestamp timestamp with time zone) + + Description: Obtains slow SQL information at the database level. The result can be queried only in the system database but cannot be queried in the user database. + + Return type: record + + **Table 5** dbe_perf.get_global_slow_sql_by_timestamp parameter description + + | Parameter | Type | Description | + | :-------------- | :----------------------- | :--------------------------------------- | + | start_timestamp | timestamp with time zone | Start point of the SQL start time range. | + | end_timestamp | timestamp with time zone | End point of the SQL start time range. | + +- statement_detail_decode(detail text, format text, pretty boolean) + + Description: Parses the **details** column in a full or slow SQL statement. The result can be queried only in the system database but cannot be queried in the user database. + + Return type: text + + **Table 6** statement_detail_decode parameter description + + | Parameter | Type | Description | + | :--------- | :------ | :----------------------------------------------------------- | + | **detail** | text | Set of events generated by the SQL statement (unreadable). | + | format | text | Parsing output format. The value is **plaintext**. | + | pretty | boolean | Whether to display the text in pretty format when **format** is set to **plaintext**. The options are as follows:The value **true** indicates that events are separated by **\n**.The value **false** indicates that events are separated by commas (,). | + +- get_prepared_pending_xid + + Description: Returns nextxid when restoration is complete. + + Parameter: nan + + Return type: text + +- pg_clean_region_info + + Description: Clears the regionmap. + + Parameter: nan + + Return type: character varying + +- pg_get_delta_info + + Description: Obtains delta information from a single DN. + + Parameter: rel text, schema_name text + + Return type: part_name text, live_tuple bigint, data_size bigint, and blocknum bigint + +- pg_get_replication_slot_name + + Description: Obtains the slot name. + + Parameter: nan + + Return type: text + +- pg_get_running_xacts + + Description: Obtains running xact. 
+ + Parameter: **nan** + + Return type: handle integer, gxid xid, state tinyint, node text, xmin xid, vacuum boolean, timeline bigint, prepare_xid xid, pid bigint, and next_xid xid + +- pg_get_variable_info + + Description: Obtains the shared memory variable *cache*. + + Parameter: nan + + Return type: node_name text, nextOid oid, nextXid xid, oldestXid xid, xidVacLimit xid, oldestXidDB oid, lastExtendCSNLogpage xid, startExtendCSNLogpage xid, nextCommitSeqNo xid, latestCompletedXid xid, and startupMaxXid xid + +- pg_get_xidlimit + + Description: Obtains transaction ID information from the shared memory. + + Parameter: nan + + Return type: nextXid xid, oldestXid xid, xidVacLimit xid, xidWarnLimit xid, xidStopLimit xid, xidWrapLimit xid, and oldestXidDB oid + +- get_global_user_transaction() + + Description: Returns transaction information about each user on all nodes. + + Return type: node_name name, usename name, commit_counter bigint, rollback_counter bigint, resp_min bigint, resp_max bigint, resp_avg bigint, resp_total bigint, bg_commit_counter bigint, bg_rollback_counter bigint, bg_resp_min bigint, bg_resp_max bigint, bg_resp_avg bigint, and bg_resp_total bigint + +- pg_collation_for + + Description: Returns the sorting rule corresponding to the input parameter string. + + Parameter: any (Explicit type conversion is required for constants.) + + Return type: text + +- pgxc_unlock_for_sp_database(name Name) + + Description: Releases a specified database lock. + + Parameter: database name + + Return type: Boolean + +- pgxc_lock_for_sp_database(name Name) + + Description: Locks a specified database. + + Parameter: database name + + Return type: Boolean + +- copy_error_log_create() + + Description: Creates the error table (**public.pgxc_copy_error_log**) required for creating the **COPY FROM** error tolerance mechanism. + + Return type: Boolean + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** + > + > - This function attempts to create the **public.pgxc_copy_error_log** table. For details about the table, see Table 7. + > - Create the B-tree index on the **relname** column and execute **REVOKE ALL on public.pgxc_copy_error_log FROM public** to manage permissions for the error table (the permissions are the same as those of the **COPY** statement). + > - **public.pgxc_copy_error_log** is a row-store table. Therefore, this function can be executed and **COPY** error tolerance is available only when row-store tables can be created in the database instance. Note that after the GUC parameter **enable_hadoop_env** is enabled, row-store tables cannot be created in the database instance (the default value is **off** for MogDB). + > - Same as the error table and the **COPY** statement, the function requires **sysadmin** or higher permissions. + > - If the **public.pgxc_copy_error_log** table or the **copy_error_log_relname_idx** index exists before the function creates it, the function will report an error and roll back. 
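+
+  A minimal usage sketch (the table **t1** and the data file path are illustrative; **LOG ERRORS** and **REJECT LIMIT** are the **COPY** error-tolerance options this error table serves):
+
+  ```sql
+  MogDB=# select copy_error_log_create();
+  MogDB=# copy t1 from '/tmp/t1.csv' log errors reject limit 'unlimited';
+  MogDB=# select relname, lineno, rawrecord, detail from public.pgxc_copy_error_log;
+  ```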
+ + **Table 7** Error table public.pgxc_copy_error_log + + | Column | Type | Description | + | :--------- | :----------------------- | :----------------------------------------------------------- | + | relname | character varying | Table name in the form of *Schema name***.***Table name* | + | begintime | timestamp with time zone | Time when a data format error was reported | + | filename | character varying | Name of the source data file where a data format error occurs | + | lineno | bigint | Number of the row where a data format error occurs in a source data file | + | rawrecord | text | Raw record of a data format error in the source data file | + | **detail** | text | Error details | + +- dynamic_func_control(scope text, function_name text, action text, “{params}” text[]) + + Description: Dynamically enables built-in functions. Currently, only full SQL statements can be dynamically enabled. + + Return type: record + + **Table 8** Parameter description of dynamic_func_control + + | Parameter | Type | Description | + | :------------ | :----- | :----------------------------------------------------------- | + | scope | text | Scope where the function is to be dynamically enabled. Currently, only **LOCAL** is supported. | + | function_name | text | Function name. Currently, only **STMT** is supported. | + | action | text | When **function_name** is set to **STMT**, the value of **action** can only be **TRACK**, **UNTRACK**, **LIST**, or **CLEAN**.
- **TRACK**: records the full SQL information of normalized SQL statements.
- **UNTRACK**: cancels the recording of full SQL information of normalized SQL statements.
- **LIST**: lists normalized SQL information that is recorded in the current track.
- **CLEAN**: cleans normalized SQL information that is recorded in the current track. | + | params | text[] | When **function_name** is set to **STMT**, the parameters corresponding to different actions are set as follows:
- **TRACK**: **'{“Normalized SQLID”, “L0/L1/L2”}'**
- **UNTRACK**: **'{“Normalized SQLID”}'**
- **LIST**: **'{}'**
- **CLEAN**: **'{}'** |
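+
+  For example, to start recording full SQL information for one normalized SQL statement (the unique SQL ID **2579396064** and the level **L1** below are illustrative):
+
+  ```sql
+  MogDB=# select dynamic_func_control('LOCAL', 'STMT', 'TRACK', '{"2579396064", "L1"}');
+  ```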
- You can run the **pg_relation_filepath(table_name text)** command to query the relative path of the table file.
- Valid path formats are as follows:
- global/relNode
- base/dbNode/relNode
- pg_tblspc/spcNode/version_dir/dbNode/relNode | + | blocknum | bigint | - **-1**: Information about all blocks
- **0-***MaxBlockNumber*: Information about the corresponding block | + | relation_type | text | - **heap**: Astore table
- **uheap**: Ustore table
- **btree_index**: B-tree index
- **ubtree_index**: UBTree index
- **segment**: Segment-page | + | read_memory | boolean | - **false**: The system parses the page from the disk file.
- **true**: The system attempts to parse the page from the shared buffer. If the page does not exist in the shared buffer, the system parses the page from the disk file. | + +- gs_xlogdump_lsn(start_lsn text, end_lsn text) + + Description: Parses Xlogs within the specified LSN range and returns the path for storing the parsed content. You can use **pg_current_xlog_location()** to obtain the current Xlog position. + + Return type: text + + Parameters: LSN start position and LSN end position + + Note: Only the system administrator or O&M administrator can execute this function. + +- gs_xlogdump_xid(c_xid xid) + + Description: Parses Xlogs of a specified XID and returns the path for storing the parsed content. You can use **txid_current()** to obtain the current XID. + + Parameter: XID + + Return type: text + + Note: Only the system administrator or O&M administrator can execute this function. + +- gs_xlogdump_tablepath(path text, blocknum bigint, relation_type text) + + Description: Parses logs corresponding to a specified table page and returns the path for storing the parsed content. + + Return type: text + + Note: Only the system administrator or O&M administrator can execute this function. + + **Table 10** gs_xlogdump_tablepath parameters + + | Parameter | Type | Description | + | :------------ | :----- | :----------------------------------------------------------- | + | path | text | - For an ordinary table or segment-page table, the relative path is *Tablespace name***/***Database OID***/***Relfilenode of the table (physical file name)*. For example, **base/16603/16394**.
- You can run the **pg_relation_filepath(table_name text)** command to query the relative path of the table file.
- Valid path formats are as follows:
- global/relNode
- base/dbNode/relNode
- pg_tblspc/spcNode/version_dir/dbNode/relNode | + | blocknum | bigint | - **-1**: Information about all blocks
- **0-***MaxBlockNumber*: Information about the corresponding block | + | relation_type | text | - **heap**: Astore table
- **uheap**: Ustore table
- **btree_index**: B-tree index
- **ubtree_index**: UBTree index
- **segment**: Segment-page | + +- gs_xlogdump_parsepage_tablepath(path text, blocknum bigint, relation_type text, read_memory boolean) + + Description: Parses the specified table page and logs corresponding to the table page and returns the path for storing the parsed content. It can be regarded as one execution of **gs_parse_page_bypath** and **gs_xlogdump_tablepath**. The prerequisite for executing this function is that the table file exists. To view logs of deleted tables, call **gs_xlogdump_tablepath**. + + Return type: text + + Note: Only the system administrator or O&M administrator can execute this function. + + **Table 11** gs_xlogdump_parsepage_tablepath parameters + + | Parameter | Type | Description | + | :------------ | :------ | :----------------------------------------------------------- | + | path | text | - For an ordinary table or segment-page table, the relative path is *Tablespace name***/***Database OID***/***Relfilenode of the table (physical file name)*. For example, **base/16603/16394**.
- You can run the **pg_relation_filepath(table_name text)** command to query the relative path of the table file.
- Valid path formats are as follows:
- global/relNode
- base/dbNode/relNode
- pg_tblspc/spcNode/version_dir/dbNode/relNode | + | blocknum | bigint | - **-1**: Information about all blocks
- **0-***MaxBlockNumber*: Information about the corresponding block | + | relation_type | text | - **heap**: Astore table
- **uheap**: Ustore table
- **btree_index**: B-tree index
- **ubtree_index**: UBTree index
- **segment**: Segment-page | + | read_memory | boolean | - **false**: The system parses the page from the disk file.
- **true**: The system attempts to parse the page from the shared buffer. If the page does not exist in the shared buffer, the system parses the page from the disk file. |
+
+- gs_index_verify(Oid oid, uint32 blkno)
+
+  Description: Checks whether the sequence of keys on the UBtree index page or index tree is correct.
+
+  Return type: record
+
+  **Table 12** gs_index_verify parameters
+
+  | Parameter | Type | Description |
+  | :-------- | :----- | :----------------------------------------------------------- |
+  | oid | Oid | - Index file relfilenode, which can be queried using **select relfilenode from pg_class where relname='***Index file name***'**. |
+  | blkno | uint32 | - **0**: indicates that all pages in the index tree are checked.
- If the value is greater than 0, the index page whose block number is equal to the value of **blkno** is checked. |
+
+- gs_index_recycle_queue(Oid oid, int type, uint32 blkno)
+
+  Description: Parses the UBtree index recycling queue information.
+
+  Return type: record
+
+  **Table 13** gs_index_recycle_queue parameters
+
+  | Parameter | Type | Description |
+  | :-------- | :----- | :----------------------------------------------------------- |
+  | oid | Oid | - Index file relfilenode, which can be queried using **select relfilenode from pg_class where relname='***Index file name***'**. |
+  | type | int | - **0**: indicates that the entire queue to be recycled is parsed.
- **1**: indicates that the entire empty page queue is parsed.
- **2**: indicates that a single page is parsed. | + | blkno | uint32 | - ID of the recycling queue page. This parameter is valid only when **type** is set to **2**. The value of **blkno** ranges from 1 to 4294967294. | + +- gs_stat_wal_entrytable(int64 idx) + + Description: Exports the content of the write-ahead log insertion status table in the Xlog. + + Return type: record + + **Table 14** gs_stat_wal_entrytable parameters + + | Category | Parameter Name | Type | Description | + | :--------------- | :------------- | :----- | :----------------------------------------------------------- | + | Input parameter | idx | int64 | - **-1**: queries all elements in an array.
- **0-***Maximum value*: content of a specific array element. | + | Output parameter | idx | uint64 | Records the subscripts in the corresponding array. | + | Output parameter | endlsn | uint64 | Records the LSN label. | + | Output parameter | lrc | int32 | Records the corresponding LRC. | + | Output parameter | status | uint32 | Determines whether the Xlog corresponding to the current entry has been completely copied to the WAL buffer:
- **0**: Not copied.
- **1**: Copied | + +- gs_walwriter_flush_position() + + Description: Outputs the refresh position of write-ahead logs. + + Return type: record + + **Table 15** gs_walwriter_flush_position parameters + + | Category | Parameter Name | Type | Description | + | :--------------- | :---------------------- | :----- | :----------------------------------------------------------- | + | Output parameter | last_flush_status_entry | int32 | Subscript index obtained after the Xlog flushes the tblEntry of the last flushed disk. | + | Output parameter | last_scanned_lrc | int32 | LRC obtained after the Xlog flushes the last tblEntry scanned last time. | + | Output parameter | curr_lrc | int32 | Latest LRC usage in the WALInsertStatusEntry status table. The LRC indicates the LRC value corresponding to the WALInsertStatusEntry when the next Xlog record is written. | + | Output parameter | curr_byte_pos | uint64 | The latest Xlog position after the Xlog is written to the WAL file, which is also the next Xlog insertion point. | + | Output parameter | prev_byte_size | uint32 | Length of the previous Xlog record. | + | Output parameter | flush_result | uint64 | Position of the current global Xlog flush. | + | Output parameter | send_result | uint64 | Xlog sending position on the current host. | + | Output parameter | shm_rqst_write_pos | uint64 | The write position of the LogwrtRqst request in the XLogCtl recorded in the shared memory. | + | Output parameter | shm_rqst_flush_pos | uint64 | The flush position of the LogwrtRqst request in the XLogCtl recorded in the shared memory. | + | Output parameter | shm_result_write_pos | uint64 | The write position of the LogwrtResult request in the XLogCtl recorded in the shared memory. | + | Output parameter | shm_result_flush_pos | uint64 | The flush position of the LogwrtResult request in the XLogCtl recorded in the shared memory. | + | Output parameter | curr_time | text | Current time. | + +- gs_walwriter_flush_stat(int operation) + + Description: Collects statistics on the frequency of writing and synchronizing write-ahead logs, data volume, and Xlog file information. + + Return type: record + + **Table 16** gs_walwriter_flush_stat parameters + + | Category | Parameter Name | Type | Description | + | :--------------- | :--------------------------- | :----- | :----------------------------------------------------------- | + | Input parameter | operation | int | - **-1**: Disable the statistics function. (Default value)
- **0**: Enable the statistics function.
- **1**: Query statistics.
- **2**: Reset statistics. |
+  | Output parameter | write_times | uint64 | Number of times that the Xlog calls the **write** API. |
+  | Output parameter | sync_times | uint64 | Number of times that the Xlog calls the **sync** API. |
+  | Output parameter | total_xlog_sync_bytes | uint64 | Total number of backend thread requests for writing data to Xlogs. |
+  | Output parameter | total_actual_xlog_sync_bytes | uint64 | Total number of Xlogs that call the **sync** API for disk flushing. |
+  | Output parameter | avg_write_bytes | uint32 | Number of Xlogs written each time the **XLogWrite** API is called. |
+  | Output parameter | avg_actual_write_bytes | uint32 | Number of Xlogs written each time the **write** API is called. |
+  | Output parameter | avg_sync_bytes | uint32 | Average number of Xlogs for each synchronization request. |
+  | Output parameter | avg_actual_sync_bytes | uint32 | Actual Xlog amount of disk flushing by calling **sync** each time. |
+  | Output parameter | total_write_time | uint64 | Total time of calling the write operation (unit: μs). |
+  | Output parameter | total_sync_time | uint64 | Total time of calling the sync operation (unit: μs). |
+  | Output parameter | avg_write_time | uint32 | Average time for calling the **write** API each time (unit: μs). |
+  | Output parameter | avg_sync_time | uint32 | Average time for calling the **sync** API each time (unit: μs). |
+  | Output parameter | curr_init_xlog_segno | uint64 | ID of the latest Xlog segment file. |
+  | Output parameter | curr_open_xlog_segno | uint64 | ID of the Xlog segment file that is being written. |
+  | Output parameter | last_reset_time | text | Time when statistics were last collected. |
+  | Output parameter | curr_time | text | Current time. |
+
+- gs_comm_proxy_thread_status()
+
+  Description: Collects statistics on data packets sent and received by the proxy communication library **comm_proxy** when a user-mode network is configured for the database instance.
+
+  Parameter: none
+
+  Return type: record
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The query result of this function is displayed only when the user-mode network is deployed in a centralized environment and **enable_dfx** in **comm_proxy_attr** is set to **true**. In other scenarios, an error message is displayed, indicating that queries are not supported.
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/row-store-compression-system-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/row-store-compression-system-functions.md
index 8d76ded9..3f7023c5 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/row-store-compression-system-functions.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/row-store-compression-system-functions.md
@@ -1,95 +1,95 @@
----
-title: Row-store Compression System Functions
-summary: Row-store Compression System Functions
-author: Guo Huan
-date: 2022-10-31
----
-
-# Row-store Compression System Functions
-
-- compress_buffer_stat_info()
-
-  Description: Queries the PCA buffer statistics.
- - Return type: record - - **Table 1** compress_buffer_stat_info parameters - - | Parameter Type | Parameter Name | Type | Description | - | :--------------- | :------------- | :----- | :------------------------------------------------- | - | Output parameter | ctrl_cnt | bigint | pca_page_ctrl_t structure. | - | Output parameter | main_cnt | bigint | Total number of main LRU chains in each partition. | - | Output parameter | free_cnt | bigint | Total number of free LRU chains in each partition. | - | Output parameter | recycle_times | bigint | Number of times that the buffer eliminates LRUs. | - -- compress_ratio_info(file_path text) - - Description: Views the file compression rate. - - Return type: record - - **Table 2** compress_ratio_info parameters - - | Parameter Type | Parameter Name | Type | Description | - | :--------------- | :------------- | :------ | :------------------------------------------------ | - | Input parameter | file_path | text | Relative file path. | - | Output parameter | path | text | Relative path of the file. | - | Output parameter | is_compress | boolean | Determines whether the file is a compressed file. | - | Output parameter | file_count | bigint | Number of contained segment files. | - | Output parameter | logic_size | bigint | Logical size, in bytes. | - | Output parameter | physic_size | bigint | Actual physical size, in bytes. | - | Output parameter | compress_ratio | text | File compression rate. | - -- compress_statistic_info(file_path text, step smallint) - - Description: Collects statistics on the dispersion of compressed files. - - Return type: record - - **Table 3** compress_statistic_info parameters - - | Parameter Type | Parameter Name | Type | Description | - | :--------------- | :--------------- | :------- | :------------------------------------------------ | - | Input parameter | file_path | text | Relative path of the file. | - | Input parameter | step | smallint | Sampling statistics step. | - | Output parameter | path | text | Relative path of the file. | - | Output parameter | extent_count | bigint | Number of extents. | - | Output parameter | dispersion_count | bigint | Number of pages containing discrete chunks. | - | Output parameter | void_count | bigint | Number of pages containing unacknowledged chunks. | - -- compress_address_header(oid regclass, seg_id bigint) - - Description: Views the management information on the file compression page. - - Return type: record - - **Table 4** compress_address_header parameters - - | Parameter Type | Parameter Name | Type | Description | - | :--------------- | :-------------- | :------- | :--------------------------------------------- | - | Input parameter | oid | regclass | reloid of the table to which the file belongs. | - | Input parameter | seg_id | bigint | Sequence number of a segment file. | - | Output parameter | extent | bigint | ID of the extent. | - | Output parameter | nblocks | bigint | Number of pages in the extent. | - | Output parameter | alocated_chunks | integer | Number of chunks allocated in the extent. | - | Output parameter | chunk_size | integer | Chunk size, in bytes. | - | Output parameter | algorithm | bigint | Compression algorithm. | - -- compress_address_details(oid regclass, seg_id bigint) - - Description: Detailed information about the usage of page chunks. 
- - Return type: record - - **Table 5** compress_address_details parameters - - | Parameter Type | Parameter Name | Type | Description | - | :--------------- | :------------------ | :------- | :----------------------------------------------------------- | - | Input parameter | oid | regclass | reloid of the table to which the file belongs. | - | Input parameter | seg_id | bigint | Sequence number of a segment file. | - | Output parameter | extent | bigint | ID of the extent. | - | Output parameter | extent_block_number | bigint | Page number in the extent. The value ranges from 0 to 127. | - | Output parameter | block_number | bigint | Overall page number. | - | Output parameter | alocated_chunks | integer | Number of chunks used by the page. | - | Output parameter | nchunks | integer | Number of chunks used by the page. The value cannot be greater than the value of **alocated_chunks**. | - | Output parameter | chunknos | integer | Number of the used chunks, starting from 1. | +--- +title: Row-store Compression System Functions +summary: Row-store Compression System Functions +author: Guo Huan +date: 2022-10-31 +--- + +# Row-store Compression System Functions + +- compress_buffer_stat_info() + + Description: Queries the PCA buffer statistics. + + Return type: record + + **Table 1** compress_buffer_stat_info parameters + + | Parameter Type | Parameter Name | Type | Description | + | :--------------- | :------------- | :----- | :------------------------------------------------- | + | Output parameter | ctrl_cnt | bigint | pca_page_ctrl_t structure. | + | Output parameter | main_cnt | bigint | Total number of main LRU chains in each partition. | + | Output parameter | free_cnt | bigint | Total number of free LRU chains in each partition. | + | Output parameter | recycle_times | bigint | Number of times that the buffer eliminates LRUs. | + +- compress_ratio_info(file_path text) + + Description: Views the file compression rate. + + Return type: record + + **Table 2** compress_ratio_info parameters + + | Parameter Type | Parameter Name | Type | Description | + | :--------------- | :------------- | :------ | :------------------------------------------------ | + | Input parameter | file_path | text | Relative file path. | + | Output parameter | path | text | Relative path of the file. | + | Output parameter | is_compress | boolean | Determines whether the file is a compressed file. | + | Output parameter | file_count | bigint | Number of contained segment files. | + | Output parameter | logic_size | bigint | Logical size, in bytes. | + | Output parameter | physic_size | bigint | Actual physical size, in bytes. | + | Output parameter | compress_ratio | text | File compression rate. | + +- compress_statistic_info(file_path text, step smallint) + + Description: Collects statistics on the dispersion of compressed files. + + Return type: record + + **Table 3** compress_statistic_info parameters + + | Parameter Type | Parameter Name | Type | Description | + | :--------------- | :--------------- | :------- | :------------------------------------------------ | + | Input parameter | file_path | text | Relative path of the file. | + | Input parameter | step | smallint | Sampling statistics step. | + | Output parameter | path | text | Relative path of the file. | + | Output parameter | extent_count | bigint | Number of extents. | + | Output parameter | dispersion_count | bigint | Number of pages containing discrete chunks. 
| + | Output parameter | void_count | bigint | Number of pages containing unacknowledged chunks. | + +- compress_address_header(oid regclass, seg_id bigint) + + Description: Views the management information on the file compression page. + + Return type: record + + **Table 4** compress_address_header parameters + + | Parameter Type | Parameter Name | Type | Description | + | :--------------- | :-------------- | :------- | :--------------------------------------------- | + | Input parameter | oid | regclass | reloid of the table to which the file belongs. | + | Input parameter | seg_id | bigint | Sequence number of a segment file. | + | Output parameter | extent | bigint | ID of the extent. | + | Output parameter | nblocks | bigint | Number of pages in the extent. | + | Output parameter | alocated_chunks | integer | Number of chunks allocated in the extent. | + | Output parameter | chunk_size | integer | Chunk size, in bytes. | + | Output parameter | algorithm | bigint | Compression algorithm. | + +- compress_address_details(oid regclass, seg_id bigint) + + Description: Detailed information about the usage of page chunks. + + Return type: record + + **Table 5** compress_address_details parameters + + | Parameter Type | Parameter Name | Type | Description | + | :--------------- | :------------------ | :------- | :----------------------------------------------------------- | + | Input parameter | oid | regclass | reloid of the table to which the file belongs. | + | Input parameter | seg_id | bigint | Sequence number of a segment file. | + | Output parameter | extent | bigint | ID of the extent. | + | Output parameter | extent_block_number | bigint | Page number in the extent. The value ranges from 0 to 127. | + | Output parameter | block_number | bigint | Overall page number. | + | Output parameter | alocated_chunks | integer | Number of chunks used by the page. | + | Output parameter | nchunks | integer | Number of chunks used by the page. The value cannot be greater than the value of **alocated_chunks**. | + | Output parameter | chunknos | integer | Number of the used chunks, starting from 1. | diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/segment-page-storage-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/segment-page-storage-functions.md index 5c88f4e7..997a708c 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/segment-page-storage-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/segment-page-storage-functions.md @@ -1,113 +1,113 @@ ---- -title: Segment-Page Storage Functions -summary: Segment-Page Storage Functions -author: Guo Huan -date: 2021-10-28 ---- - -# Segment-Page Storage Functions - -- local_segment_space_info(tablespacename TEXT, databasename TEXT) - - Description: Generates usage information about all extent groups in the tablespace. - - Return type: - - | node_name | Node name | - | ---------------- | ------------------------------------------------------------ | - | extent_size | Extent specifications of an extent group. The unit is the number of blocks. 
| - | forknum | Fork number | - | total_blocks | Total number of extents in a physical file | - | meta_data_blocks | Number of blocks occupied by the metadata managed in a tablespace, including the space header and map page but excluding the segment head | - | used_data_blocks | Number of extents used for storing data, including the segment head | - | utilization | Percentage of the number of used blocks to the total number of blocks, that is, (the value of **used_data_blocks** + the value of **meta_data_block**)/the value of **total_blocks** | - | high_water_mark | High-water mark, indicating the number of allocated extents and maximum physical page number. Blocks that exceed the high-water mark are not used and can be directly recycled. | - - Example: - - ```sql - select * from local_segment_space_info('pg_default', 'postgres'); - node_name | extent_size | forknum | total_blocks | meta_data_blocks | used_data_blocks | utilization | high_water_mark - -------------------+-------------+---------+--------------+------------------+------------------+-------------+----------------- - dn_6001_6002_6003 | 1 | 0 | 16384 | 4157 | 1 | .253784 | 4158 - dn_6001_6002_6003 | 8 | 0 | 16384 | 4157 | 8 | .254211 | 4165 - (2 rows) - ``` - -- pg_stat_segment_extent_usage(int4 tablespace oid, int4 database oid, int4 extent_type, int4 forknum) - - Description: Specifies the usage information of each allocated extent in an extent group returned each time. **extent_type** indicates the type of the extent group. The value is an integer ranging from 1 to 5. If the value is not within the range, an error is reported. **forknum** indicates the fork number. The value is an integer ranging from 0 to 4. Currently, only the following values are valid: **0** for data files, **1** for FSM files, and **2** for visibility map files. - - Return type: - - | Name | Description | - | :------------ | :----------------------------------------------------------- | - | start_block | Start physical page number of an extent | - | extent_size | Size of an extent | - | usage_type | Usage type of an extent, for example, **segment head** and **data extent** | - | ower_location | Object location of an extent to which a pointer points. For example, the owner of a data extent is the head of the segment to which the data extent belongs. | - | special_data | Position of an extent in its owner. The value of this field is related to the usage type. For example, special data of a data extent is the extent ID in the segment to which the data extent belongs. | - - The value of **usage_type** is enumerated. The meaning of each value is as follows: - - - **Non-bucket table segment head**: data segment head of a non-hash bucket table - - **Non-bucket table fork head**: fork segment header of a non-segment-page table - - **Data extent**: data block - - Example: - - ```sql - select * from pg_stat_segment_extent_usage((select oid::int4 from pg_tablespace where spcname='pg_default'), (select oid::int4 from pg_database where datname='postgres'), 1, 0); - start_block | extent_size | usage_type | ower_location | special_data - -------------+-------------+------------------------+---------------+-------------- - 4157 | 1 | Data extent | 4294967295 | 0 - 4158 | 1 | Data extent | 4157 | 0 - ``` - -- local_space_shrink(tablespacename TEXT, databasename TEXT) - - Description: Shrinks specified physical segment-page space on the current node. Only the currently connected database can be shrank. 
- - Return value: empty - -- gs_space_shrink(int4 tablespace, int4 database, int4 extent_type, int4 forknum) - - Description: Works similar to **local_space_shrink**, that is, shrinks specified physical segment-page space. However, the parameters are different. The input parameters are the OIDs of the tablespace and database, and the value of **extent_type** is an integer ranging from 2 to 5. Note: The value **1** of **extent_type** indicates segment-page metadata. Currently, the physical file that contains the metadata cannot be shrunk. This function is used only by tools. You are not advised to use it directly. - - Return value: empty - -- pg_stat_remain_segment_info() - - Description: Displays residual extents on the current node due to faults. Residual extents are classified into two types: segments that are allocated but not used and extents that are allocated but not used. The main difference is that a segment contains multiple extents. During reclamation, all extents in the segment need to be recycled. - - Return type: - - | Name | Description | - | -------- | ------------------------------------------------------------ | - | space_id | Tablespace ID | - | db_id | Database ID | - | block_id | Extent ID | - | type | Extent type. The options are as follows: **ALLOC_SEGMENT**, **DROP_SEGMENT**, and **SHRINK_EXTENT**. | - - The values of **type** are described as follows: - - - **ALLOC_SEGMENT**: When a user creates a segment-page table and the segment is just allocated but the transaction of creating a table is not committed, the node is faulty. As a result, the segment is not used after being allocated. - - - **DROP_SEGMENT**: When a user deletes a segment-page table and the transaction is successfully committed, the bit corresponding to the segment page of the table is not reset and a fault, such as power failure, occurs. As a result, the segment is not used or released. - - - **SHRINK_EXTENT**: When a user shrinks a segment-page table and does not release the idle extent, a fault, such as power failure, occurs. As a result, the extent remains and cannot be reused. - - Example: - - ```sql - select * from pg_stat_remain_segment_info(); - space_id | db_id | block_id | type - ----------+-------+----------+------ - 1663 | 16385| 4156| ALLOC_SEGMENT - ``` - -- pg_free_remain_segment(int4 spaceId, int4 dbId, int4 segmentId) - - Description: Releases a specified residual extent. The value must be obtained from the **pg_stat_remain_segment_info** function. The function verifies input values. If the specified extent is not among the recorded residual extents, an error message is returned. If the specified extent is a single extent, the extent is released independently. If it is a segment, the segment and all extents in the segment are released. - - Return value: empty +--- +title: Segment-Page Storage Functions +summary: Segment-Page Storage Functions +author: Guo Huan +date: 2021-10-28 +--- + +# Segment-Page Storage Functions + +- local_segment_space_info(tablespacename TEXT, databasename TEXT) + + Description: Generates usage information about all extent groups in the tablespace. + + Return type: + + | node_name | Node name | + | ---------------- | ------------------------------------------------------------ | + | extent_size | Extent specifications of an extent group. The unit is the number of blocks. 
|
+  | forknum | Fork number |
+  | total_blocks | Total number of extents in a physical file |
+  | meta_data_blocks | Number of blocks occupied by the metadata managed in a tablespace, including the space header and map page but excluding the segment head |
+  | used_data_blocks | Number of extents used for storing data, including the segment head |
+  | utilization | Percentage of the number of used blocks to the total number of blocks, that is, (the value of **used_data_blocks** + the value of **meta_data_block**)/the value of **total_blocks** |
+  | high_water_mark | High-water mark, indicating the number of allocated extents and maximum physical page number. Blocks that exceed the high-water mark are not used and can be directly recycled. |
+
+  Example:
+
+  ```sql
+  select * from local_segment_space_info('pg_default', 'postgres');
+  node_name | extent_size | forknum | total_blocks | meta_data_blocks | used_data_blocks | utilization | high_water_mark
+  -------------------+-------------+---------+--------------+------------------+------------------+-------------+-----------------
+  dn_6001_6002_6003 | 1 | 0 | 16384 | 4157 | 1 | .253784 | 4158
+  dn_6001_6002_6003 | 8 | 0 | 16384 | 4157 | 8 | .254211 | 4165
+  (2 rows)
+  ```
+
+- pg_stat_segment_extent_usage(int4 tablespace oid, int4 database oid, int4 extent_type, int4 forknum)
+
+  Description: Returns the usage information of each allocated extent in an extent group. **extent_type** indicates the type of the extent group. The value is an integer ranging from 1 to 5. If the value is not within the range, an error is reported. **forknum** indicates the fork number. The value is an integer ranging from 0 to 4. Currently, only the following values are valid: **0** for data files, **1** for FSM files, and **2** for visibility map files.
+
+  Return type:
+
+  | Name | Description |
+  | :------------ | :----------------------------------------------------------- |
+  | start_block | Start physical page number of an extent |
+  | extent_size | Size of an extent |
+  | usage_type | Usage type of an extent, for example, **segment head** and **data extent** |
+  | ower_location | Object location of an extent to which a pointer points. For example, the owner of a data extent is the head of the segment to which the data extent belongs. |
+  | special_data | Position of an extent in its owner. The value of this field is related to the usage type. For example, special data of a data extent is the extent ID in the segment to which the data extent belongs. |
+
+  The value of **usage_type** is enumerated. The meaning of each value is as follows:
+
+  - **Non-bucket table segment head**: data segment head of a non-hash bucket table
+  - **Non-bucket table fork head**: fork segment header of a non-segment-page table
+  - **Data extent**: data block
+
+  Example:
+
+  ```sql
+  select * from pg_stat_segment_extent_usage((select oid::int4 from pg_tablespace where spcname='pg_default'), (select oid::int4 from pg_database where datname='postgres'), 1, 0);
+  start_block | extent_size | usage_type | ower_location | special_data
+  -------------+-------------+------------------------+---------------+--------------
+  4157 | 1 | Data extent | 4294967295 | 0
+  4158 | 1 | Data extent | 4157 | 0
+  ```
+
+- local_space_shrink(tablespacename TEXT, databasename TEXT)
+
+  Description: Shrinks specified physical segment-page space on the current node. Only the currently connected database can be shrunk.
+
+  Return value: empty
+
+- gs_space_shrink(int4 tablespace, int4 database, int4 extent_type, int4 forknum)
+
+  Description: Works similarly to **local_space_shrink**, that is, shrinks specified physical segment-page space. However, the parameters are different. The input parameters are the OIDs of the tablespace and database, and the value of **extent_type** is an integer ranging from 2 to 5. Note: The value **1** of **extent_type** indicates segment-page metadata. Currently, the physical file that contains the metadata cannot be shrunk. This function is used only by tools. You are not advised to use it directly.
+
+  Return value: empty
+
+- pg_stat_remain_segment_info()
+
+  Description: Displays residual extents on the current node due to faults. Residual extents are classified into two types: segments that are allocated but not used and extents that are allocated but not used. The main difference is that a segment contains multiple extents. During reclamation, all extents in the segment need to be recycled.
+
+  Return type:
+
+  | Name | Description |
+  | -------- | ------------------------------------------------------------ |
+  | space_id | Tablespace ID |
+  | db_id | Database ID |
+  | block_id | Extent ID |
+  | type | Extent type. The options are as follows: **ALLOC_SEGMENT**, **DROP_SEGMENT**, and **SHRINK_EXTENT**. |
+
+  The values of **type** are described as follows:
+
+  - **ALLOC_SEGMENT**: When a user creates a segment-page table and the segment is just allocated but the transaction of creating a table is not committed, the node is faulty. As a result, the segment is not used after being allocated.
+
+  - **DROP_SEGMENT**: When a user deletes a segment-page table and the transaction is successfully committed, the bit corresponding to the segment page of the table is not reset and a fault, such as power failure, occurs. As a result, the segment is not used or released.
+
+  - **SHRINK_EXTENT**: When a user shrinks a segment-page table and does not release the idle extent, a fault, such as power failure, occurs. As a result, the extent remains and cannot be reused.
+
+  Example:
+
+  ```sql
+  select * from pg_stat_remain_segment_info();
+  space_id | db_id | block_id | type
+  ----------+-------+----------+------
+  1663 | 16385| 4156| ALLOC_SEGMENT
+  ```
+
+- pg_free_remain_segment(int4 spaceId, int4 dbId, int4 segmentId)
+
+  Description: Releases a specified residual extent. The value must be obtained from the **pg_stat_remain_segment_info** function. The function verifies input values. If the specified extent is not among the recorded residual extents, an error message is returned. If the specified extent is a single extent, the extent is released independently. If it is a segment, the segment and all extents in the segment are released.
+ + Return value: empty diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/server-signal-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/server-signal-functions.md index f2283477..8c14e1a0 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/server-signal-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/server-signal-functions.md @@ -1,66 +1,66 @@ ---- -title: Server Signal Functions -summary: Server Signal Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Server Signal Functions - -Server signaling functions send control signals to other server processes. Only system administrators can use these functions. - -- pg_cancel_backend(pid int) - - Description: Cancels the current query of a backend. - - Return type: Boolean - - Note:**pg_cancel_backend** sends a query cancellation (SIGINT) signal to the backend process identified by **pid**. The PID of an active backend process can be found in the **pid** column of the **pg_stat_activity** view, or can be found by listing the database process using **ps** on the server. A user with the **SYSADMIN** permission, the owner of the database connected to the backend process, the owner of the backend process, or a user who inherits the **gs_role_signal_backend** permission of the built-in role has the permission to use this function. - -- pg_reload_conf() - - Description: Causes all server processes to reload their configuration files (restricted to the system administrator). - - Return type: Boolean - - Note:**pg_reload_conf** sends a SIGHUP signal to the server (restricted to the system administrator). As a result, all server processes reload their configuration files. - -- pg_rotate_logfile() - - Description: Rotates the log files of the server. - - Return type: Boolean - - Note:**pg_rotate_logfile** sends a signal to the log file manager, instructing the manager to immediately switch to a new output file. This function works only when **redirect_stderr** is used for log output. Otherwise, no log file manager subprocess exists. - -- pg_terminate_backend(pid int) - - Description: Terminates a backend thread. - - Return type: Boolean - - Note: Each of these functions returns **true** if they are successful and **false** otherwise. A user with the **SYSADMIN** permission, the owner of the database connected to the backend process, the owner of the backend process, or a user who inherits the **gs_role_signal_backend** permission of the built-in role has the permission to use this function. - - Example: - - ```sql - MogDB=# SELECT pid from pg_stat_activity; - pid - ----------------- - 140657876268816 - (1 rows) - - MogDB=# SELECT pg_terminate_backend(140657876268816); - pg_terminate_backend - ---------------------- - t - (1 row) - ``` - -- pg_terminate_session(pid int64, sessionid int64) - - Description: Terminates a backend session. - - Return type: Boolean - - Note: Each of these functions returns **true** if they are successful and **false** otherwise. A user with the **SYSADMIN** permission, the owner of the database connected to the session, the owner of the session, or a user who inherits the **gs_role_signal_backend** permission of the built-in role has the permission to use this function. 
+--- +title: Server Signal Functions +summary: Server Signal Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Server Signal Functions + +Server signaling functions send control signals to other server processes. Only system administrators can use these functions. + +- pg_cancel_backend(pid int) + + Description: Cancels the current query of a backend. + + Return type: Boolean + + Note:**pg_cancel_backend** sends a query cancellation (SIGINT) signal to the backend process identified by **pid**. The PID of an active backend process can be found in the **pid** column of the **pg_stat_activity** view, or can be found by listing the database process using **ps** on the server. A user with the **SYSADMIN** permission, the owner of the database connected to the backend process, the owner of the backend process, or a user who inherits the **gs_role_signal_backend** permission of the built-in role has the permission to use this function. + +- pg_reload_conf() + + Description: Causes all server processes to reload their configuration files (restricted to the system administrator). + + Return type: Boolean + + Note:**pg_reload_conf** sends a SIGHUP signal to the server (restricted to the system administrator). As a result, all server processes reload their configuration files. + +- pg_rotate_logfile() + + Description: Rotates the log files of the server. + + Return type: Boolean + + Note:**pg_rotate_logfile** sends a signal to the log file manager, instructing the manager to immediately switch to a new output file. This function works only when **redirect_stderr** is used for log output. Otherwise, no log file manager subprocess exists. + +- pg_terminate_backend(pid int) + + Description: Terminates a backend thread. + + Return type: Boolean + + Note: Each of these functions returns **true** if they are successful and **false** otherwise. A user with the **SYSADMIN** permission, the owner of the database connected to the backend process, the owner of the backend process, or a user who inherits the **gs_role_signal_backend** permission of the built-in role has the permission to use this function. + + Example: + + ```sql + MogDB=# SELECT pid from pg_stat_activity; + pid + ----------------- + 140657876268816 + (1 rows) + + MogDB=# SELECT pg_terminate_backend(140657876268816); + pg_terminate_backend + ---------------------- + t + (1 row) + ``` + +- pg_terminate_session(pid int64, sessionid int64) + + Description: Terminates a backend session. + + Return type: Boolean + + Note: Each of these functions returns **true** if they are successful and **false** otherwise. A user with the **SYSADMIN** permission, the owner of the database connected to the session, the owner of the session, or a user who inherits the **gs_role_signal_backend** permission of the built-in role has the permission to use this function. 
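+
+  A hedged usage sketch for **pg_terminate_session**, assuming this version's **pg_stat_activity** exposes a **sessionid** column (when the thread pool is disabled, the session ID typically equals the thread ID); the values below are illustrative:
+
+  ```sql
+  MogDB=# SELECT pid, sessionid FROM pg_stat_activity;
+        pid        |    sessionid
+  -----------------+-----------------
+   140657876268816 | 140657876268816
+  (1 row)
+
+  MogDB=# SELECT pg_terminate_session(140657876268816, 140657876268816);
+   pg_terminate_session
+  ----------------------
+   t
+  (1 row)
+  ```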
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/snapshot-synchronization-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/snapshot-synchronization-functions.md
index 73116a4b..91dd049d 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/snapshot-synchronization-functions.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/snapshot-synchronization-functions.md
@@ -1,24 +1,24 @@
----
-title: Snapshot Synchronization Functions
-summary: Snapshot Synchronization Functions
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Snapshot Synchronization Functions
-
-Snapshot synchronization functions save the current snapshot and return its identifier.
-
-- pg_export_snapshot()
-
-Description: Saves the current snapshot and returns its identifier.
-
-Return type: text
-
-Note:**pg_export_snapshot** saves the current snapshot and returns a text string identifying the snapshot. This string must be passed to clients that want to import the snapshot. A snapshot can be imported when the **set transaction snapshot snapshot_id;** command is executed. Doing so is possible only when the transaction is set to the **SERIALIZABLE** or **REPEATABLE READ** isolation level. The output of the function cannot be used as the input of **set transaction snapshot**.
-
-- pg_export_snapshot_and_csn()
-
-Description: Saves the current snapshot and returns its identifier. Compared with **pg_export_snapshot()**, **pg_export_snapshot()** returns a CSN, indicating the CSN of the current snapshot.
-
-Return type: text
+---
+title: Snapshot Synchronization Functions
+summary: Snapshot Synchronization Functions
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Snapshot Synchronization Functions
+
+Snapshot synchronization functions save the current snapshot and return its identifier.
+
+- pg_export_snapshot()
+
+Description: Saves the current snapshot and returns its identifier.
+
+Return type: text
+
+Note:**pg_export_snapshot** saves the current snapshot and returns a text string identifying the snapshot. This string must be passed to clients that want to import the snapshot. A snapshot can be imported when the **set transaction snapshot snapshot_id;** command is executed. Doing so is possible only when the transaction is set to the **SERIALIZABLE** or **REPEATABLE READ** isolation level. The output of the function cannot be used as the input of **set transaction snapshot**.
+
+- pg_export_snapshot_and_csn()
+
+Description: Saves the current snapshot and returns its identifier. Compared with **pg_export_snapshot()**, **pg_export_snapshot_and_csn()** also returns a CSN, indicating the CSN of the current snapshot.
+ +Return type: text diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/system-management-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/system-management-functions.md index cd1c4a38..147cb042 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/system-management-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/system-management-functions.md @@ -1,21 +1,21 @@ ---- -title: System Administration Functions -summary: System Administration Functions -author: zhang cuiping -date: 2023-04-23 ---- - -# System Administration Functions - -- **[Configuration Settings Functions](configuration-settings-functions.md)** -- **[Universal File Access Functions](universal-file-access-functions.md)** -- **[Server Signal Functions](server-signal-functions.md)** -- **[Backup and Restoration Control Functions](backup-and-restoration-control-functions.md)** -- **[Snapshot Synchronization Functions](snapshot-synchronization-functions.md)** -- **[Database Object Functions](database-object-functions.md)** -- **[Advisory Lock Functions](advisory-lock-functions.md)** -- **[Logical Replication Functions](logical-replication-functions.md)** -- **[Segment-Page Storage Functions](segment-page-storage-functions.md)** -- **[Other Functions](other-functions.md)** -- **[Undo System Functions](undo-system-functions.md)** +--- +title: System Administration Functions +summary: System Administration Functions +author: zhang cuiping +date: 2023-04-23 +--- + +# System Administration Functions + +- **[Configuration Settings Functions](configuration-settings-functions.md)** +- **[Universal File Access Functions](universal-file-access-functions.md)** +- **[Server Signal Functions](server-signal-functions.md)** +- **[Backup and Restoration Control Functions](backup-and-restoration-control-functions.md)** +- **[Snapshot Synchronization Functions](snapshot-synchronization-functions.md)** +- **[Database Object Functions](database-object-functions.md)** +- **[Advisory Lock Functions](advisory-lock-functions.md)** +- **[Logical Replication Functions](logical-replication-functions.md)** +- **[Segment-Page Storage Functions](segment-page-storage-functions.md)** +- **[Other Functions](other-functions.md)** +- **[Undo System Functions](undo-system-functions.md)** - **[Row-store Compression System Functions](row-store-compression-system-functions.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/undo-system-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/undo-system-functions.md index 8708571a..05298126 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/undo-system-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/undo-system-functions.md @@ -1,89 +1,89 @@ ---- -title: Undo System Functions -summary: Undo System Functions -author: Guo Huan -date: 2021-10-28 ---- - -# Undo System Functions - -- gs_undo_meta(type, zoneId, location) - - Description: Specifies metadata of each module in the undo system. - - Parameter description: - - - **type** (metadata type) - - The value **0** indicates the metadata corresponding to **Undo Zone(Record)**. 
- - The value **1** indicates the metadata corresponding to **Undo Zone(Transaction Slot)**. - - The value **2** indicates the metadata corresponding to **Undo Space(Record)**. - - The value **3** indicates the metadata corresponding to **Undo Space(Transaction Slot)**. - - - **zoneId** (undo zone ID) - - The value **–1** indicates the metadata of all undo zones. - - The value range 0–1024 x 1024 indicates the metadata of the corresponding zone ID. - - - **location** (read location) - - The value **0** indicates that data is read from the current memory. - - The value **1** indicates that data is read from a physical file. - - Return type: record - -- gs_undo_translot(location, zoneId) - - Description: Specifies transaction slot information of the undo system. - - Parameter description: - - - **location** (read location) - - The value **0** indicates that data is read from the current memory. - - The value **1** indicates that data is read from a physical file. - - - **zoneId** (undo zone ID) - - The value **–1** indicates the metadata of all undo zones. - - The value range 0–1024 x 1024 indicates the metadata of the corresponding zone ID. - - Return type: record - -- gs_stat_undo() - - Description: Undo statistics. - - Return type: record - - **Table 1** gs_stat_undo parameters - - | Category | Parameter Name | Type | Description | - | :--------------- | :---------------------- | :----- | :----------------------------------------------------------- | - | Output parameter | curr_used_zone_count | uint32 | Number of used undo zones. | - | Output parameter | top_used_zones | text | Information about the first three undo zones with the maximum usage. The output format is as follows:
**(zoneId1:***Used size***; zoneId2:***Used size***; zoneId3:***Used size***)**. | - | Output parameter | curr_used_undo_size | uint32 | Total size of the undo tablespace that is being used. The unit is MB. | - | Output parameter | undo_threshold | uint32 | Calculation result of the value of the GUC parameter **undo_space_limit_size** x 80%. The unit is MB. | - | Output parameter | oldest_xid_in_undo | uint64 | XID of the transaction recycled to the undo space. The undo records generated by the transaction whose XID is smaller than the value of XID are recycled. | - | Output parameter | oldest_xmin | uint64 | Oldest active transaction. | - | Output parameter | total_undo_chain_len | int64 | Total length of all accessed undo chains. | - | Output parameter | max_undo_chain_len | int64 | Maximum length of the accessed undo chain. | - | Output parameter | create_undo_file_count | uint32 | Number of created undo files. | - | Output parameter | discard_undo_file_count | uint32 | Number of deleted undo files. | - -- gs_undo_record(undoptr) - - Description: Undo record resolution. - - Parameter description: - - - **undoptr** (undo record pointer) - +--- +title: Undo System Functions +summary: Undo System Functions +author: Guo Huan +date: 2021-10-28 +--- + +# Undo System Functions + +- gs_undo_meta(type, zoneId, location) + + Description: Specifies metadata of each module in the undo system. + + Parameter description: + + - **type** (metadata type) + + The value **0** indicates the metadata corresponding to **Undo Zone(Record)**. + + The value **1** indicates the metadata corresponding to **Undo Zone(Transaction Slot)**. + + The value **2** indicates the metadata corresponding to **Undo Space(Record)**. + + The value **3** indicates the metadata corresponding to **Undo Space(Transaction Slot)**. + + - **zoneId** (undo zone ID) + + The value **–1** indicates the metadata of all undo zones. + + The value range 0–1024 x 1024 indicates the metadata of the corresponding zone ID. + + - **location** (read location) + + The value **0** indicates that data is read from the current memory. + + The value **1** indicates that data is read from a physical file. + + Return type: record + +- gs_undo_translot(location, zoneId) + + Description: Specifies transaction slot information of the undo system. + + Parameter description: + + - **location** (read location) + + The value **0** indicates that data is read from the current memory. + + The value **1** indicates that data is read from a physical file. + + - **zoneId** (undo zone ID) + + The value **–1** indicates the metadata of all undo zones. + + The value range 0–1024 x 1024 indicates the metadata of the corresponding zone ID. + + Return type: record + +- gs_stat_undo() + + Description: Undo statistics. + + Return type: record + + **Table 1** gs_stat_undo parameters + + | Category | Parameter Name | Type | Description | + | :--------------- | :---------------------- | :----- | :----------------------------------------------------------- | + | Output parameter | curr_used_zone_count | uint32 | Number of used undo zones. | + | Output parameter | top_used_zones | text | Information about the first three undo zones with the maximum usage. The output format is as follows:
**(zoneId1:***Used size***; zoneId2:***Used size***; zoneId3:***Used size***)**. | + | Output parameter | curr_used_undo_size | uint32 | Total size of the undo tablespace that is being used. The unit is MB. | + | Output parameter | undo_threshold | uint32 | Calculation result of the value of the GUC parameter **undo_space_limit_size** x 80%. The unit is MB. | + | Output parameter | oldest_xid_in_undo | uint64 | XID of the transaction recycled to the undo space. The undo records generated by the transaction whose XID is smaller than the value of XID are recycled. | + | Output parameter | oldest_xmin | uint64 | Oldest active transaction. | + | Output parameter | total_undo_chain_len | int64 | Total length of all accessed undo chains. | + | Output parameter | max_undo_chain_len | int64 | Maximum length of the accessed undo chain. | + | Output parameter | create_undo_file_count | uint32 | Number of created undo files. | + | Output parameter | discard_undo_file_count | uint32 | Number of deleted undo files. | + +- gs_undo_record(undoptr) + + Description: Undo record resolution. + + Parameter description: + + - **undoptr** (undo record pointer) + Return type: record \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/universal-file-access-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/universal-file-access-functions.md index a86ae731..4eb728bd 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/universal-file-access-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/system-management-functions/universal-file-access-functions.md @@ -1,148 +1,148 @@ ---- -title: Universal File Access Functions -summary: Universal File Access Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Universal File Access Functions - -Universal file access functions provide local access interfaces for files on a database server. Only files in the MogDB directory and the **log_directory** directory can be accessed. Use a relative path for files in the MogDB directory, and a path matching the **log_directory** configuration setting for log files. Only database initialization users can use these functions. - -- pg_ls_dir(dirname text) - - Description: Lists files in a directory. - - Return type: setof text - - Note:**pg_ls_dir** returns all the names in the specified directory, except the special entries "." and "..". - - For example: - - ```sql - MogDB=# SELECT pg_ls_dir('./'); - pg_ls_dir - ---------------------- - .postgresql.conf.swp - postgresql.conf - pg_tblspc - PG_VERSION - pg_ident.conf - core - server.crt - pg_serial - pg_twophase - postgresql.conf.lock - pg_stat_tmp - pg_notify - pg_subtrans - pg_ctl.lock - pg_xlog - pg_clog - base - pg_snapshots - postmaster.opts - postmaster.pid - server.key.rand - server.key.cipher - pg_multixact - pg_errorinfo - server.key - pg_hba.conf - pg_replslot - .pg_hba.conf.swp - cacert.pem - pg_hba.conf.lock - global - mogdb.state - (32 rows) - ``` - -- pg_read_file(filename text, offset bigint, length bigint) - - Description: Returns the content of a text file. - - Return type: text - - Note:**pg_read_file** returns part of a text file. It can return a maximum of *length* bytes from *offset*. The actual size of fetched data is less than *length* if the end of the file is reached first. 
If **offset** is negative, it is the length rolled back from the file end. If **offset** and **length** are omitted, the entire file is returned. - - For example: - - ```sql - MogDB=# SELECT pg_read_file('postmaster.pid',0,100); - pg_read_file - --------------------------------------- - 53078 + - /srv/BigData/hadoop/data1/dbnode+ - 1500022474 + - 8000 + - /var/run/FusionInsight + - localhost + - 2 - (1 row) - ``` - -- pg_read_binary_file(filename text [, offset bigint, length bigint,missing_ok boolean]) - - Description: Returns the content of a binary file. - - Return type: bytea - - Note:**pg_read_binary_file** is similar to **pg_read_file**, except that the result is a **bytea** value; accordingly, no encoding checks are performed. In combination with the **convert_from** function, this function can be used to read a file in a specified encoding: - - ```sql - MogDB=# SELECT convert_from(pg_read_binary_file('filename'), 'UTF8'); - ``` - -- pg_stat_file(filename text) - - Description: Returns status information about a file. - - Return type: record - - Note:**pg_stat_file** returns a record containing the file size, last access timestamp, last modification timestamp, last file status change timestamp, and a Boolean value indicating if it is a directory. Typical use cases are as follows: - - ```sql - MogDB=# SELECT * FROM pg_stat_file('filename'); - ``` - - ```sql - MogDB=# SELECT (pg_stat_file('filename')).modification; - ``` - - For example: - - ```sql - MogDB=# SELECT convert_from(pg_read_binary_file('postmaster.pid'), 'UTF8'); - convert_from - -------------------------------------- - 4881 + - /srv/BigData/mogdb/data1/dbnode+ - 1496308688 + - 25108 + - /opt/user/Bigdata/mogdb/mogdb_tmp + - * + - 25108001 43352069 + - - (1 row) - ``` - - ```sql - MogDB=# SELECT * FROM pg_stat_file('postmaster.pid'); - - size | access | modification | change - | creation | isdir - ------+------------------------+------------------------+------------------------ - +----------+------- - 117 | 2017-06-05 11:06:34+08 | 2017-06-01 17:18:08+08 | 2017-06-01 17:18:08+08 - | | f - (1 row) - ``` - - ```sql - MogDB=# SELECT (pg_stat_file('postmaster.pid')).modification; - modification - ------------------------ - 2017-06-01 17:18:08+08 - (1 row) - ``` +--- +title: Universal File Access Functions +summary: Universal File Access Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Universal File Access Functions + +Universal file access functions provide local access interfaces for files on a database server. Only files in the MogDB directory and the **log_directory** directory can be accessed. Use a relative path for files in the MogDB directory, and a path matching the **log_directory** configuration setting for log files. Only database initialization users can use these functions. + +- pg_ls_dir(dirname text) + + Description: Lists files in a directory. + + Return type: setof text + + Note:**pg_ls_dir** returns all the names in the specified directory, except the special entries "." and "..". 
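+
+   Because the function returns a set, its output can be filtered with ordinary SQL. A minimal sketch, assuming the configuration files shown in the listing below are present (actual results depend on the contents of the data directory):
+
+   ```sql
+   MogDB=# SELECT t FROM pg_ls_dir('./') AS t WHERE t LIKE '%.conf';
+            t
+   -----------------
+    postgresql.conf
+    pg_ident.conf
+    pg_hba.conf
+   (3 rows)
+   ```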
+ + For example: + + ```sql + MogDB=# SELECT pg_ls_dir('./'); + pg_ls_dir + ---------------------- + .postgresql.conf.swp + postgresql.conf + pg_tblspc + PG_VERSION + pg_ident.conf + core + server.crt + pg_serial + pg_twophase + postgresql.conf.lock + pg_stat_tmp + pg_notify + pg_subtrans + pg_ctl.lock + pg_xlog + pg_clog + base + pg_snapshots + postmaster.opts + postmaster.pid + server.key.rand + server.key.cipher + pg_multixact + pg_errorinfo + server.key + pg_hba.conf + pg_replslot + .pg_hba.conf.swp + cacert.pem + pg_hba.conf.lock + global + mogdb.state + (32 rows) + ``` + +- pg_read_file(filename text, offset bigint, length bigint) + + Description: Returns the content of a text file. + + Return type: text + + Note:**pg_read_file** returns part of a text file. It can return a maximum of *length* bytes from *offset*. The actual size of fetched data is less than *length* if the end of the file is reached first. If **offset** is negative, it is the length rolled back from the file end. If **offset** and **length** are omitted, the entire file is returned. + + For example: + + ```sql + MogDB=# SELECT pg_read_file('postmaster.pid',0,100); + pg_read_file + --------------------------------------- + 53078 + + /srv/BigData/hadoop/data1/dbnode+ + 1500022474 + + 8000 + + /var/run/FusionInsight + + localhost + + 2 + (1 row) + ``` + +- pg_read_binary_file(filename text [, offset bigint, length bigint,missing_ok boolean]) + + Description: Returns the content of a binary file. + + Return type: bytea + + Note:**pg_read_binary_file** is similar to **pg_read_file**, except that the result is a **bytea** value; accordingly, no encoding checks are performed. In combination with the **convert_from** function, this function can be used to read a file in a specified encoding: + + ```sql + MogDB=# SELECT convert_from(pg_read_binary_file('filename'), 'UTF8'); + ``` + +- pg_stat_file(filename text) + + Description: Returns status information about a file. + + Return type: record + + Note:**pg_stat_file** returns a record containing the file size, last access timestamp, last modification timestamp, last file status change timestamp, and a Boolean value indicating if it is a directory. 
Typical use cases are as follows: + + ```sql + MogDB=# SELECT * FROM pg_stat_file('filename'); + ``` + + ```sql + MogDB=# SELECT (pg_stat_file('filename')).modification; + ``` + + For example: + + ```sql + MogDB=# SELECT convert_from(pg_read_binary_file('postmaster.pid'), 'UTF8'); + convert_from + -------------------------------------- + 4881 + + /srv/BigData/mogdb/data1/dbnode+ + 1496308688 + + 25108 + + /opt/user/Bigdata/mogdb/mogdb_tmp + + * + + 25108001 43352069 + + + (1 row) + ``` + + ```sql + MogDB=# SELECT * FROM pg_stat_file('postmaster.pid'); + + size | access | modification | change + | creation | isdir + ------+------------------------+------------------------+------------------------ + +----------+------- + 117 | 2017-06-05 11:06:34+08 | 2017-06-01 17:18:08+08 | 2017-06-01 17:18:08+08 + | | f + (1 row) + ``` + + ```sql + MogDB=# SELECT (pg_stat_file('postmaster.pid')).modification; + modification + ------------------------ + 2017-06-01 17:18:08+08 + (1 row) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/text-search-functions-and-operators.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/text-search-functions-and-operators.md index f34fac2f..7cf7a4fa 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/text-search-functions-and-operators.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/text-search-functions-and-operators.md @@ -1,535 +1,535 @@ ---- -title: Text Search Functions and Operators -summary: Text Search Functions and Operators -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Text Search Functions and Operators - -## Text Search Operators - -- @@ - - Description: Specifies whether the **tsvector**-type words match the **tsquery**-type words. - - Example: - - ```sql - MogDB=# SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat') AS RESULT; - result - -------- - t - (1 row) - ``` - -- @@@ - - Description: Synonym for @@ - - Example: - - ```sql - MogDB=# SELECT to_tsvector('fat cats ate rats') @@@ to_tsquery('cat & rat') AS RESULT; - result - -------- - t - (1 row) - ``` - -- || - - Description: Connects two **tsvector**-type words. - - Example: - - ```sql - MogDB=# SELECT 'a:1 b:2'::tsvector || 'c:1 d:2 b:3'::tsvector AS RESULT; - result - --------------------------- - 'a':1 'b':2,5 'c':3 'd':4 - (1 row) - ``` - -- && - - Description: Performs an AND operation on two **tsquery**-type words. - - Example: - - ```sql - MogDB=# SELECT 'fat | rat'::tsquery && 'cat'::tsquery AS RESULT; - result - --------------------------- - 'fat' | 'rat' | 'cat' - (1 row) - ``` - -- || - - Description: Performs an OR operation on two **tsquery**-type words. - - Example: - - ```sql - MogDB=# SELECT 'fat | rat'::tsquery || 'cat'::tsquery AS RESULT; - result - --------------------------- - ( 'fat' | 'rat' ) | 'cat' - (1 row) - ``` - -- !! - - Description:**NOT** a **tsquery** - - Example: - - ```sql - MogDB=# SELECT !! 'cat'::tsquery AS RESULT; - result - -------- - !'cat' - (1 row) - ``` - -- @> - - Description: Specifies whether a **tsquery**-type word contains another **tsquery**-type word. - - Example: - - ```sql - MogDB=# SELECT 'cat'::tsquery @> 'cat & rat'::tsquery AS RESULT; - result - -------- - f - (1 row) - ``` - -- <@ - - Description: Specifies whether a **tsquery**-type word is contained in another **tsquery**-type word. 
- - Example: - - ```sql - MogDB=# SELECT 'cat'::tsquery <@ 'cat & rat'::tsquery AS RESULT; - result - -------- - t - (1 row) - ``` - -In addition to the preceding operators, the ordinary B-tree comparison operators (including = and <) are defined for types **tsvector** and **tsquery**. - -## Text Search Functions - -- get_current_ts_config() - - Description: Obtains default text search configurations. - - Return type: regconfig - - Example: - - ```sql - MogDB=# SELECT get_current_ts_config(); - get_current_ts_config - ----------------------- - english - (1 row) - ``` - -- length(tsvector) - - Description: Specifies the number of lexemes in a **tsvector**-type word. - - Return type: integer - - Example: - - ```sql - MogDB=# SELECT length('fat:2,4 cat:3 rat:5A'::tsvector); - length - -------- - 3 - (1 row) - ``` - -- numnode(tsquery) - - Description: Specifies the number of lexemes plus **tsquery** operators. - - Return type: integer - - Example: - - ```sql - MogDB=# SELECT numnode('(fat & rat) | cat'::tsquery); - numnode - --------- - 5 - (1 row) - ``` - -- plainto_tsquery([ config regconfig , ] query text) - - Description: Generates **tsquery** lexemes without punctuations. - - Return type: tsquery - - Example: - - ```sql - MogDB=# SELECT plainto_tsquery('english', 'The Fat Rats'); - plainto_tsquery - ----------------- - 'fat' & 'rat' - (1 row) - ``` - -- querytree(query tsquery) - - Description: Obtains the indexable part of a **tsquery**. - - Return type: text - - Example: - - ```sql - MogDB=# SELECT querytree('foo & ! bar'::tsquery); - querytree - ----------- - 'foo' - (1 row) - ``` - -- setweight(tsvector, "char") - - Description: Assigns weight to each element of **tsvector**. - - Return type: tsvector - - Example: - - ```sql - MogDB=# SELECT setweight('fat:2,4 cat:3 rat:5B'::tsvector, 'A'); - setweight - ------------------------------- - 'cat':3A 'fat':2A,4A 'rat':5A - (1 row) - ``` - -- strip(tsvector) - - Description: Removes positions and weights from **tsvector**. - - Return type: tsvector - - Example: - - ```sql - MogDB=# SELECT strip('fat:2,4 cat:3 rat:5A'::tsvector); - strip - ------------------- - 'cat' 'fat' 'rat' - (1 row) - ``` - -- to_tsquery([ config regconfig , ] query text) - - Description: Normalizes words and converts them to **tsquery**. - - Return type: tsquery - - Example: - - ```sql - MogDB=# SELECT to_tsquery('english', 'The & Fat & Rats'); - to_tsquery - --------------- - 'fat' & 'rat' - (1 row) - ``` - -- to_tsvector([ config regconfig , ] document text) - - Description: Reduces document text to **tsvector**. - - Return type: tsvector - - Example: - - ```sql - MogDB=# SELECT to_tsvector('english', 'The Fat Rats'); - to_tsvector - ----------------- - 'fat':2 'rat':3 - (1 row) - ``` - -- to_tsvector_for_batch([ config regconfig , ] document text) - - Description: Reduces document text to **tsvector**. - - Return type: tsvector - - Example: - - ```sql - MogDB=# SELECT to_tsvector_for_batch('english', 'The Fat Rats'); - to_tsvector - ----------------- - 'fat':2 'rat':3 - (1 row) - ``` - -- ts_headline([ config regconfig, ] document text, query tsquery [, options text ]) - - Description: Highlights a query match. - - Return type: text - - Example: - - ```sql - MogDB=# SELECT ts_headline('x y z', 'z'::tsquery); - ts_headline - -------------- - x y z - (1 row) - ``` - -- ts_rank([ weights float4[], ] vector tsvector, query tsquery [, normalization integer ]) - - Description: Ranks documents for a query. 
- - Return type: float4 - - Example: - - ```sql - MogDB=# SELECT ts_rank('hello world'::tsvector, 'world'::tsquery); - ts_rank - ---------- - .0607927 - (1 row) - ``` - -- ts_rank_cd([ weights float4[], ] vector tsvector, query tsquery [, normalization integer ]) - - Description: Ranks documents for a query using cover density. - - Return type: float4 - - Example: - - ```sql - MogDB=# SELECT ts_rank_cd('hello world'::tsvector, 'world'::tsquery); - ts_rank_cd - ------------ - .1 - (1 row) - ``` - -- ts_rewrite(query tsquery, target tsquery, substitute tsquery) - - Description: Replaces a **tsquery**-type word. - - Return type: tsquery - - Example: - - ```sql - MogDB=# SELECT ts_rewrite('a & b'::tsquery, 'a'::tsquery, 'foo|bar'::tsquery); - ts_rewrite - ------------------------- - 'b' & ( 'foo' | 'bar' ) - (1 row) - ``` - -- ts_rewrite(query tsquery, select text) - - Description: Replaces **tsquery** data in the target with the result of a **SELECT** command. - - Return type: tsquery - - Example: - - ```sql - MogDB=# SELECT ts_rewrite('world'::tsquery, 'select ''world''::tsquery, ''hello''::tsquery'); - ts_rewrite - ------------ - 'hello' - (1 row) - ``` - -## Text Search Debugging Functions - -- ts_debug([ config regconfig, ] document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) - - Description: Tests a configuration. - - Return type: SETOF record - - Example: - - ```sql - MogDB=# SELECT ts_debug('english', 'The Brightest supernovaes'); - ts_debug - ----------------------------------------------------------------------------------- - (asciiword,"Word, all ASCII",The,{english_stem},english_stem,{}) - (blank,"Space symbols"," ",{},,) - (asciiword,"Word, all ASCII",Brightest,{english_stem},english_stem,{brightest}) - (blank,"Space symbols"," ",{},,) - (asciiword,"Word, all ASCII",supernovaes,{english_stem},english_stem,{supernova}) - (5 rows) - ``` - -- ts_lexize(dict regdictionary, token text) - - Description: Tests a data dictionary. - - Return type: text[] - - Example: - - ```sql - MogDB=# SELECT ts_lexize('english_stem', 'stars'); - ts_lexize - ----------- - {star} - (1 row) - ``` - -- ts_parse(parser_name text, document text, OUT tokid integer, OUT token text) - - Description: Tests a parser. - - Return type: SETOF record - - Example: - - ```sql - MogDB=# SELECT ts_parse('default', 'foo - bar'); - ts_parse - ----------- - (1,foo) - (12," ") - (12,"- ") - (1,bar) - (4 rows) - ``` - -- ts_parse(parser_oid oid, document text, OUT tokid integer, OUT token text) - - Description: Tests a parser. - - Return type: SETOF record - - Example: - - ```sql - MogDB=# SELECT ts_parse(3722, 'foo - bar'); - ts_parse - ----------- - (1,foo) - (12," ") - (12,"- ") - (1,bar) - (4 rows) - ``` - -- ts_token_type(parser_name text, OUT tokid integer, OUT alias text, OUT description text) - - Description: Obtains token types defined by a parser. 
- - Return type: SETOF record - - Example: - - ```sql - MogDB=# SELECT ts_token_type('default'); - ts_token_type - -------------------------------------------------------------- - (1,asciiword,"Word, all ASCII") - (2,word,"Word, all letters") - (3,numword,"Word, letters and digits") - (4,email,"Email address") - (5,url,URL) - (6,host,Host) - (7,sfloat,"Scientific notation") - (8,version,"Version number") - (9,hword_numpart,"Hyphenated word part, letters and digits") - (10,hword_part,"Hyphenated word part, all letters") - (11,hword_asciipart,"Hyphenated word part, all ASCII") - (12,blank,"Space symbols") - (13,tag,"XML tag") - (14,protocol,"Protocol head") - (15,numhword,"Hyphenated word, letters and digits") - (16,asciihword,"Hyphenated word, all ASCII") - (17,hword,"Hyphenated word, all letters") - (18,url_path,"URL path") - (19,file,"File or path name") - (20,float,"Decimal notation") - (21,int,"Signed integer") - (22,uint,"Unsigned integer") - (23,entity,"XML entity") - (23 rows) - ``` - -- ts_token_type(parser_oid oid, OUT tokid integer, OUT alias text, OUT description text) - - Description: Obtains token types defined by a parser. - - Return type: SETOF record - - Example: - - ```sql - MogDB=# SELECT ts_token_type(3722); - ts_token_type - -------------------------------------------------------------- - (1,asciiword,"Word, all ASCII") - (2,word,"Word, all letters") - (3,numword,"Word, letters and digits") - (4,email,"Email address") - (5,url,URL) - (6,host,Host) - (7,sfloat,"Scientific notation") - (8,version,"Version number") - (9,hword_numpart,"Hyphenated word part, letters and digits") - (10,hword_part,"Hyphenated word part, all letters") - (11,hword_asciipart,"Hyphenated word part, all ASCII") - (12,blank,"Space symbols") - (13,tag,"XML tag") - (14,protocol,"Protocol head") - (15,numhword,"Hyphenated word, letters and digits") - (16,asciihword,"Hyphenated word, all ASCII") - (17,hword,"Hyphenated word, all letters") - (18,url_path,"URL path") - (19,file,"File or path name") - (20,float,"Decimal notation") - (21,int,"Signed integer") - (22,uint,"Unsigned integer") - (23,entity,"XML entity") - (23 rows) - ``` - -- ts_stat(sqlquery text, [ weights text, ] OUT word text, OUT ndoc integer, OUT nentry integer) - - Description: Obtains statistics of a **tsvector** column. - - Return type: SETOF record - - Example: - - ```sql - MogDB=# SELECT ts_stat('select ''hello world''::tsvector'); - ts_stat - ------------- - (world,1,1) - (hello,1,1) - (2 rows) - ``` +--- +title: Text Search Functions and Operators +summary: Text Search Functions and Operators +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Text Search Functions and Operators + +## Text Search Operators + +- @@ + + Description: Specifies whether the **tsvector**-type words match the **tsquery**-type words. + + Example: + + ```sql + MogDB=# SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat') AS RESULT; + result + -------- + t + (1 row) + ``` + +- @@@ + + Description: Synonym for @@ + + Example: + + ```sql + MogDB=# SELECT to_tsvector('fat cats ate rats') @@@ to_tsquery('cat & rat') AS RESULT; + result + -------- + t + (1 row) + ``` + +- || + + Description: Connects two **tsvector**-type words. + + Example: + + ```sql + MogDB=# SELECT 'a:1 b:2'::tsvector || 'c:1 d:2 b:3'::tsvector AS RESULT; + result + --------------------------- + 'a':1 'b':2,5 'c':3 'd':4 + (1 row) + ``` + +- && + + Description: Performs an AND operation on two **tsquery**-type words. 
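+
+   A query assembled with **&&** can be passed directly to the **@@** match operator. A minimal sketch reusing the sample text from the match examples above:
+
+   ```sql
+   MogDB=# SELECT to_tsvector('fat cats ate rats') @@ (to_tsquery('fat | rat') && to_tsquery('cat')) AS result;
+    result
+   --------
+    t
+   (1 row)
+   ```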
+ + Example: + + ```sql + MogDB=# SELECT 'fat | rat'::tsquery && 'cat'::tsquery AS RESULT; + result + --------------------------- + 'fat' | 'rat' | 'cat' + (1 row) + ``` + +- || + + Description: Performs an OR operation on two **tsquery**-type words. + + Example: + + ```sql + MogDB=# SELECT 'fat | rat'::tsquery || 'cat'::tsquery AS RESULT; + result + --------------------------- + ( 'fat' | 'rat' ) | 'cat' + (1 row) + ``` + +- !! + + Description:**NOT** a **tsquery** + + Example: + + ```sql + MogDB=# SELECT !! 'cat'::tsquery AS RESULT; + result + -------- + !'cat' + (1 row) + ``` + +- @> + + Description: Specifies whether a **tsquery**-type word contains another **tsquery**-type word. + + Example: + + ```sql + MogDB=# SELECT 'cat'::tsquery @> 'cat & rat'::tsquery AS RESULT; + result + -------- + f + (1 row) + ``` + +- <@ + + Description: Specifies whether a **tsquery**-type word is contained in another **tsquery**-type word. + + Example: + + ```sql + MogDB=# SELECT 'cat'::tsquery <@ 'cat & rat'::tsquery AS RESULT; + result + -------- + t + (1 row) + ``` + +In addition to the preceding operators, the ordinary B-tree comparison operators (including = and <) are defined for types **tsvector** and **tsquery**. + +## Text Search Functions + +- get_current_ts_config() + + Description: Obtains default text search configurations. + + Return type: regconfig + + Example: + + ```sql + MogDB=# SELECT get_current_ts_config(); + get_current_ts_config + ----------------------- + english + (1 row) + ``` + +- length(tsvector) + + Description: Specifies the number of lexemes in a **tsvector**-type word. + + Return type: integer + + Example: + + ```sql + MogDB=# SELECT length('fat:2,4 cat:3 rat:5A'::tsvector); + length + -------- + 3 + (1 row) + ``` + +- numnode(tsquery) + + Description: Specifies the number of lexemes plus **tsquery** operators. + + Return type: integer + + Example: + + ```sql + MogDB=# SELECT numnode('(fat & rat) | cat'::tsquery); + numnode + --------- + 5 + (1 row) + ``` + +- plainto_tsquery([ config regconfig , ] query text) + + Description: Generates **tsquery** lexemes without punctuations. + + Return type: tsquery + + Example: + + ```sql + MogDB=# SELECT plainto_tsquery('english', 'The Fat Rats'); + plainto_tsquery + ----------------- + 'fat' & 'rat' + (1 row) + ``` + +- querytree(query tsquery) + + Description: Obtains the indexable part of a **tsquery**. + + Return type: text + + Example: + + ```sql + MogDB=# SELECT querytree('foo & ! bar'::tsquery); + querytree + ----------- + 'foo' + (1 row) + ``` + +- setweight(tsvector, "char") + + Description: Assigns weight to each element of **tsvector**. + + Return type: tsvector + + Example: + + ```sql + MogDB=# SELECT setweight('fat:2,4 cat:3 rat:5B'::tsvector, 'A'); + setweight + ------------------------------- + 'cat':3A 'fat':2A,4A 'rat':5A + (1 row) + ``` + +- strip(tsvector) + + Description: Removes positions and weights from **tsvector**. + + Return type: tsvector + + Example: + + ```sql + MogDB=# SELECT strip('fat:2,4 cat:3 rat:5A'::tsvector); + strip + ------------------- + 'cat' 'fat' 'rat' + (1 row) + ``` + +- to_tsquery([ config regconfig , ] query text) + + Description: Normalizes words and converts them to **tsquery**. + + Return type: tsquery + + Example: + + ```sql + MogDB=# SELECT to_tsquery('english', 'The & Fat & Rats'); + to_tsquery + --------------- + 'fat' & 'rat' + (1 row) + ``` + +- to_tsvector([ config regconfig , ] document text) + + Description: Reduces document text to **tsvector**. 
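+
+   Stop words such as "The" are discarded, and the remaining words are stemmed and numbered by their positions in the original text, which is why the example below yields only 'fat':2 and 'rat':3.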
+ + Return type: tsvector + + Example: + + ```sql + MogDB=# SELECT to_tsvector('english', 'The Fat Rats'); + to_tsvector + ----------------- + 'fat':2 'rat':3 + (1 row) + ``` + +- to_tsvector_for_batch([ config regconfig , ] document text) + + Description: Reduces document text to **tsvector**. + + Return type: tsvector + + Example: + + ```sql + MogDB=# SELECT to_tsvector_for_batch('english', 'The Fat Rats'); + to_tsvector + ----------------- + 'fat':2 'rat':3 + (1 row) + ``` + +- ts_headline([ config regconfig, ] document text, query tsquery [, options text ]) + + Description: Highlights a query match. + + Return type: text + + Example: + + ```sql + MogDB=# SELECT ts_headline('x y z', 'z'::tsquery); + ts_headline + -------------- + x y z + (1 row) + ``` + +- ts_rank([ weights float4[], ] vector tsvector, query tsquery [, normalization integer ]) + + Description: Ranks documents for a query. + + Return type: float4 + + Example: + + ```sql + MogDB=# SELECT ts_rank('hello world'::tsvector, 'world'::tsquery); + ts_rank + ---------- + .0607927 + (1 row) + ``` + +- ts_rank_cd([ weights float4[], ] vector tsvector, query tsquery [, normalization integer ]) + + Description: Ranks documents for a query using cover density. + + Return type: float4 + + Example: + + ```sql + MogDB=# SELECT ts_rank_cd('hello world'::tsvector, 'world'::tsquery); + ts_rank_cd + ------------ + .1 + (1 row) + ``` + +- ts_rewrite(query tsquery, target tsquery, substitute tsquery) + + Description: Replaces a **tsquery**-type word. + + Return type: tsquery + + Example: + + ```sql + MogDB=# SELECT ts_rewrite('a & b'::tsquery, 'a'::tsquery, 'foo|bar'::tsquery); + ts_rewrite + ------------------------- + 'b' & ( 'foo' | 'bar' ) + (1 row) + ``` + +- ts_rewrite(query tsquery, select text) + + Description: Replaces **tsquery** data in the target with the result of a **SELECT** command. + + Return type: tsquery + + Example: + + ```sql + MogDB=# SELECT ts_rewrite('world'::tsquery, 'select ''world''::tsquery, ''hello''::tsquery'); + ts_rewrite + ------------ + 'hello' + (1 row) + ``` + +## Text Search Debugging Functions + +- ts_debug([ config regconfig, ] document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) + + Description: Tests a configuration. + + Return type: SETOF record + + Example: + + ```sql + MogDB=# SELECT ts_debug('english', 'The Brightest supernovaes'); + ts_debug + ----------------------------------------------------------------------------------- + (asciiword,"Word, all ASCII",The,{english_stem},english_stem,{}) + (blank,"Space symbols"," ",{},,) + (asciiword,"Word, all ASCII",Brightest,{english_stem},english_stem,{brightest}) + (blank,"Space symbols"," ",{},,) + (asciiword,"Word, all ASCII",supernovaes,{english_stem},english_stem,{supernova}) + (5 rows) + ``` + +- ts_lexize(dict regdictionary, token text) + + Description: Tests a data dictionary. + + Return type: text[] + + Example: + + ```sql + MogDB=# SELECT ts_lexize('english_stem', 'stars'); + ts_lexize + ----------- + {star} + (1 row) + ``` + +- ts_parse(parser_name text, document text, OUT tokid integer, OUT token text) + + Description: Tests a parser. + + Return type: SETOF record + + Example: + + ```sql + MogDB=# SELECT ts_parse('default', 'foo - bar'); + ts_parse + ----------- + (1,foo) + (12," ") + (12,"- ") + (1,bar) + (4 rows) + ``` + +- ts_parse(parser_oid oid, document text, OUT tokid integer, OUT token text) + + Description: Tests a parser. 
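+
+   This variant identifies the parser by OID instead of by name. The example below assumes that OID **3722** corresponds to the **default** parser, so its output matches that of the previous example.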
+ + Return type: SETOF record + + Example: + + ```sql + MogDB=# SELECT ts_parse(3722, 'foo - bar'); + ts_parse + ----------- + (1,foo) + (12," ") + (12,"- ") + (1,bar) + (4 rows) + ``` + +- ts_token_type(parser_name text, OUT tokid integer, OUT alias text, OUT description text) + + Description: Obtains token types defined by a parser. + + Return type: SETOF record + + Example: + + ```sql + MogDB=# SELECT ts_token_type('default'); + ts_token_type + -------------------------------------------------------------- + (1,asciiword,"Word, all ASCII") + (2,word,"Word, all letters") + (3,numword,"Word, letters and digits") + (4,email,"Email address") + (5,url,URL) + (6,host,Host) + (7,sfloat,"Scientific notation") + (8,version,"Version number") + (9,hword_numpart,"Hyphenated word part, letters and digits") + (10,hword_part,"Hyphenated word part, all letters") + (11,hword_asciipart,"Hyphenated word part, all ASCII") + (12,blank,"Space symbols") + (13,tag,"XML tag") + (14,protocol,"Protocol head") + (15,numhword,"Hyphenated word, letters and digits") + (16,asciihword,"Hyphenated word, all ASCII") + (17,hword,"Hyphenated word, all letters") + (18,url_path,"URL path") + (19,file,"File or path name") + (20,float,"Decimal notation") + (21,int,"Signed integer") + (22,uint,"Unsigned integer") + (23,entity,"XML entity") + (23 rows) + ``` + +- ts_token_type(parser_oid oid, OUT tokid integer, OUT alias text, OUT description text) + + Description: Obtains token types defined by a parser. + + Return type: SETOF record + + Example: + + ```sql + MogDB=# SELECT ts_token_type(3722); + ts_token_type + -------------------------------------------------------------- + (1,asciiword,"Word, all ASCII") + (2,word,"Word, all letters") + (3,numword,"Word, letters and digits") + (4,email,"Email address") + (5,url,URL) + (6,host,Host) + (7,sfloat,"Scientific notation") + (8,version,"Version number") + (9,hword_numpart,"Hyphenated word part, letters and digits") + (10,hword_part,"Hyphenated word part, all letters") + (11,hword_asciipart,"Hyphenated word part, all ASCII") + (12,blank,"Space symbols") + (13,tag,"XML tag") + (14,protocol,"Protocol head") + (15,numhword,"Hyphenated word, letters and digits") + (16,asciihword,"Hyphenated word, all ASCII") + (17,hword,"Hyphenated word, all letters") + (18,url_path,"URL path") + (19,file,"File or path name") + (20,float,"Decimal notation") + (21,int,"Signed integer") + (22,uint,"Unsigned integer") + (23,entity,"XML entity") + (23 rows) + ``` + +- ts_stat(sqlquery text, [ weights text, ] OUT word text, OUT ndoc integer, OUT nentry integer) + + Description: Obtains statistics of a **tsvector** column. + + Return type: SETOF record + + Example: + + ```sql + MogDB=# SELECT ts_stat('select ''hello world''::tsvector'); + ts_stat + ------------- + (world,1,1) + (hello,1,1) + (2 rows) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/trigger-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/trigger-functions.md index 8929420c..3e530b6f 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/trigger-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/trigger-functions.md @@ -1,55 +1,55 @@ ---- -title: Trigger Functions -summary: Trigger Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Trigger Functions - -- pg_get_triggerdef(oid) - - Description: Obtains the definition information of a trigger. 
- - Parameter: OID of the trigger to be queried - - Return type: text - - Example: - - ```sql - MogDB=# select pg_get_triggerdef(oid) from pg_trigger; - pg_get_triggerdef - -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - CREATE TRIGGER tg1 BEFORE INSERT ON gtest26 FOR EACH STATEMENT EXECUTE PROCEDURE gtest_trigger_func() - CREATE TRIGGER tg03 AFTER INSERT ON gtest26 FOR EACH ROW WHEN ((new.a IS NOT NULL)) EXECUTE PROCEDURE gtest_trigger_func() - (2 rows) - ``` - -- pg_get_triggerdef(oid, boolean) - - Description: Obtains the definition information of a trigger. - - Parameter: OID of the trigger to be queried and whether it is displayed in pretty mode - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Boolean parameters take effect only when the WHEN condition is specified during trigger creation. - - Return type: text - - Example: - - ```sql - MogDB=# select pg_get_triggerdef(oid,true) from pg_trigger; - pg_get_triggerdef - -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - CREATE TRIGGER tg1 BEFORE INSERT ON gtest26 FOR EACH STATEMENT EXECUTE PROCEDURE gtest_trigger_func() - CREATE TRIGGER tg03 AFTER INSERT ON gtest26 FOR EACH ROW WHEN (new.a IS NOT NULL) EXECUTE PROCEDURE gtest_trigger_func() - (2 rows) - - MogDB=# select pg_get_triggerdef(oid,false) from pg_trigger; - pg_get_triggerdef - -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - CREATE TRIGGER tg1 BEFORE INSERT ON gtest26 FOR EACH STATEMENT EXECUTE PROCEDURE gtest_trigger_func() - CREATE TRIGGER tg03 AFTER INSERT ON gtest26 FOR EACH ROW WHEN ((new.a IS NOT NULL)) EXECUTE PROCEDURE gtest_trigger_func() - (2 rows) - ``` +--- +title: Trigger Functions +summary: Trigger Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Trigger Functions + +- pg_get_triggerdef(oid) + + Description: Obtains the definition information of a trigger. + + Parameter: OID of the trigger to be queried + + Return type: text + + Example: + + ```sql + MogDB=# select pg_get_triggerdef(oid) from pg_trigger; + pg_get_triggerdef + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE TRIGGER tg1 BEFORE INSERT ON gtest26 FOR EACH STATEMENT EXECUTE PROCEDURE gtest_trigger_func() + CREATE TRIGGER tg03 AFTER INSERT ON gtest26 FOR EACH ROW WHEN ((new.a IS NOT NULL)) EXECUTE PROCEDURE gtest_trigger_func() + (2 rows) + ``` + +- pg_get_triggerdef(oid, boolean) + + Description: Obtains the definition information of a trigger. + + Parameter: OID of the trigger to be queried and whether it is displayed in pretty mode + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Boolean parameters take effect only when the WHEN condition is specified during trigger creation. 
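+
+   In the example below, the same **WHEN** condition is printed without redundant parentheses when the second parameter is **true**, and fully parenthesized when it is **false**.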
+ + Return type: text + + Example: + + ```sql + MogDB=# select pg_get_triggerdef(oid,true) from pg_trigger; + pg_get_triggerdef + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE TRIGGER tg1 BEFORE INSERT ON gtest26 FOR EACH STATEMENT EXECUTE PROCEDURE gtest_trigger_func() + CREATE TRIGGER tg03 AFTER INSERT ON gtest26 FOR EACH ROW WHEN (new.a IS NOT NULL) EXECUTE PROCEDURE gtest_trigger_func() + (2 rows) + + MogDB=# select pg_get_triggerdef(oid,false) from pg_trigger; + pg_get_triggerdef + -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE TRIGGER tg1 BEFORE INSERT ON gtest26 FOR EACH STATEMENT EXECUTE PROCEDURE gtest_trigger_func() + CREATE TRIGGER tg03 AFTER INSERT ON gtest26 FOR EACH ROW WHEN ((new.a IS NOT NULL)) EXECUTE PROCEDURE gtest_trigger_func() + (2 rows) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/type-conversion-functions/type-conversion-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/type-conversion-functions/type-conversion-functions.md index f5bcd15c..5e12ce37 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/type-conversion-functions/type-conversion-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/type-conversion-functions/type-conversion-functions.md @@ -1,12 +1,12 @@ ---- -title: Type Conversion Functions -summary: Type Conversion Functions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Type Conversion Functions - -- **[Type Conversion Functions (1)](type-conversion-functions-1.md)** - +--- +title: Type Conversion Functions +summary: Type Conversion Functions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Type Conversion Functions + +- **[Type Conversion Functions (1)](type-conversion-functions-1.md)** + - **[Type Conversion Functions (2)](type-conversion-functions-2.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/window-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/window-functions.md index 4ca1e1d7..d4080ee1 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/window-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/window-functions.md @@ -1,626 +1,626 @@ ---- -title: Window Functions(Analysis Functions) -summary: Window Functions(Analysis Functions) -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Window Functions(Analysis Functions) - -Window functions and the **OVER** clause are used together. The **OVER** clause is used for grouping data and sorting the elements in a group. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** **order by** in a window function must be followed by a column name. If it is followed by a number, the number is processed as a constant value and the target column is not ranked. - -- RANK() - - Description: Generates non-consecutive sequence numbers for the values in each group. The same values have the same sequence number. 
- - Return type: bigint - - Example: - - ```sql - MogDB=# SELECT d_moy, d_fy_week_seq, rank() OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; - d_moy | d_fy_week_seq | rank - -------+---------------+------ - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 2 | 8 - 1 | 2 | 8 - 1 | 2 | 8 - 1 | 2 | 8 - 1 | 2 | 8 - 1 | 2 | 8 - 1 | 2 | 8 - 1 | 3 | 15 - 1 | 3 | 15 - 1 | 3 | 15 - 1 | 3 | 15 - 1 | 3 | 15 - 1 | 3 | 15 - 1 | 3 | 15 - 1 | 4 | 22 - 1 | 4 | 22 - 1 | 4 | 22 - 1 | 4 | 22 - 1 | 4 | 22 - 1 | 4 | 22 - 1 | 4 | 22 - 1 | 5 | 29 - 1 | 5 | 29 - 2 | 5 | 1 - 2 | 5 | 1 - 2 | 5 | 1 - 2 | 5 | 1 - 2 | 5 | 1 - 2 | 6 | 6 - 2 | 6 | 6 - 2 | 6 | 6 - 2 | 6 | 6 - 2 | 6 | 6 - 2 | 6 | 6 - 2 | 6 | 6 - (42 rows) - ``` - -- ROW_NUMBER() - - Description: Generates consecutive sequence numbers for the values in each group. The same values have different sequence numbers. - - Return type: bigint - - Example: - - ```sql - MogDB=# SELECT d_moy, d_fy_week_seq, Row_number() OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; - d_moy | d_fy_week_seq | row_number - -------+---------------+------------ - 1 | 1 | 1 - 1 | 1 | 2 - 1 | 1 | 3 - 1 | 1 | 4 - 1 | 1 | 5 - 1 | 1 | 6 - 1 | 1 | 7 - 1 | 2 | 8 - 1 | 2 | 9 - 1 | 2 | 10 - 1 | 2 | 11 - 1 | 2 | 12 - 1 | 2 | 13 - 1 | 2 | 14 - 1 | 3 | 15 - 1 | 3 | 16 - 1 | 3 | 17 - 1 | 3 | 18 - 1 | 3 | 19 - 1 | 3 | 20 - 1 | 3 | 21 - 1 | 4 | 22 - 1 | 4 | 23 - 1 | 4 | 24 - 1 | 4 | 25 - 1 | 4 | 26 - 1 | 4 | 27 - 1 | 4 | 28 - 1 | 5 | 29 - 1 | 5 | 30 - 2 | 5 | 1 - 2 | 5 | 2 - 2 | 5 | 3 - 2 | 5 | 4 - 2 | 5 | 5 - 2 | 6 | 6 - 2 | 6 | 7 - 2 | 6 | 8 - 2 | 6 | 9 - 2 | 6 | 10 - 2 | 6 | 11 - 2 | 6 | 12 - (42 rows) - ``` - -- DENSE_RANK() - - Description: Generates consecutive sequence numbers for the values in each group. The same values have the same sequence number. - - Return type: bigint - - Example: - - ```sql - MogDB=# SELECT d_moy, d_fy_week_seq, dense_rank() OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; - d_moy | d_fy_week_seq | dense_rank - -------+---------------+------------ - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 3 | 3 - 1 | 3 | 3 - 1 | 3 | 3 - 1 | 3 | 3 - 1 | 3 | 3 - 1 | 3 | 3 - 1 | 3 | 3 - 1 | 4 | 4 - 1 | 4 | 4 - 1 | 4 | 4 - 1 | 4 | 4 - 1 | 4 | 4 - 1 | 4 | 4 - 1 | 4 | 4 - 1 | 5 | 5 - 1 | 5 | 5 - 2 | 5 | 1 - 2 | 5 | 1 - 2 | 5 | 1 - 2 | 5 | 1 - 2 | 5 | 1 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - (42 rows) - ``` - -- PERCENT_RANK() - - Description: Generates corresponding sequence numbers for the values in each group. That is, the function calculates the value according to the formula Sequence number = (**rank** - 1) / (**total rows** - 1). **rank** is the corresponding sequence number generated based on the **RANK** function for the value and **totalrows** is the total number of elements in a group. 
- - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT d_moy, d_fy_week_seq, percent_rank() OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; - d_moy | d_fy_week_seq | percent_rank - -------+---------------+------------------ - 1 | 1 | 0 - 1 | 1 | 0 - 1 | 1 | 0 - 1 | 1 | 0 - 1 | 1 | 0 - 1 | 1 | 0 - 1 | 1 | 0 - 1 | 2 | .241379310344828 - 1 | 2 | .241379310344828 - 1 | 2 | .241379310344828 - 1 | 2 | .241379310344828 - 1 | 2 | .241379310344828 - 1 | 2 | .241379310344828 - 1 | 2 | .241379310344828 - 1 | 3 | .482758620689655 - 1 | 3 | .482758620689655 - 1 | 3 | .482758620689655 - 1 | 3 | .482758620689655 - 1 | 3 | .482758620689655 - 1 | 3 | .482758620689655 - 1 | 3 | .482758620689655 - 1 | 4 | .724137931034483 - 1 | 4 | .724137931034483 - 1 | 4 | .724137931034483 - 1 | 4 | .724137931034483 - 1 | 4 | .724137931034483 - 1 | 4 | .724137931034483 - 1 | 4 | .724137931034483 - 1 | 5 | .96551724137931 - 1 | 5 | .96551724137931 - 2 | 5 | 0 - 2 | 5 | 0 - 2 | 5 | 0 - 2 | 5 | 0 - 2 | 5 | 0 - 2 | 6 | .454545454545455 - 2 | 6 | .454545454545455 - 2 | 6 | .454545454545455 - 2 | 6 | .454545454545455 - 2 | 6 | .454545454545455 - 2 | 6 | .454545454545455 - 2 | 6 | .454545454545455 - (42 rows) - ``` - -- CUME_DIST() - - Description: Generates accumulative distribution sequence numbers for the values in each group. That is, the function calculates the value according to the following formula: Sequence number = Number of rows preceding or peer with current row/Total rows. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT d_moy, d_fy_week_seq, cume_dist() OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim e_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; - d_moy | d_fy_week_seq | cume_dist - -------+---------------+------------------ - 1 | 1 | .233333333333333 - 1 | 1 | .233333333333333 - 1 | 1 | .233333333333333 - 1 | 1 | .233333333333333 - 1 | 1 | .233333333333333 - 1 | 1 | .233333333333333 - 1 | 1 | .233333333333333 - 1 | 2 | .466666666666667 - 1 | 2 | .466666666666667 - 1 | 2 | .466666666666667 - 1 | 2 | .466666666666667 - 1 | 2 | .466666666666667 - 1 | 2 | .466666666666667 - 1 | 2 | .466666666666667 - 1 | 3 | .7 - 1 | 3 | .7 - 1 | 3 | .7 - 1 | 3 | .7 - 1 | 3 | .7 - 1 | 3 | .7 - 1 | 3 | .7 - 1 | 4 | .933333333333333 - 1 | 4 | .933333333333333 - 1 | 4 | .933333333333333 - 1 | 4 | .933333333333333 - 1 | 4 | .933333333333333 - 1 | 4 | .933333333333333 - 1 | 4 | .933333333333333 - 1 | 5 | 1 - 1 | 5 | 1 - 2 | 5 | .416666666666667 - 2 | 5 | .416666666666667 - 2 | 5 | .416666666666667 - 2 | 5 | .416666666666667 - 2 | 5 | .416666666666667 - 2 | 6 | 1 - 2 | 6 | 1 - 2 | 6 | 1 - 2 | 6 | 1 - 2 | 6 | 1 - 2 | 6 | 1 - 2 | 6 | 1 - (42 rows) - ``` - -- NTILE(num_buckets integer) - - Description: Equally allocates sequential data sets to the buckets whose quantity is specified by **num_buckets** according to **num_buckets integer** and allocates the bucket number to each row. Divide the partition as evenly as possible. 
- - Return type: integer - - Example: - - ```sql - MogDB=# SELECT d_moy, d_fy_week_seq, ntile(3) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; - d_moy | d_fy_week_seq | ntile - -------+---------------+------- - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 3 | 2 - 1 | 3 | 2 - 1 | 3 | 2 - 1 | 3 | 2 - 1 | 3 | 2 - 1 | 3 | 2 - 1 | 3 | 3 - 1 | 4 | 3 - 1 | 4 | 3 - 1 | 4 | 3 - 1 | 4 | 3 - 1 | 4 | 3 - 1 | 4 | 3 - 1 | 4 | 3 - 1 | 5 | 3 - 1 | 5 | 3 - 2 | 5 | 1 - 2 | 5 | 1 - 2 | 5 | 1 - 2 | 5 | 1 - 2 | 5 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 3 - 2 | 6 | 3 - 2 | 6 | 3 - 2 | 6 | 3 - (42 rows) - ``` - -- LAG(value any [, offset integer [, default any ]]) - - Description: Generates lag values for the corresponding values in each group. That is, the value of the row obtained by moving forward the row corresponding to the current value by **offset** (integer) is the sequence number. If the row does not exist after the moving, the result value is the default value. If omitted, **offset** defaults to **1** and **default** to **NULL**. - - Return type: same as the parameter type - - Example: - - ```sql - MogDB=# SELECT d_moy, d_fy_week_seq, lag(d_moy,3,null) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; - d_moy | d_fy_week_seq | lag - -------+---------------+----- - 1 | 1 | - 1 | 1 | - 1 | 1 | - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 5 | 1 - 1 | 5 | 1 - 2 | 5 | - 2 | 5 | - 2 | 5 | - 2 | 5 | 2 - 2 | 5 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - 2 | 6 | 2 - (42 rows) - ``` - -- LEAD(value any [, offset integer [, default any ]]) - - Description: Generates leading values for the corresponding values in each group. That is, the value of the row obtained by moving backward the row corresponding to the current value by **offset** (integer) is the sequence number. If the row after the moving exceeds the total number of rows for the current group, the result value is the default value. If omitted, **offset** defaults to **1** and **default** to **NULL**. - - Return type: same as the parameter type - - Example: - - ```sql - MogDB=# SELECT d_moy, d_fy_week_seq, lead(d_fy_week_seq,2) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; - d_moy | d_fy_week_seq | lead - -------+---------------+------ - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 2 - 1 | 1 | 2 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 2 | 2 - 1 | 2 | 3 - 1 | 2 | 3 - 1 | 3 | 3 - 1 | 3 | 3 - 1 | 3 | 3 - 1 | 3 | 3 - 1 | 3 | 3 - 1 | 3 | 4 - 1 | 3 | 4 - 1 | 4 | 4 - 1 | 4 | 4 - 1 | 4 | 4 - 1 | 4 | 4 - 1 | 4 | 4 - 1 | 4 | 5 - 1 | 4 | 5 - 1 | 5 | - 1 | 5 | - 2 | 5 | 5 - 2 | 5 | 5 - 2 | 5 | 5 - 2 | 5 | 6 - 2 | 5 | 6 - 2 | 6 | 6 - 2 | 6 | 6 - 2 | 6 | 6 - 2 | 6 | 6 - 2 | 6 | 6 - 2 | 6 | - 2 | 6 | - (42 rows) - ``` - -- FIRST_VALUE(value any) - - Description: Returns the first value of each group. 
- - Return type: same as the parameter type - - Example: - - ```sql - MogDB=# SELECT d_moy, d_fy_week_seq, first_value(d_fy_week_seq) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; - d_moy | d_fy_week_seq | first_value - -------+---------------+------------- - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 5 | 1 - 1 | 5 | 1 - 2 | 5 | 5 - 2 | 5 | 5 - 2 | 5 | 5 - 2 | 5 | 5 - 2 | 5 | 5 - 2 | 6 | 5 - 2 | 6 | 5 - 2 | 6 | 5 - 2 | 6 | 5 - 2 | 6 | 5 - 2 | 6 | 5 - 2 | 6 | 5 - (42 rows) - ``` - -- LAST_VALUE(value any) - - Description: Returns the last value of each group. - - Return type: same as the parameter type - - Example: - - ```sql - MogDB=# SELECT d_moy, d_fy_week_seq, last_value(d_moy) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 6 ORDER BY 1,2; - d_moy | d_fy_week_seq | last_value - -------+---------------+------------ - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 5 | 1 - 1 | 5 | 1 - 2 | 5 | 2 - 2 | 5 | 2 - 2 | 5 | 2 - 2 | 5 | 2 - 2 | 5 | 2 - (35 rows) - ``` - -- NTH_VALUE(value any, nth integer) - - Description: Returns the _n_th row for a group. If the row does not exist, **NULL** is returned by default. - - Return type: same as the parameter type - - Example: - - ```sql - MogDB=# SELECT d_moy, d_fy_week_seq, nth_value(d_fy_week_seq,6) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 6 ORDER BY 1,2; - d_moy | d_fy_week_seq | nth_value - -------+---------------+----------- - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 1 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 2 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 3 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 4 | 1 - 1 | 5 | 1 - 1 | 5 | 1 - 2 | 5 | - 2 | 5 | - 2 | 5 | - 2 | 5 | - 2 | 5 | - (35 rows) - ``` +--- +title: Window Functions(Analysis Functions) +summary: Window Functions(Analysis Functions) +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Window Functions(Analysis Functions) + +Window functions and the **OVER** clause are used together. The **OVER** clause is used for grouping data and sorting the elements in a group. + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** **order by** in a window function must be followed by a column name. If it is followed by a number, the number is processed as a constant value and the target column is not ranked. + +- RANK() + + Description: Generates non-consecutive sequence numbers for the values in each group. The same values have the same sequence number. 
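+
+   Because tied values share a sequence number, gaps follow each tie: in the example below, the seven rows with **d_fy_week_seq = 1** all receive rank 1, and the next distinct value starts at rank 8.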
+ + Return type: bigint + + Example: + + ```sql + MogDB=# SELECT d_moy, d_fy_week_seq, rank() OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; + d_moy | d_fy_week_seq | rank + -------+---------------+------ + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 2 | 8 + 1 | 2 | 8 + 1 | 2 | 8 + 1 | 2 | 8 + 1 | 2 | 8 + 1 | 2 | 8 + 1 | 2 | 8 + 1 | 3 | 15 + 1 | 3 | 15 + 1 | 3 | 15 + 1 | 3 | 15 + 1 | 3 | 15 + 1 | 3 | 15 + 1 | 3 | 15 + 1 | 4 | 22 + 1 | 4 | 22 + 1 | 4 | 22 + 1 | 4 | 22 + 1 | 4 | 22 + 1 | 4 | 22 + 1 | 4 | 22 + 1 | 5 | 29 + 1 | 5 | 29 + 2 | 5 | 1 + 2 | 5 | 1 + 2 | 5 | 1 + 2 | 5 | 1 + 2 | 5 | 1 + 2 | 6 | 6 + 2 | 6 | 6 + 2 | 6 | 6 + 2 | 6 | 6 + 2 | 6 | 6 + 2 | 6 | 6 + 2 | 6 | 6 + (42 rows) + ``` + +- ROW_NUMBER() + + Description: Generates consecutive sequence numbers for the values in each group. The same values have different sequence numbers. + + Return type: bigint + + Example: + + ```sql + MogDB=# SELECT d_moy, d_fy_week_seq, Row_number() OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; + d_moy | d_fy_week_seq | row_number + -------+---------------+------------ + 1 | 1 | 1 + 1 | 1 | 2 + 1 | 1 | 3 + 1 | 1 | 4 + 1 | 1 | 5 + 1 | 1 | 6 + 1 | 1 | 7 + 1 | 2 | 8 + 1 | 2 | 9 + 1 | 2 | 10 + 1 | 2 | 11 + 1 | 2 | 12 + 1 | 2 | 13 + 1 | 2 | 14 + 1 | 3 | 15 + 1 | 3 | 16 + 1 | 3 | 17 + 1 | 3 | 18 + 1 | 3 | 19 + 1 | 3 | 20 + 1 | 3 | 21 + 1 | 4 | 22 + 1 | 4 | 23 + 1 | 4 | 24 + 1 | 4 | 25 + 1 | 4 | 26 + 1 | 4 | 27 + 1 | 4 | 28 + 1 | 5 | 29 + 1 | 5 | 30 + 2 | 5 | 1 + 2 | 5 | 2 + 2 | 5 | 3 + 2 | 5 | 4 + 2 | 5 | 5 + 2 | 6 | 6 + 2 | 6 | 7 + 2 | 6 | 8 + 2 | 6 | 9 + 2 | 6 | 10 + 2 | 6 | 11 + 2 | 6 | 12 + (42 rows) + ``` + +- DENSE_RANK() + + Description: Generates consecutive sequence numbers for the values in each group. The same values have the same sequence number. + + Return type: bigint + + Example: + + ```sql + MogDB=# SELECT d_moy, d_fy_week_seq, dense_rank() OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; + d_moy | d_fy_week_seq | dense_rank + -------+---------------+------------ + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 3 | 3 + 1 | 3 | 3 + 1 | 3 | 3 + 1 | 3 | 3 + 1 | 3 | 3 + 1 | 3 | 3 + 1 | 3 | 3 + 1 | 4 | 4 + 1 | 4 | 4 + 1 | 4 | 4 + 1 | 4 | 4 + 1 | 4 | 4 + 1 | 4 | 4 + 1 | 4 | 4 + 1 | 5 | 5 + 1 | 5 | 5 + 2 | 5 | 1 + 2 | 5 | 1 + 2 | 5 | 1 + 2 | 5 | 1 + 2 | 5 | 1 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + (42 rows) + ``` + +- PERCENT_RANK() + + Description: Generates corresponding sequence numbers for the values in each group. That is, the function calculates the value according to the formula Sequence number = (**rank** - 1) / (**total rows** - 1). **rank** is the corresponding sequence number generated based on the **RANK** function for the value and **totalrows** is the total number of elements in a group. 
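+
+   For instance, in the example below the partition for **d_moy = 1** contains 30 rows, so the rows with **d_fy_week_seq = 2**, which have rank 8, receive (8 - 1) / (30 - 1) ≈ 0.241379.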
+ + Return type: double precision + + Example: + + ```sql + MogDB=# SELECT d_moy, d_fy_week_seq, percent_rank() OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; + d_moy | d_fy_week_seq | percent_rank + -------+---------------+------------------ + 1 | 1 | 0 + 1 | 1 | 0 + 1 | 1 | 0 + 1 | 1 | 0 + 1 | 1 | 0 + 1 | 1 | 0 + 1 | 1 | 0 + 1 | 2 | .241379310344828 + 1 | 2 | .241379310344828 + 1 | 2 | .241379310344828 + 1 | 2 | .241379310344828 + 1 | 2 | .241379310344828 + 1 | 2 | .241379310344828 + 1 | 2 | .241379310344828 + 1 | 3 | .482758620689655 + 1 | 3 | .482758620689655 + 1 | 3 | .482758620689655 + 1 | 3 | .482758620689655 + 1 | 3 | .482758620689655 + 1 | 3 | .482758620689655 + 1 | 3 | .482758620689655 + 1 | 4 | .724137931034483 + 1 | 4 | .724137931034483 + 1 | 4 | .724137931034483 + 1 | 4 | .724137931034483 + 1 | 4 | .724137931034483 + 1 | 4 | .724137931034483 + 1 | 4 | .724137931034483 + 1 | 5 | .96551724137931 + 1 | 5 | .96551724137931 + 2 | 5 | 0 + 2 | 5 | 0 + 2 | 5 | 0 + 2 | 5 | 0 + 2 | 5 | 0 + 2 | 6 | .454545454545455 + 2 | 6 | .454545454545455 + 2 | 6 | .454545454545455 + 2 | 6 | .454545454545455 + 2 | 6 | .454545454545455 + 2 | 6 | .454545454545455 + 2 | 6 | .454545454545455 + (42 rows) + ``` + +- CUME_DIST() + + Description: Generates accumulative distribution sequence numbers for the values in each group. That is, the function calculates the value according to the following formula: Sequence number = Number of rows preceding or peer with current row/Total rows. + + Return type: double precision + + Example: + + ```sql + MogDB=# SELECT d_moy, d_fy_week_seq, cume_dist() OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim e_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; + d_moy | d_fy_week_seq | cume_dist + -------+---------------+------------------ + 1 | 1 | .233333333333333 + 1 | 1 | .233333333333333 + 1 | 1 | .233333333333333 + 1 | 1 | .233333333333333 + 1 | 1 | .233333333333333 + 1 | 1 | .233333333333333 + 1 | 1 | .233333333333333 + 1 | 2 | .466666666666667 + 1 | 2 | .466666666666667 + 1 | 2 | .466666666666667 + 1 | 2 | .466666666666667 + 1 | 2 | .466666666666667 + 1 | 2 | .466666666666667 + 1 | 2 | .466666666666667 + 1 | 3 | .7 + 1 | 3 | .7 + 1 | 3 | .7 + 1 | 3 | .7 + 1 | 3 | .7 + 1 | 3 | .7 + 1 | 3 | .7 + 1 | 4 | .933333333333333 + 1 | 4 | .933333333333333 + 1 | 4 | .933333333333333 + 1 | 4 | .933333333333333 + 1 | 4 | .933333333333333 + 1 | 4 | .933333333333333 + 1 | 4 | .933333333333333 + 1 | 5 | 1 + 1 | 5 | 1 + 2 | 5 | .416666666666667 + 2 | 5 | .416666666666667 + 2 | 5 | .416666666666667 + 2 | 5 | .416666666666667 + 2 | 5 | .416666666666667 + 2 | 6 | 1 + 2 | 6 | 1 + 2 | 6 | 1 + 2 | 6 | 1 + 2 | 6 | 1 + 2 | 6 | 1 + 2 | 6 | 1 + (42 rows) + ``` + +- NTILE(num_buckets integer) + + Description: Equally allocates sequential data sets to the buckets whose quantity is specified by **num_buckets** according to **num_buckets integer** and allocates the bucket number to each row. Divide the partition as evenly as possible. 
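+
+   In the example below, the 30 rows of the **d_moy = 1** partition are split into three buckets of 10 rows each, and the 12 rows of the **d_moy = 2** partition into three buckets of 4 rows each.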
+ + Return type: integer + + Example: + + ```sql + MogDB=# SELECT d_moy, d_fy_week_seq, ntile(3) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; + d_moy | d_fy_week_seq | ntile + -------+---------------+------- + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 3 | 2 + 1 | 3 | 2 + 1 | 3 | 2 + 1 | 3 | 2 + 1 | 3 | 2 + 1 | 3 | 2 + 1 | 3 | 3 + 1 | 4 | 3 + 1 | 4 | 3 + 1 | 4 | 3 + 1 | 4 | 3 + 1 | 4 | 3 + 1 | 4 | 3 + 1 | 4 | 3 + 1 | 5 | 3 + 1 | 5 | 3 + 2 | 5 | 1 + 2 | 5 | 1 + 2 | 5 | 1 + 2 | 5 | 1 + 2 | 5 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 3 + 2 | 6 | 3 + 2 | 6 | 3 + 2 | 6 | 3 + (42 rows) + ``` + +- LAG(value any [, offset integer [, default any ]]) + + Description: Generates lag values for the corresponding values in each group. That is, the value of the row obtained by moving forward the row corresponding to the current value by **offset** (integer) is the sequence number. If the row does not exist after the moving, the result value is the default value. If omitted, **offset** defaults to **1** and **default** to **NULL**. + + Return type: same as the parameter type + + Example: + + ```sql + MogDB=# SELECT d_moy, d_fy_week_seq, lag(d_moy,3,null) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; + d_moy | d_fy_week_seq | lag + -------+---------------+----- + 1 | 1 | + 1 | 1 | + 1 | 1 | + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 5 | 1 + 1 | 5 | 1 + 2 | 5 | + 2 | 5 | + 2 | 5 | + 2 | 5 | 2 + 2 | 5 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + 2 | 6 | 2 + (42 rows) + ``` + +- LEAD(value any [, offset integer [, default any ]]) + + Description: Generates leading values for the corresponding values in each group. That is, the value of the row obtained by moving backward the row corresponding to the current value by **offset** (integer) is the sequence number. If the row after the moving exceeds the total number of rows for the current group, the result value is the default value. If omitted, **offset** defaults to **1** and **default** to **NULL**. + + Return type: same as the parameter type + + Example: + + ```sql + MogDB=# SELECT d_moy, d_fy_week_seq, lead(d_fy_week_seq,2) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; + d_moy | d_fy_week_seq | lead + -------+---------------+------ + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 2 + 1 | 1 | 2 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 2 | 2 + 1 | 2 | 3 + 1 | 2 | 3 + 1 | 3 | 3 + 1 | 3 | 3 + 1 | 3 | 3 + 1 | 3 | 3 + 1 | 3 | 3 + 1 | 3 | 4 + 1 | 3 | 4 + 1 | 4 | 4 + 1 | 4 | 4 + 1 | 4 | 4 + 1 | 4 | 4 + 1 | 4 | 4 + 1 | 4 | 5 + 1 | 4 | 5 + 1 | 5 | + 1 | 5 | + 2 | 5 | 5 + 2 | 5 | 5 + 2 | 5 | 5 + 2 | 5 | 6 + 2 | 5 | 6 + 2 | 6 | 6 + 2 | 6 | 6 + 2 | 6 | 6 + 2 | 6 | 6 + 2 | 6 | 6 + 2 | 6 | + 2 | 6 | + (42 rows) + ``` + +- FIRST_VALUE(value any) + + Description: Returns the first value of each group. 
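+
+   The "first" value is determined by the window's **ORDER BY**: in the example below, the **d_moy = 2** partition starts at **d_fy_week_seq = 5**, so every row in that partition returns 5.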
+ + Return type: same as the parameter type + + Example: + + ```sql + MogDB=# SELECT d_moy, d_fy_week_seq, first_value(d_fy_week_seq) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 7 ORDER BY 1,2; + d_moy | d_fy_week_seq | first_value + -------+---------------+------------- + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 5 | 1 + 1 | 5 | 1 + 2 | 5 | 5 + 2 | 5 | 5 + 2 | 5 | 5 + 2 | 5 | 5 + 2 | 5 | 5 + 2 | 6 | 5 + 2 | 6 | 5 + 2 | 6 | 5 + 2 | 6 | 5 + 2 | 6 | 5 + 2 | 6 | 5 + 2 | 6 | 5 + (42 rows) + ``` + +- LAST_VALUE(value any) + + Description: Returns the last value of each group. + + Return type: same as the parameter type + + Example: + + ```sql + MogDB=# SELECT d_moy, d_fy_week_seq, last_value(d_moy) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 6 ORDER BY 1,2; + d_moy | d_fy_week_seq | last_value + -------+---------------+------------ + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 5 | 1 + 1 | 5 | 1 + 2 | 5 | 2 + 2 | 5 | 2 + 2 | 5 | 2 + 2 | 5 | 2 + 2 | 5 | 2 + (35 rows) + ``` + +- NTH_VALUE(value any, nth integer) + + Description: Returns the _n_th row for a group. If the row does not exist, **NULL** is returned by default. + + Return type: same as the parameter type + + Example: + + ```sql + MogDB=# SELECT d_moy, d_fy_week_seq, nth_value(d_fy_week_seq,6) OVER(PARTITION BY d_moy ORDER BY d_fy_week_seq) FROM tpcds.date_dim WHERE d_moy < 4 AND d_fy_week_seq < 6 ORDER BY 1,2; + d_moy | d_fy_week_seq | nth_value + -------+---------------+----------- + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 2 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 3 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 4 | 1 + 1 | 5 | 1 + 1 | 5 | 1 + 2 | 5 | + 2 | 5 | + 2 | 5 | + 2 | 5 | + 2 | 5 | + (35 rows) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/xml-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/xml-functions.md index c4300f7e..5ba92c11 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/xml-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/functions-and-operators/xml-functions.md @@ -1,380 +1,380 @@ ---- -title: XML Types -summary: XML Types -author: zhang cuiping -date: 2023-04-07 ---- - -# XML Types - -The following functions are inherited from the open source PG9.2. - -- xmlparse ( { DOCUMENT | CONTENT } value) - - Description: Use the function xmlparse to generate xml-type values from character data. 
- - Parameter: data type is text - - Return value type: xml - - Example: - - ```sql - MogDB=# SELECT XMLPARSE (DOCUMENT 'Manual...'); - xmlparse - ---------------------------------------------------------- - Manual... - (1 row) - MogDB=# SELECT XMLPARSE (CONTENT 'abcbarfoo'); - xmlparse - --------------------------------- - abcbarfoo - (1 row) - ``` - -- xmlserialize( { DOCUMENT | CONTENT } value AS type ) - - Description: Use the function xmlserialize to generate a string from xml. - - Parameter: The type can be character, character varying or text or a variant of one of them. - - Return value type: Determined by the type of data entered in the function - - Example: - - ```sql - MogDB=# SELECT XMLSERIALIZE(CONTENT 'good' AS CHAR(10)); - xmlserialize - -------------- - good - (1 row) - MogDB=# SELECT xmlserialize(DOCUMENT 'bad' as text); - xmlserialize - ------------------ - bad - (1 row) - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - When a string value is converted to an xml type without XMLPARSE or XMLSERIALIZE, the selection of DOCUMENT and CONTENT is determined by the “XML option” session configuration parameter, which can be set by standard commands: - > - > ``` - > SET XML OPTION { DOCUMENT | CONTENT }; - > ``` - > - > Or use a similar syntax to set: - > - > ``` - > SET xmloption TO { DOCUMENT | CONTENT }; - > ``` - -- xmlcomment(text) - - Description: Creates an XML value and contains an XML comment with the specified text as the content. The text does not contain the “–” character and does not exist at the end of the “-” character. It conforms to the format requirements of XML comments. And when the parameter is empty, the result is also empty. - - Parameter: data type is text. - - Return value type: xml - - Example: - - ```sql - MogDB=# SELECT xmlcomment('hello'); - xmlcomment - -------------- - - ``` - -- xmlconcat(xml[, …]) - - Description: concatenates a list consisting of a single XML value into a single value, which contains an XML content fragment. The null value will be ignored, and the result will be null only when all parameters are null. - - Parameter: data type is xml. - - Return value type: xml - - Example: - - ```sql - MogDB=# SELECT xmlconcat('', 'foo'); - xmlconcat - ---------------------- - foo - ``` - -- xmlelement(name name [, xmlattributes(value [AS attname] [, … ])] [, content, …]) - - Description: Generates an XML element with the given name, attribute, and content. - - Return value type: xml - - Example: - - ```sql - MogDB=# SELECT xmlelement(name foo); - xmlelement - ------------ - - ``` - -- xmlforest(content [AS name] [, …]) - - Description: Generates an XML sequence of elements with a given name and content. - - Return value type: xml - - Example: - - ```sql - MogDB=# SELECT xmlforest('abc' AS foo, 123 AS bar); - xmlforest - ------------------------------ - abc123 - ``` - -- xmlpi(name target [, content]) - - Description: Creates an XML processing instruction. If the content is not empty, the content cannot contain character sequences. - - Return value type: xml - - Example: - - ```sql - MogDB=# SELECT xmlpi(name php, 'echo "hello world";'); - xmlpi - ----------------------------- - - ``` - -- xmlroot(xml, version text | no value [, standalone yes|no|no value]) - - Description: Modify the attribute of the root node of an XML value. If a version is specified, it will replace the value in the version declaration of the root node. 
If an independent setting is specified, it will replace the value in the independent declaration of the root node. - - Example: - - ```sql - MogDB=# SELECT xmlroot('abc',version '1.0', standalone yes); - xmlroot - ---------------------------------------- - - abc - ``` - -- xmlagg(xml) - - Description: This function is an aggregate function. It concatenates the input values of aggregate function calls and supports cross-row concatenation. - - Parameter: xml - - Return value type: xml - - Example: - - ```sql - MogDB=# CREATE TABLE xmltest ( - id int, - data xml - ); - MogDB=# INSERT INTO xmltest VALUES (1, 'one'); - INSERT 0 1 - MogDB=# INSERT INTO xmltest VALUES (2, 'two'); - INSERT 0 1 - MogDB=# SELECT xmlagg(data) FROM xmltest; - xmlagg - -------------------------------------- - onetwo - (1 row) - ``` - -- xmlexists(text passing [BY REF] xml [BY REF]) - - Description: evaluate an XPath 1.0 expression (the first parameter ), and use the passed XML value as its context item. If the evaluation result produces an empty node set, the function returns false. If it produces any other value, it returns true. If any parameter is null, the function returns null. The non-null value passed as a context item must be an XML document, not a content fragment or any non-XML value. - - Parameter: xml - - Return value type: bool - - Example: - - ```sql - MogDB=# SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'TorontoOttawa'); - xmlexists - ------------ - t - (1 row) - ``` - -- xml_is_well_formed(text) - - Description: Check whether the text is in the correct XML type format and the return value is of Boolean type. - - Parameter: text - - Return value type: bool - - Example: - - ```sql - MogDB=# SELECT xml_is_well_formed('<>'); - xml_is_well_formed - -------------------- - f - (1 row) - ``` - -- xml_is_well_formed_document(text) - - Description: Check whether the text is in the correct XML type format and the return value is of Boolean type. - - Parameter: text - - Return value type: bool - - Example: - - ```sql - MogDB=# SELECT xml_is_well_formed_document('bar'); - xml_is_well_formed_document - ----------------------------- - t - (1 row) - ``` - -- xml_is_well_formed_content(text) - - Description: Check whether the text is in the correct XML type format and the return value is of Boolean type. - - Parameter: text - - Return value type: bool - - Example: - - ```sql - MogDB=# select xml_is_well_formed_content('k'); - xml_is_well_formed_content - ---------------------------- - t - (1 row) - ``` - -- xpath(xpath, xml [, nsarray]) - - Description: The XPath 1.0 expression xpath (a text value ) is calculated on the XML value xml. It returns an array of XML values, which corresponds to the node collection generated by the XPath expression. If the XPath expression returns a scalar value instead of a node collection, it will return an array of single elements. - - The second parameter must be a well-formed XML document. Specifically, it must have a single root node element. - - The third optional parameter of this function is an array of namespace mappings. This array should be a two-dimensional text array with the length of its second axis equal to 2 (that is, it should be an array of arrays, each of which is composed of exactly 2 elements). The first element of each array item is the name (alias) of the namespace, and the second element is the URI of the namespace. 
The aliases provided in this array are not required to be the same as those used in the XML document itself (in other words, aliases are local in the XML document and in the xpath function environment). - - Return value type: xml - - Example: - - ```sql - MogDB=# SELECT xpath('/my:a/text()', 'test',ARRAY[ARRAY['my', 'http://example.com']]); - xpath - -------- - {test} - (1 row) - ``` - -- xpath_exists(xpath, xml [, nsarray]) - - Description: This function is a special form of xpath function. This function does not return a single XML value that satisfies the XPath 1.0 expression. It returns a Boolean value indicating whether the query is satisfied (specifically, whether it produces any value other than an empty node set ). This function is equivalent to the standard XMLEXISTS predicate, but it also provides support for a namespace mapping parameter. - - Return value type: bool - - Example: - - ```sql - MogDB=# SELECT xpath_exists('/my:a/text()', 'test',ARRAY[ARRAY['my', 'http://example.com']]); - xpath_exists - -------------- - t - (1 row) - ``` - -- query_to_xml(query text, nulls boolean, tableforest boolean, targetns text) - - Description: This function maps the contents of the relational table to XML values, which can be understood as the export function of XML. - - Return value type: xml - -- query_to_xmlschema(query text, nulls boolean, tableforest boolean, targetns text) - - Description: returns XML schema documents, which describe the mapping performed by the corresponding functions above. - -- query_to_xml_and_xmlschema(query text, nulls boolean, tableforest boolean, targetns text) - - Description: Generate XML data mapping and corresponding XML schema, and link the generated results together in a document. - -- cursor_to_xml(cursor refcursor, count int, nulls boolean,tableforest boolean, targetns text) - - Description: This function maps the contents of the relational table to XML values, which can be understood as the export function of XML. - - Return value type: xml - -- cursor_to_xmlschema(cursor refcursor, nulls boolean, tableforest boolean, targetns text) - - Description: returns XML schema documents, which describe the mapping performed by the corresponding functions above. - - Return value type: xml - -- schema_to_xml(schema name, nulls boolean, tableforest boolean, targetns text) - - Description: Map tables in the schema to XML values. - - Return value type: xml - -- schema_to_xmlschema(schema name, nulls boolean, tableforest boolean, targetns text) - - Description: Map tables in the schema to XML schema documents. - - Return value type: xml - -- schema_to_xml_and_xmlschema(schema name, nulls boolean, tableforest boolean, targetns text) - - Description: Map tables in the schema to XML values and schema documents. - - Return value type: xml - -- database_to_xml(nulls boolean, tableforest boolean, targetns text) - - Description: Map database tables to XML values. - - Return value type: xml - -- database_to_xmlschema(nulls boolean, tableforest boolean, targetns text) - - Description: Map database tables to XML schema documents. - - Return value type: xml - -- database_to_xml_and_xmlschema(nulls boolean, tableforest boolean, targetns text) - - Description: Map database tables to XML values and schema documents. - - Return value type: xml - -- table_to_xml(tbl regclass, nulls boolean, tableforest boolean, targetns text) - - Description: This function maps the contents of the relational table to XML values, which can be understood as the export function of XML. 
-
-  Return value type: xml
-
-- table_to_xmlschema(tbl regclass, nulls boolean, tableforest boolean, targetns text)
-
-  Description: returns XML schema documents, which describe the mapping performed by the corresponding functions above.
-
-  Return value type: xml
-
-- table_to_xml_and_xmlschema(tbl regclass, nulls boolean, tableforest boolean, targetns text)
-
-  Description: Generate XML data mapping and corresponding XML schema, and link the generated results together in a document.
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **explain:**
-  >
-  > - Xpath related functions only support xpath () and xpath_ Exists (). Because it uses the xpath language to query XML documents, these functions rely on the libxml2 library, which is only available in Xpath 1.0, so XPath is limited to 1.0.
+---
+title: XML Types
+summary: XML Types
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# XML Types
+
+The following functions are inherited from open-source PostgreSQL 9.2.
+
+- xmlparse ( { DOCUMENT | CONTENT } value)
+
+  Description: Uses the xmlparse function to generate an xml-type value from character data.
+
+  Parameter: data type is text
+
+  Return value type: xml
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT XMLPARSE (DOCUMENT '<?xml version="1.0"?><book><title>Manual</title><chapter>...</chapter></book>');
+                           xmlparse
+  ----------------------------------------------------------
+   <book><title>Manual</title><chapter>...</chapter></book>
+  (1 row)
+  MogDB=# SELECT XMLPARSE (CONTENT 'abc<foo>bar</foo><bar>foo</bar>');
+              xmlparse
+  ---------------------------------
+   abc<foo>bar</foo><bar>foo</bar>
+  (1 row)
+  ```
+
+- xmlserialize( { DOCUMENT | CONTENT } value AS type )
+
+  Description: Uses the xmlserialize function to generate a character string from an xml value.
+
+  Parameter: The type can be character, character varying, or text (or a variant of one of them).
+
+  Return value type: determined by the type specified in the call
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT XMLSERIALIZE(CONTENT 'good' AS CHAR(10));
+   xmlserialize
+  --------------
+   good
+  (1 row)
+  MogDB=# SELECT xmlserialize(DOCUMENT '<foo>bad</foo>' as text);
+    xmlserialize
+  ------------------
+   <foo>bad</foo>
+  (1 row)
+  ```
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+  >
+  > - When a string value is converted to an xml type without XMLPARSE or XMLSERIALIZE, the choice of DOCUMENT or CONTENT is determined by the “XML option” session configuration parameter, which can be set by the standard command:
+  >
+  >   ```
+  >   SET XML OPTION { DOCUMENT | CONTENT };
+  >   ```
+  >
+  >   or the equivalent syntax:
+  >
+  >   ```
+  >   SET xmloption TO { DOCUMENT | CONTENT };
+  >   ```
+
+- xmlcomment(text)
+
+  Description: Creates an XML value containing an XML comment with the specified text as its content. The text cannot contain “--” and cannot end with “-”, so that the result conforms to the format requirements of XML comments. If the argument is null, the result is also null.
+
+  Parameter: data type is text.
+
+  Return value type: xml
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xmlcomment('hello');
+    xmlcomment
+  --------------
+   <!--hello-->
+  ```
+
+- xmlconcat(xml[, …])
+
+  Description: Concatenates a list of individual XML values into a single value containing an XML content fragment. Null values are ignored; the result is null only if all arguments are null.
+
+  Parameter: data type is xml.
+
+  Return value type: xml
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xmlconcat('<abc/>', '<bar>foo</bar>');
+        xmlconcat
+  ----------------------
+   <abc/><bar>foo</bar>
+  ```
+
+- xmlelement(name name [, xmlattributes(value [AS attname] [, … ])] [, content, …])
+
+  Description: Generates an XML element with the given name, attributes, and content.
+
+  Return value type: xml
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xmlelement(name foo);
+   xmlelement
+  ------------
+   <foo/>
+  ```
+
+- xmlforest(content [AS name] [, …])
+
+  Description: Generates an XML sequence of elements with the given names and content.
+
+  Return value type: xml
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xmlforest('abc' AS foo, 123 AS bar);
+            xmlforest
+  ------------------------------
+   <foo>abc</foo><bar>123</bar>
+  ```
+
+- xmlpi(name target [, content])
+
+  Description: Creates an XML processing instruction. If the content is not empty, it cannot contain the character sequence “?>”.
+
+  Return value type: xml
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xmlpi(name php, 'echo "hello world";');
+              xmlpi
+  -----------------------------
+   <?php echo "hello world";?>
+  ```
+
+- xmlroot(xml, version text | no value [, standalone yes|no|no value])
+
+  Description: Modifies the properties of the root node of an XML value. If a version is specified, it replaces the value in the version declaration of the root node. If a standalone setting is specified, it replaces the value in the standalone declaration of the root node.
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xmlroot('<?xml version="1.1"?><content>abc</content>', version '1.0', standalone yes);
+                  xmlroot
+  ----------------------------------------
+   <?xml version="1.0" standalone="yes"?>
+   <content>abc</content>
+  ```
+
+- xmlagg(xml)
+
+  Description: This function is an aggregate function. It concatenates the input values of aggregate function calls and supports cross-row concatenation.
+
+  Parameter: xml
+
+  Return value type: xml
+
+  Example:
+
+  ```sql
+  MogDB=# CREATE TABLE xmltest (
+             id int,
+             data xml
+          );
+  MogDB=# INSERT INTO xmltest VALUES (1, '<value>one</value>');
+  INSERT 0 1
+  MogDB=# INSERT INTO xmltest VALUES (2, '<value>two</value>');
+  INSERT 0 1
+  MogDB=# SELECT xmlagg(data) FROM xmltest;
+                  xmlagg
+  --------------------------------------
+   <value>one</value><value>two</value>
+  (1 row)
+  ```
+
+- xmlexists(text passing [BY REF] xml [BY REF])
+
+  Description: Evaluates an XPath 1.0 expression (the first parameter), using the passed XML value as its context item. If the evaluation produces an empty node set, the function returns false; if it produces any other value, it returns true. If any parameter is null, the function returns null. The non-null value passed as the context item must be an XML document, not a content fragment or any non-XML value.
+
+  Parameter: xml
+
+  Return value type: bool
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF '<towns><town>Toronto</town><town>Ottawa</town></towns>');
+   xmlexists
+  ------------
+   t
+  (1 row)
+  ```
+
+- xml_is_well_formed(text)
+
+  Description: Checks whether the text is well-formed XML. Returns a Boolean value.
+
+  Parameter: text
+
+  Return value type: bool
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xml_is_well_formed('<>');
+   xml_is_well_formed
+  --------------------
+   f
+  (1 row)
+  ```
+
+- xml_is_well_formed_document(text)
+
+  Description: Checks whether the text is a well-formed XML document. Returns a Boolean value.
+
+  Parameter: text
+
+  Return value type: bool
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xml_is_well_formed_document('<pg:foo xmlns:pg="http://postgresql.org/stuff">bar</pg:foo>');
+   xml_is_well_formed_document
+  -----------------------------
+   t
+  (1 row)
+  ```
+
+- xml_is_well_formed_content(text)
+
+  Description: Checks whether the text is well-formed XML content. Returns a Boolean value.
+
+  Parameter: text
+
+  Return value type: bool
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xml_is_well_formed_content('k');
+   xml_is_well_formed_content
+  ----------------------------
+   t
+  (1 row)
+  ```
+
+- xpath(xpath, xml [, nsarray])
+
+  Description: Evaluates the XPath 1.0 expression xpath (given as text) against the XML value xml. It returns an array of XML values corresponding to the node set produced by the XPath expression. If the XPath expression returns a scalar value instead of a node set, a single-element array is returned.
+
+  The second parameter must be a well-formed XML document. Specifically, it must have a single root element node.
+
+  The third, optional parameter is an array of namespace mappings. This array should be a two-dimensional text array whose second axis has length 2 (that is, an array of arrays, each consisting of exactly 2 elements). The first element of each array entry is the namespace name (alias), and the second is the namespace URI. The aliases provided in this array do not have to be the same as those used in the XML document itself (in other words, aliases are local to both the XML document and the xpath function environment).
+
+  Return value type: xml[]
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xpath('/my:a/text()', '<my:a xmlns:my="http://example.com">test</my:a>', ARRAY[ARRAY['my', 'http://example.com']]);
+   xpath
+  --------
+   {test}
+  (1 row)
+  ```
+
+- xpath_exists(xpath, xml [, nsarray])
+
+  Description: This function is a specialized form of the xpath function. Instead of returning the individual XML values that satisfy the XPath 1.0 expression, it returns a Boolean indicating whether the query is satisfied (specifically, whether it produces any value other than an empty node set). It is equivalent to the standard XMLEXISTS predicate, except that it also supports a namespace mapping argument.
+
+  Return value type: bool
+
+  Example:
+
+  ```sql
+  MogDB=# SELECT xpath_exists('/my:a/text()', '<my:a xmlns:my="http://example.com">test</my:a>', ARRAY[ARRAY['my', 'http://example.com']]);
+   xpath_exists
+  --------------
+   t
+  (1 row)
+  ```
+
+- query_to_xml(query text, nulls boolean, tableforest boolean, targetns text)
+
+  Description: Maps the result set of the query to an XML value; this family of functions can be understood as the XML export functions (see the example after this group of functions).
+
+  Return value type: xml
+
+- query_to_xmlschema(query text, nulls boolean, tableforest boolean, targetns text)
+
+  Description: Returns an XML schema document describing the mapping performed by the corresponding function above.
+
+  Return value type: xml
+
+- query_to_xml_and_xmlschema(query text, nulls boolean, tableforest boolean, targetns text)
+
+  Description: Generates both the XML data mapping and the corresponding XML schema, linking the results together in one document.
+
+  Return value type: xml
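+
+  Example (a minimal sketch for this group of functions; the table t_xml_demo and its rows are hypothetical, and the exact layout of the generated XML may vary):
+
+  ```sql
+  MogDB=# CREATE TABLE t_xml_demo(id int, name text);
+  MogDB=# INSERT INTO t_xml_demo VALUES (1, 'one'), (2, 'two');
+  -- nulls = true, tableforest = false, no target namespace:
+  -- returns a single xml value with one <row> element per result row
+  MogDB=# SELECT query_to_xml('SELECT * FROM t_xml_demo ORDER BY id', true, false, '');
+  ```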
+
+- cursor_to_xml(cursor refcursor, count int, nulls boolean, tableforest boolean, targetns text)
+
+  Description: Maps the rows fetched from the cursor to XML values; it can be understood as an XML export function.
+
+  Return value type: xml
+
+- cursor_to_xmlschema(cursor refcursor, nulls boolean, tableforest boolean, targetns text)
+
+  Description: Returns an XML schema document describing the mapping performed by the corresponding function above.
+
+  Return value type: xml
+
+- schema_to_xml(schema name, nulls boolean, tableforest boolean, targetns text)
+
+  Description: Maps the tables in the schema to XML values.
+
+  Return value type: xml
+
+- schema_to_xmlschema(schema name, nulls boolean, tableforest boolean, targetns text)
+
+  Description: Maps the tables in the schema to an XML schema document.
+
+  Return value type: xml
+
+- schema_to_xml_and_xmlschema(schema name, nulls boolean, tableforest boolean, targetns text)
+
+  Description: Maps the tables in the schema to XML values and a schema document.
+
+  Return value type: xml
+
+- database_to_xml(nulls boolean, tableforest boolean, targetns text)
+
+  Description: Maps the database tables to XML values.
+
+  Return value type: xml
+
+- database_to_xmlschema(nulls boolean, tableforest boolean, targetns text)
+
+  Description: Maps the database tables to an XML schema document.
+
+  Return value type: xml
+
+- database_to_xml_and_xmlschema(nulls boolean, tableforest boolean, targetns text)
+
+  Description: Maps the database tables to XML values and a schema document.
+
+  Return value type: xml
+
+- table_to_xml(tbl regclass, nulls boolean, tableforest boolean, targetns text)
+
+  Description: Maps the contents of the relational table to XML values; it can be understood as an XML export function.
+
+  Return value type: xml
+
+- table_to_xmlschema(tbl regclass, nulls boolean, tableforest boolean, targetns text)
+
+  Description: Returns an XML schema document describing the mapping performed by the corresponding function above.
+
+  Return value type: xml
+
+- table_to_xml_and_xmlschema(tbl regclass, nulls boolean, tableforest boolean, targetns text)
+
+  Description: Generates both the XML data mapping and the corresponding XML schema, linking the results together in one document.
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+  >
+  > - Among the XPath-related functions, only xpath() and xpath_exists() are supported. Because they use the XPath language to query XML documents, these functions rely on the libxml2 library, which provides only XPath 1.0, so XPath is limited to version 1.0.
 > - Xquery, xml extension, and xslt are not supported.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/AI-features.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/AI-features.md
index a334a2ab..52f7e324 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/AI-features.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/AI-features.md
@@ -1,64 +1,100 @@
----
-title: AI Features
-summary: AI Features
-author: Zhang Cuiping
-date: 2021-11-08
----
-
-# AI Features
-
-## enable_hypo_index
-
-**Parameter description**: Specifies whether the database optimizer considers the created virtual index when executing the **EXPLAIN** statement. By executing **EXPLAIN** on a specific query statement, you can evaluate whether the index can improve the execution efficiency of the query statement based on the execution plan provided by the optimizer.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: Boolean
-
-- **on** indicates that a virtual index is created during **EXPLAIN** execution.
-- **off** indicates that no virtual index is created during **EXPLAIN** execution.
-
-**Default value**: **off**
-
-## db4ai_snapshot_mode
-
-**Parameter description**: There are two snapshot modes: MSS (materialized mode, storing data entities) and CSS (computing mode, storing incremental information).
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: a string, which can be **MSS** or **CSS**
-
-- **MSS** indicates the materialized mode. The DB4AI stores data entities when snapshots are created.
-- **CSS** indicates the computing mode. The DB4AI stores incremental information when creating snapshots.
-
-**Default value:** **MSS**
-
-## db4ai_snapshot_version_delimiter
-
-**Parameter description**: Specifies the delimiter for the snapshot version of a data table.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: a string, consisting of one or more characters.
-
-**Default value**: **@**
-
-## db4ai_snapshot_version_separator
-
-**Parameter description**: Specifies the subversion delimiter of a data table snapshot.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: a string, consisting of one or more characters.
-
-**Default value**: .
-
-## unix_socket_directory
-
-**Parameter description:** Specifies the path for storing files in the unix_socket communication mode. You can set this parameter only in the configuration file **postgresql.conf**. Before enabling the fenced mode, you need to set this GUC parameter.
-
-This parameter is a **POSTMASTER** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: a string of 0 or more characters
-
-**Default value:** **"**
+---
+title: AI Features
+summary: AI Features
+author: Zhang Cuiping
+date: 2021-11-08
+---
+
+# AI Features
+
+## enable_hypo_index
+
+**Parameter description**: Specifies whether the database optimizer considers the created virtual index when executing the **EXPLAIN** statement. By executing **EXPLAIN** on a specific query statement, you can evaluate whether the index can improve the execution efficiency of the query statement based on the execution plan provided by the optimizer.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that a virtual index is created during **EXPLAIN** execution.
+- **off** indicates that no virtual index is created during **EXPLAIN** execution.
+
+**Default value**: **off**
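+
+Example (a minimal sketch: the table **t1** and column **col1** are hypothetical, and **hypopg_create_index** is the virtual-index creation function provided by the virtual index feature):
+
+```sql
+MogDB=# SET enable_hypo_index = on;
+MogDB=# SELECT * FROM hypopg_create_index('CREATE INDEX ON t1(col1)');
+-- if the optimizer judges the index useful, the plan now references the hypothetical index
+MogDB=# EXPLAIN SELECT * FROM t1 WHERE col1 = 100;
+```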
+
+## db4ai_snapshot_mode
+
+**Parameter description**: There are two snapshot modes: MSS (materialized mode, storing data entities) and CSS (computing mode, storing incremental information).
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: a string, which can be **MSS** or **CSS**
+
+- **MSS** indicates the materialized mode. The DB4AI stores data entities when snapshots are created.
+- **CSS** indicates the computing mode. The DB4AI stores incremental information when creating snapshots.
+
+**Default value:** **MSS**
+
+## db4ai_snapshot_version_delimiter
+
+**Parameter description**: Specifies the delimiter for the snapshot version of a data table.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: a string, consisting of one or more characters.
+
+**Default value**: **@**
+
+## db4ai_snapshot_version_separator
+
+**Parameter description**: Specifies the subversion delimiter of a data table snapshot.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: a string, consisting of one or more characters.
+
+**Default value**: .
+
+## unix_socket_directory
+
+**Parameter description:** Specifies the path for storing files in the unix_socket communication mode. You can set this parameter only in the configuration file **postgresql.conf**. Before enabling the fenced mode, you need to set this GUC parameter.
+
+This parameter is a **POSTMASTER** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: a string of 0 or more characters
+
+**Default value:** **''** (an empty string)
+
+## enable_ai_stats
+
+**Parameter description:** Specifies whether smart statistics are created and used.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+**Default value:** **on**
+
+## enable_cachedplan_mgr
+
+**Parameter description:** Specifies whether adaptive plan selection is enabled.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+**Default value:** **on**
+
+## multi_stats_type
+
+**Parameter description:** Specifies the type of statistics to create when **enable_ai_stats** is set to **on**.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: enumerated type. The value can be **BAYESNET**, **MCV**, or **ALL**.
+
+- **BAYESNET**: only smart statistics are created.
+
+- **MCV**: only traditional statistics are created.
+
+- **ALL**: both traditional statistics and smart statistics are created.
+
+**Default value:** **BAYESNET**
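+
+Example (a minimal sketch of switching the statistics type for the current session; whether a subsequent ANALYZE then builds the corresponding statistics depends on the table involved):
+
+```sql
+MogDB=# SET enable_ai_stats = on;
+MogDB=# SET multi_stats_type = 'ALL';  -- collect both traditional and smart statistics
+MogDB=# SHOW multi_stats_type;
+```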
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/DCF-parameters-settings.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/DCF-parameters-settings.md
index 485d3251..b7282752 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/DCF-parameters-settings.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/DCF-parameters-settings.md
@@ -360,3 +360,18 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided
 
 **Value range**: enumerated type. The value can be **700** or **750**.
 
 **Default value**: **700**
+
+## dcf_majority_groups
+
+**Parameter description**: Configures the DCF policy-based majority feature. For each group configured in this parameter, at least one standby node within the group must receive logs; that is, the group always contains a synchronous standby. If you add or delete nodes in a DCF instance, or adjust the group values of nodes in the instance, you need to update this configuration accordingly.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: a string
+
+- off: an empty string ("") indicates that the policy-based majority feature is disabled.
+- on: a comma-separated list of valid group values; each group value must exist in dcf_config. For example, if the group values are 1 and 2, set the parameter to "1,2" to add both groups to DCF's policy-based majority configuration. If a configured group value does not exist in dcf_config, or other characters are configured, DCF considers the configured group invalid.
+
+**Default value**: empty string
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-warning.gif) **Note:** If all the nodes in a configured group fail, remove the group from this parameter before performing build-related operations (node repair, or node replacement without an IP change) on one of its nodes, and configure the group in this parameter again after the nodes return to normal.
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/HyperLogLog.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/HyperLogLog.md
index bc377e4f..3194bf1b 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/HyperLogLog.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/HyperLogLog.md
@@ -1,101 +1,101 @@
----
-title: HyperLogLog
-summary: HyperLogLog
-author: Zhang Cuiping
-date: 2021-11-08
----
-
-# HyperLogLog
-
-## hll_default_log2m
-
-**Parameter description**: Specifies the number of buckets for HLL data. The number of buckets affects the precision of distinct values calculated by HLL. The more buckets there are, the smaller the deviation is. The deviation range is as follows: [-1.04/2^log2m\*1⁄2^,+1.04/2^log2m*1⁄2^]
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 10 to 16
-
-**Default value**: **14**
-
-## hll_default_log2explicit
-
-**Parameter description**: Specifies the default threshold for switching from the explicit mode to the sparse mode.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to 12 The value **0** indicates that the explicit mode is skipped. The value 1 to 12 indicates that the mode is switched when the number of distinct values reaches 2^hll_default_log2explicit^.
-
-**Default value**: **10**
-
-## hll_default_log2sparse
-
-**Parameter description**: Specifies the default threshold for switching from the **sparse** mode to the **full** mode.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to 14 The value **0** indicates that the explicit mode is skipped. The value 1 to 14 indicates that the mode is switched when the number of distinct values reaches 2^hll_default_log2sparse^.
-
-**Default value**: **12**
-
-## hll_duplicate_check
-
-**Parameter description**: Specifies whether duplicatecheck is enabled by default.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: **0** or **1** **0**: disabled; **1**: enabled
-
-**Default value**: **0**
-
-## hll_default_regwidth (Discarded)
-
-**Parameter description**: Specifies the number of bits in each bucket for HLL data. A larger value indicates more memory occupied by HLL.
**hll_default_regwidth** and **hll_default_log2m** determine the maximum number of distinct values that can be calculated by HLL. Currently, **regwidth** is set to a fixed value and is no longer used.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 1 to 5
-
-**Default value**: **5**
-
-## hll_default_expthresh (Discarded)
-
-**Parameter description**: Specifies the default threshold for switching from the **explicit** mode to the **sparse** mode. Currently, the **hll_default_log2explicit** parameter is used to replace the similar function.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from -1 to 7 **-1** indicates the auto mode; **0** indicates that the **explicit** mode is skipped; a value from 1 to 7 indicates that the mode is switched when the number of distinct values reaches 2hll_default_expthresh.
-
-**Default value**: **-1**
-
-## hll_default_sparseon (Discarded)
-
-**Parameter description**: Specifies whether to enable the **sparse** mode by default. Currently, the **hll_default_log2sparse** parameter is used to replace the similar function. When **hll_default_log2sparse** is set to **0**, the **sparse** mode is disabled.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: **0** or **1** **0** indicates that the **sparse** mode is disabled by default. **1** indicates that the **sparse** mode is enabled by default.
-
-**Default value**: **1**
-
-## hll_max_sparse (Discarded)
-
-**Parameter description**: Specifies the size of **max_sparse**. Currently, the **hll_default_log2sparse** parameter is used to replace the similar function.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from -1 to 2147483647
-
-**Default value**: **-1**
-
-## enable_compress_hll (Discarded)
-
-**Parameter description**: Specifies whether to enable memory optimization for HLL. Currently, the HLL memory has been optimized, and this parameter is no longer used.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: Boolean
-
-- **on** or **true** indicates that memory optimization is enabled.
-- **off** or **false** indicates that memory optimization is disabled.
-
-**Default value**: **off**
+---
+title: HyperLogLog
+summary: HyperLogLog
+author: Zhang Cuiping
+date: 2021-11-08
+---
+
+# HyperLogLog
+
+## hll_default_log2m
+
+**Parameter description**: Specifies the number of buckets for HLL data. The number of buckets affects the precision of the distinct values calculated by HLL: the more buckets there are, the smaller the deviation is. The deviation range is [-1.04/2^(log2m/2)^, +1.04/2^(log2m/2)^].
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from 10 to 16
+
+**Default value**: **14**
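+
+Example (a worked calculation under the default setting; the SHOW statement simply confirms the current value):
+
+```sql
+MogDB=# SHOW hll_default_log2m;
+-- with log2m = 14 there are 2^14 = 16384 buckets, and the relative
+-- deviation is about ±1.04/2^(14/2) = ±1.04/128 ≈ ±0.81%
+```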
+
+## hll_default_log2explicit
+
+**Parameter description**: Specifies the default threshold for switching from the explicit mode to the sparse mode.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from 0 to 12. The value **0** indicates that the explicit mode is skipped. A value from 1 to 12 indicates that the mode is switched when the number of distinct values reaches 2^hll_default_log2explicit^.
+
+**Default value**: **10**
+
+## hll_default_log2sparse
+
+**Parameter description**: Specifies the default threshold for switching from the **sparse** mode to the **full** mode.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from 0 to 14. The value **0** indicates that the sparse mode is skipped. A value from 1 to 14 indicates that the mode is switched when the number of distinct values reaches 2^hll_default_log2sparse^.
+
+**Default value**: **12**
+
+## hll_duplicate_check
+
+**Parameter description**: Specifies whether duplicatecheck is enabled by default.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: **0** or **1**. **0**: disabled; **1**: enabled
+
+**Default value**: **0**
+
+## hll_default_regwidth (Discarded)
+
+**Parameter description**: Specifies the number of bits in each bucket for HLL data. A larger value indicates more memory occupied by HLL. **hll_default_regwidth** and **hll_default_log2m** determine the maximum number of distinct values that can be calculated by HLL. Currently, **regwidth** is set to a fixed value and is no longer used.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from 1 to 5
+
+**Default value**: **5**
+
+## hll_default_expthresh (Discarded)
+
+**Parameter description**: Specifies the default threshold for switching from the **explicit** mode to the **sparse** mode. Currently, its function has been replaced by the **hll_default_log2explicit** parameter.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from -1 to 7. **-1** indicates the auto mode; **0** indicates that the **explicit** mode is skipped; a value from 1 to 7 indicates that the mode is switched when the number of distinct values reaches 2^hll_default_expthresh^.
+
+**Default value**: **-1**
+
+## hll_default_sparseon (Discarded)
+
+**Parameter description**: Specifies whether to enable the **sparse** mode by default. Currently, its function has been replaced by the **hll_default_log2sparse** parameter. When **hll_default_log2sparse** is set to **0**, the **sparse** mode is disabled.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: **0** or **1**. **0** indicates that the **sparse** mode is disabled by default. **1** indicates that the **sparse** mode is enabled by default.
+
+**Default value**: **1**
+
+## hll_max_sparse (Discarded)
+
+**Parameter description**: Specifies the size of **max_sparse**. Currently, its function has been replaced by the **hll_default_log2sparse** parameter.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+ +**Value range**: an integer ranging from -1 to 2147483647 + +**Default value**: **-1** + +## enable_compress_hll (Discarded) + +**Parameter description**: Specifies whether to enable memory optimization for HLL. Currently, the HLL memory has been optimized, and this parameter is no longer used. + +This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Boolean + +- **on** or **true** indicates that memory optimization is enabled. +- **off** or **false** indicates that memory optimization is disabled. + +**Default value**: **off** diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/alarm-detection.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/alarm-detection.md index 34b2e4e8..a9351031 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/alarm-detection.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/alarm-detection.md @@ -1,73 +1,73 @@ ---- -title: Alarm Detection -summary: Alarm Detection -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Alarm Detection - -During the running of the MogDB, error scenarios can be detected and informed to users in time. - -## enable_alarm - -**Parameter description**: Specifies whether to enable the alarm detection thread to detect fault scenarios that may occur in the database. - -This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](./appendix.md). - -**Value range**: Boolean - -- **on** indicates that the alarm detection thread is enabled. -- **off** indicates that the alarm detection thread is disabled. - -**Default value**: **on** - -## connection_alarm_rate - -**Parameter description**: Specifies the ratio restriction on the maximum number of allowed parallel connections to the database. The maximum number of concurrent connections to the database is **max_connections** x **connection_alarm_rate**. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). - -**Value range:** a floating point number ranging from 0.0 to 1.0 - -**Default value**: **0.9** - -## alarm_report_interval - -**Parameter description**: specifies the interval at which an alarm is reported. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). - -**Value range**: an integer. The unit is s. - -**Default value:** **10** - -## alarm_component - -**Parameter description**: Certain alarms are suppressed during alarm reporting. That is, the same alarm will not be repeatedly reported by an instance within the period specified by **alarm_report_interval**. Its default value is **10s**. In this case, the parameter specifies the location of the alarm component that is used to process alarm information. - -This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). - -**Value range**: a string - -**Default value**: **/opt/snas/bin/snas_cm_cmd** - -## table_skewness_warning_threshold - -**Parameter description**: Specifies the threshold for triggering a table skew alarm. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). 
-
-**Value range**: a floating point number ranging from 0 to 1
-
-**Default value**: **1**
-
-## table_skewness_warning_rows
-
-**Parameter description**: Specifies the minimum number of rows for triggering a table skew alarm.
-
-This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to *INT_MAX*
-
-**Default value**: **100000**
+---
+title: Alarm Detection
+summary: Alarm Detection
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Alarm Detection
+
+While MogDB is running, error scenarios can be detected and reported to users in a timely manner.
+
+## enable_alarm
+
+**Parameter description**: Specifies whether to enable the alarm detection thread to detect fault scenarios that may occur in the database.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](./appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that the alarm detection thread is enabled.
+- **off** indicates that the alarm detection thread is disabled.
+
+**Default value**: **on**
+
+## connection_alarm_rate
+
+**Parameter description**: Specifies the ratio restriction on the maximum number of allowed parallel connections to the database. The maximum number of concurrent connections to the database is **max_connections** x **connection_alarm_rate**.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range:** a floating point number ranging from 0.0 to 1.0
+
+**Default value**: **0.9**
+
+## alarm_report_interval
+
+**Parameter description**: Specifies the interval at which an alarm is reported.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer. The unit is s.
+
+**Default value:** **10**
+
+## alarm_component
+
+**Parameter description**: Certain alarms are suppressed during alarm reporting. That is, the same alarm will not be repeatedly reported by an instance within the period specified by **alarm_report_interval** (its default value is **10s**). In this case, this parameter specifies the location of the alarm component that is used to process alarm information.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: a string
+
+**Default value**: **/opt/snas/bin/snas_cm_cmd**
+
+## table_skewness_warning_threshold
+
+**Parameter description**: Specifies the threshold for triggering a table skew alarm.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: a floating point number ranging from 0 to 1
+
+**Default value**: **1**
+
+## table_skewness_warning_rows
+
+**Parameter description**: Specifies the minimum number of rows for triggering a table skew alarm.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from 0 to *INT_MAX*
+
+**Default value**: **100000**
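+
+Example (a minimal sketch that tightens both skew-alarm conditions for the current session; the values are illustrative):
+
+```sql
+MogDB=# SET table_skewness_warning_threshold = 0.8;
+MogDB=# SET table_skewness_warning_rows = 500000;
+MogDB=# SHOW table_skewness_warning_threshold;
+```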
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/audit-switch.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/audit-switch.md
index 5ed8a0ff..6d03504f 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/audit-switch.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/audit-switch.md
@@ -30,6 +30,8 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided
 
 **Default value:** **pg_audit** If **om** is used for MogDB deployment, audit logs are stored in **$GAUSSLOG/pg_audit/Instance name**.
 
+When installing the database, PTK optimizes the value of this parameter based on the server configuration. For details, see [Recommended Value of GUC Parameters](https://docs.mogdb.io/en/ptk/v2.0/ref-recommend-guc).
+
 > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
 >
 > - You need to set different audit file directories for different DNs. Otherwise, audit logs will be abnormal.
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/auditing.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/auditing.md
index 2f804a92..c8d97b93 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/auditing.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/auditing.md
@@ -1,12 +1,12 @@
----
-title: Auditing
-summary: Auditing
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Auditing
-
-- **[Audit Switch](audit-switch.md)**
-- **[User and Permission Audit](user-and-permission-audit.md)**
+---
+title: Auditing
+summary: Auditing
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Auditing
+
+- **[Audit Switch](audit-switch.md)**
+- **[User and Permission Audit](user-and-permission-audit.md)**
 - **[Operation Auditing](operation-audit.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/operation-audit.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/operation-audit.md
index 95b14d5f..722ce369 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/operation-audit.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/operation-audit.md
@@ -1,185 +1,214 @@
----
-title: Operation Auditing
-summary: Operation Auditing
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Operation Auditing
-
-## audit_system_object
-
-**Parameter description**: Specifies whether to audit the CREATE, DROP, and ALTER operations on MogDB database objects. MogDB database objects include DATABASE, USER, Schema, and TABLE. The operations on the database object can be audited by changing the value of this parameter.
-
-This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
-
-**Value range**: an integer ranging from 0 to 67108863
-
-- **0** indicates that the function of auditing the CREATE, DROP, and ALTER operations on MogDB database objects is disabled.
-- Other values indicate that the CREATE, DROP, and ALTER operations on a certain or some MogDB database objects are audited.
-
-**Value description:**
-
-The value of this parameter is calculated by 26 binary bits. The 26 binary bits represent 26 types of MogDB database objects.
If the corresponding binary bit is set to **0**, the CREATE, DROP, and ALTER operations on corresponding database objects are not audited. If it is set to **1**, the CREATE, DROP, and ALTER operations are audited. For details about the audit contents represented by these 19 binary bits, see [Table 1](#audit_system_object). - -**Default value**: **12295** - -**Table 1** Meaning of each value for the **audit_system_object** parameter - -| **Binary Bit** | Description | Value Description | -| -------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| Bit 0 | Whether to audit the CREATE, DROP, and ALTER operations on databases. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | -| Bit 1 | Whether to audit the CREATE, DROP, and ALTER operations on schemas. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | -| Bit 2 | Whether to audit the CREATE, DROP, and ALTER operations on users. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | -| Bit 3 | Whether to audit the CREATE, DROP, ALTER, and TRUNCATE operations on tables. | **0** indicates that the CREATE, DROP, ALTER, and TRUNCATE operations on these objects are not audited.
**1** indicates that the CREATE, DROP, ALTER, and TRUNCATE operations on these objects are audited. | -| Bit 4 | Whether to audit the CREATE, DROP, and ALTER operations on indexes. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | -| Bit 5 | Whether to audit the CREATE and DROP operations on views. | **0** indicates that the CREATE and DROP operations on these objects are not audited.
**1** indicates that the CREATE and DROP operations on these objects are audited. | -| Bit 6 | Whether to audit the CREATE, DROP, and ALTER operations on triggers. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | -| Bit 7 | Whether to audit the CREATE, DROP, and ALTER operations on procedures/functions. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | -| Bit 8 | Whether to audit the CREATE, DROP, and ALTER operations on tablespaces. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | -| Bit 9 | Whether to audit the CREATE, DROP, and ALTER operations on resource pools. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | -| Bit 10 | Whether to audit the CREATE, DROP, and ALTER operations on workloads. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | -| Bit 11 | Whether to audit the CREATE, DROP, and ALTER operations on data sources. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | -| Bit 12 | Reserved. | - | -| Bit 13 | Whether to audit the CREATE, DROP, and ALTER operations on ROW LEVEL SECURITY objects. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | -| Bit 14 | Whether to audit the CREATE, DROP, and ALTER operations on types. | **0** indicates that the CREATE, DROP, and ALTER operations on types are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on types are audited. | -| Bit 15 | Whether to audit the CREATE, DROP, and ALTER operations on text search objects (CONFIGURATION and DICTIONARY). | **0** indicates that the CREATE, DROP, and ALTER operations on text search objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on text search objects are audited. | -| Bit 16 | Whether to audit the CREATE, DROP, and ALTER operations on directories. | **0** indicates that the CREATE, DROP, and ALTER operations on directories are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on directories are audited. | -| Bit 17 | Whether to audit the CREATE, DROP, and ALTER operations on workloads. | **0** indicates that the CREATE, DROP, and ALTER operations on types are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on types are audited. | -| Bit 18 | Whether to audit the CREATE, DROP, and ALTER operations on sequences. | **0** indicates that the operations are not audited.
**1** indicates that the operations are audited. | -| Bit 19 | Whether to audit the CREATE and DROP operations on CMK and CEK objects. | **0** indicates that the CREATE and DROP operations on CMK and CEK objects are not audited.
**1** indicates that the CREATE and DROP operations on CMK and CEK objects are audited. | -| Bit 20 | Whether to audit the CREATE, DROP, and ALTER operations on PACKAGE objects. | **0** indicates that the operations are not audited.
**1** indicates that the operations are audited. | -| Bit 21 | Whether to audit the CREATE and DROP operations on MODEL objects. | **0** indicates that the CREATE and ALTER operations are not audited.
**1** indicates that the CREATE and DROP operations are audited. | -| Bit 22 | Whether to audit the CREATE, DROP, and ALTER operations on PUBLICATION and SUBSCRIPTION objects. | **0** indicates that the CREATE, DROP, and ALTER operations are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations are audited. | -| Bit 23 | Whether to audit the ALTER and DROP operations on the **gs_global_config** objects. | **0** indicates that the ALTER and DROP operations are not audited.
**1** indicates that the ALTER and DROP operations are audited. | -| Bit 24 | Whether to audit the CREATE, DROP, and ALTER operations on FOREIGN DATA WRAPPER objects. | **0** indicates that the CREATE, DROP, and ALTER operations are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations are audited. | - -## audit_dml_state - -**Parameter description**: Specifies whether to audit the INSERT, UPDATE, and DELETE operations on a specific table. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** **0** or **1** - -- **0** indicates that the function of auditing the DML operations (except SELECT) is disabled. -- **1** indicates that the function of auditing the DML operations (except SELECT) is enabled. - -**Default value**: **0** - -## audit_dml_state_select - -**Parameter description**: Specifies whether to audit the SELECT operation. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** **0** or **1** - -- **0** indicates that the SELECT auditing function is disabled. -- **1** indicates that the SELECT auditing function is enabled. - -**Default value**: **0** - -## audit_function_exec - -**Parameter description**: Specifies whether to record the audit information during the execution of the stored procedures, anonymous blocks, or user-defined functions (excluding system functions). - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** **0** or **1** - -- **0** indicates that the function of auditing the procedure or function execution is disabled. -- **1** indicates that the function of auditing the procedure or function execution is enabled. - -**Default value**: **0** - -## audit_copy_exec - -**Parameter description**: Specifies whether to audit the COPY operation. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** **0** or **1** - -- **0** indicates that the COPY auditing function is disabled. -- **1** indicates that the COPY auditing function is enabled. - -**Default value**: **1** - -## audit_set_parameter - -**Parameter description**: Specifies whether to audit the SET operation. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** **0** or **1** - -- **0** indicates that the SET auditing function is disabled. -- **1** indicates that the SET auditing function is enabled. - -**Default value**: **1** - -## audit_xid_info - -**Parameter description**: Specifies whether to record the transaction ID of the SQL statement in the **detail_info** column of the audit log. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** **0** or **1** - -- **0** indicates that the function of recording transaction IDs in audit logs is disabled. -- **1** indicates that the function of recording transaction IDs in audit logs is enabled. - -**Default value**: **0** - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** If this function is enabled, the **detail_info** information in audit logs starts with **xid**. 
For example: -> -> ``` -> detail_info: xid=14619 , create table t1(id int); -> ``` -> -> If transaction IDs do not exist, **xid** is recorded as **NA** in audit logs. - -## enableSeparationOfDuty - -**Parameter description**: Specifies whether the separation of three duties is enabled. - -This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the separation of three duties is enabled. -- **off** indicates that the separation of three duties is disabled. - -**Default value**: **off** - -## enable_nonsysadmin_execute_direct - -**Parameter description**: Specifies whether non-system administrators are allowed to execute the EXECUTE DIRECT ON statement. - -This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that any user is allowed to execute the EXECUTE DIRECT ON statement. -- **off** indicates that only the system administrator is allowed to execute the EXECUTE DIRECT ON statement. - -**Default value**: **off** - -## enable_access_server_directory - -**Parameter description**: Specifies whether the system administrator has the permissions to create and delete DIRECTORY objects. - -This parameter is a **SIGHUP** parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the system administrator has the permission to create and delete DIRECTORY objects. -- **off** indicates that the system administrator does not have the permissions to create and delete DIRECTORY objects. - -**Default value**: **off** - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - For security purposes, only initial users can create and delete DIRECTORY objects by default. -> - If **enable_access_server_directory** is set to **on**, system administrators (including initial users) can create and delete DIRECTORY objects when **enableSeparationOfDuty** is set to **off**. When **enableSeparationOfDuty** is set to **on**, only the initial users can create and delete DIRECTORY objects. +--- +title: Operation Auditing +summary: Operation Auditing +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Operation Auditing + +## audit_system_object + +**Parameter description**: Specifies whether to audit the CREATE, DROP, and ALTER operations on MogDB database objects. MogDB database objects include DATABASE, USER, Schema, and TABLE. The operations on the database object can be audited by changing the value of this parameter. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: an integer ranging from 0 to 268435455 + +- **0** indicates that the function of auditing the CREATE, DROP, and ALTER operations on MogDB database objects is disabled. +- Other values indicate that the CREATE, DROP, and ALTER operations on a certain or some MogDB database objects are audited. + +**Value description:** + +The value of this parameter is calculated by 26 binary bits. The 26 binary bits represent 26 types of MogDB database objects. 
If the corresponding binary bit is set to **0**, the CREATE, DROP, and ALTER operations on corresponding database objects are not audited. If it is set to **1**, the CREATE, DROP, and ALTER operations are audited. For details about the audit content represented by each binary bit, see [Table 1](#audit_system_object). + +**Default value**: **67121195** + +**Table 1** Meaning of each value for the **audit_system_object** parameter + +| **Binary Bit** | Description | Value Description | +| -------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| Bit 0 | Whether to audit the CREATE, DROP, and ALTER operations on databases. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | +| Bit 1 | Whether to audit the CREATE, DROP, and ALTER operations on schemas. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | +| Bit 2 | Whether to audit the CREATE, DROP, and ALTER operations on users. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | +| Bit 3 | Whether to audit the CREATE, DROP, ALTER, and TRUNCATE operations on tables. | **0** indicates that the CREATE, DROP, ALTER, and TRUNCATE operations on these objects are not audited.
**1** indicates that the CREATE, DROP, ALTER, and TRUNCATE operations on these objects are audited. | +| Bit 4 | Whether to audit the CREATE, DROP, and ALTER operations on indexes. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | +| Bit 5 | Whether to audit the CREATE and DROP operations on views. | **0** indicates that the CREATE and DROP operations on these objects are not audited.
**1** indicates that the CREATE and DROP operations on these objects are audited. | +| Bit 6 | Whether to audit the CREATE, DROP, and ALTER operations on triggers. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | +| Bit 7 | Whether to audit the CREATE, DROP, and ALTER operations on procedures/functions. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | +| Bit 8 | Whether to audit the CREATE, DROP, and ALTER operations on tablespaces. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | +| Bit 9 | Whether to audit the CREATE, DROP, and ALTER operations on resource pools. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | +| Bit 10 | Whether to audit the CREATE, DROP, and ALTER operations on workloads. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | +| Bit 11 | Whether to audit the CREATE, DROP, and ALTER operations on data sources. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | +| Bit 12 | Reserved. | - | +| Bit 13 | Whether to audit the CREATE, DROP, and ALTER operations on ROW LEVEL SECURITY objects. | **0** indicates that the CREATE, DROP, and ALTER operations on these objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on these objects are audited. | +| Bit 14 | Whether to audit the CREATE, DROP, and ALTER operations on types. | **0** indicates that the CREATE, DROP, and ALTER operations on types are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on types are audited. | +| Bit 15 | Whether to audit the CREATE, DROP, and ALTER operations on text search objects (CONFIGURATION and DICTIONARY). | **0** indicates that the CREATE, DROP, and ALTER operations on text search objects are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on text search objects are audited. | +| Bit 16 | Whether to audit the CREATE, DROP, and ALTER operations on directories. | **0** indicates that the CREATE, DROP, and ALTER operations on directories are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on directories are audited. | +| Bit 17 | Whether to audit the CREATE, DROP, and ALTER operations on synonyms. | **0** indicates that the CREATE, DROP, and ALTER operations on synonyms are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations on synonyms are audited. | +| Bit 18 | Whether to audit the CREATE, DROP, and ALTER operations on sequences. | **0** indicates that the operations are not audited.
**1** indicates that the operations are audited. | +| Bit 19 | Whether to audit the CREATE and DROP operations on CMK and CEK objects. | **0** indicates that the CREATE and DROP operations on CMK and CEK objects are not audited.
**1** indicates that the CREATE and DROP operations on CMK and CEK objects are audited. | +| Bit 20 | Whether to audit the CREATE, DROP, and ALTER operations on PACKAGE objects. | **0** indicates that the operations are not audited.
**1** indicates that the operations are audited. | +| Bit 21 | Whether to audit the CREATE and DROP operations on MODEL objects. | **0** indicates that the CREATE and DROP operations are not audited.
**1** indicates that the CREATE and DROP operations are audited. | +| Bit 22 | Whether to audit the CREATE, DROP, and ALTER operations on PUBLICATION and SUBSCRIPTION objects. | **0** indicates that the CREATE, DROP, and ALTER operations are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations are audited. | +| Bit 23 | Whether to audit the ALTER and DROP operations on the **gs_global_config** objects. | **0** indicates that the ALTER and DROP operations are not audited.
**1** indicates that the ALTER and DROP operations are audited. | +| Bit 24 | Whether to audit the CREATE, DROP, and ALTER operations on FOREIGN DATA WRAPPER objects. | **0** indicates that the CREATE, DROP, and ALTER operations are not audited.
**1** indicates that the CREATE, DROP, and ALTER operations are audited. | + +## audit_dml_state + +**Parameter description**: Specifies whether to audit the INSERT, UPDATE, and DELETE operations on a specific table. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** **0** or **1** + +- **0** indicates that the function of auditing the DML operations (except SELECT) is disabled. +- **1** indicates that the function of auditing the DML operations (except SELECT) is enabled. + +**Default value**: **0** + +## audit_dml_state_select + +**Parameter description**: Specifies whether to audit the SELECT operation. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** **0** or **1** + +- **0** indicates that the SELECT auditing function is disabled. +- **1** indicates that the SELECT auditing function is enabled. + +**Default value**: **0** + +## audit_function_exec + +**Parameter description**: Specifies whether to record the audit information during the execution of the stored procedures, anonymous blocks, or user-defined functions (excluding system functions). + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** **0** or **1** + +- **0** indicates that the function of auditing the procedure or function execution is disabled. +- **1** indicates that the function of auditing the procedure or function execution is enabled. + +**Default value**: **0** + +## audit_system_function_exec + +**Parameter description**: Specifies whether to record audit logs when whitelisted system functions are executed. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** **0** or **1** + +- **0** indicates that the function of auditing system function execution is disabled. +- **1** indicates that the function of auditing system function execution is enabled. 
+ +**Default value**: **0** + +The following table lists the whitelisted system functions whose execution can be audited: + +| set_working_grand_version_num_manually | set_config | pg_cancel_backend | pg_cancel_session | pg_reload_conf | pg_rotate_logfile | +| -------------------------------------- | ----------------------------------- | ------------------------------------------ | -------------------------------- | ---------------------------------- | ---------------------------------------- | +| pg_terminate_session | pg_terminate_backend | pg_create_restore_point | pg_start_backup | pg_stop_backup | pg_switch_xlog | +| pg_cbm_rotate_file | pg_cbm_get_merged_file | pg_cbm_recycle_file | pg_enable_delay_ddl_recycle | pg_disable_delay_ddl_recycle | gs_roach_stop_backup | +| gs_roach_enable_delay_ddl_recycle | gs_roach_disable_delay_ddl_recycle | gs_roach_switch_xlog | pg_last_xlog_receive_location | pg_xlog_replay_pause | pg_xlog_replay_resume | +| gs_pitr_clean_history_global_barriers | gs_pitr_archive_slot_force_advance | pg_create_physical_replication_slot_extern | gs_set_obs_delete_location | gs_hadr_do_switchover | gs_set_obs_delete_location_with_slotname | +| gs_streaming_dr_in_switchover | gs_upload_obs_file | gs_download_obs_file | gs_set_obs_file_context | gs_get_hadr_key_cn | pg_advisory_lock | +| pg_advisory_lock_shared | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_all | pg_advisory_xact_lock | pg_advisory_xact_lock_shared | +| pg_try_advisory_lock | pg_try_advisory_lock_shared | pg_try_advisory_xact_lock | pg_try_advisory_xact_lock_shared | pg_create_logical_replication_slot | pg_drop_replication_slot | +| pg_logical_slot_peek_changes | pg_logical_slot_get_changes | pg_logical_slot_get_binary_changes | pg_replication_slot_advance | pg_replication_origin_create | pg_replication_origin_drop | +| pg_replication_origin_session_setup | pg_replication_origin_session_reset | pg_replication_origin_session_progress | pg_replication_origin_xact_setup | pg_replication_origin_xact_reset | pg_replication_origin_advance | +| local_space_shrink | gs_space_shrink | pg_free_remain_segment | gs_fault_inject | gs_repair_file | local_clear_bad_block_info | +| gs_repair_page | | | | | | + +## audit_copy_exec + +**Parameter description**: Specifies whether to audit the COPY operation. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** **0** or **1** + +- **0** indicates that the COPY auditing function is disabled. +- **1** indicates that the COPY auditing function is enabled. + +**Default value**: **1** + +## audit_set_parameter + +**Parameter description**: Specifies whether to audit the SET operation. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** **0** or **1** + +- **0** indicates that the SET auditing function is disabled. +- **1** indicates that the SET auditing function is enabled. + +**Default value**: **1** + +## audit_xid_info + +**Parameter description**: Specifies whether to record the transaction ID of the SQL statement in the **detail_info** column of the audit log. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). 
+ +**Value range:** **0** or **1** + +- **0** indicates that the function of recording transaction IDs in audit logs is disabled. +- **1** indicates that the function of recording transaction IDs in audit logs is enabled. + +**Default value**: **0** + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** If this function is enabled, the **detail_info** information in audit logs starts with **xid**. For example: +> +> ``` +> detail_info: xid=14619 , create table t1(id int); +> ``` +> +> If transaction IDs do not exist, **xid** is recorded as **NA** in audit logs. + +## enableSeparationOfDuty + +**Parameter description**: Specifies whether the separation of three duties is enabled. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the separation of three duties is enabled. +- **off** indicates that the separation of three duties is disabled. + +**Default value**: **off** + +## enable_nonsysadmin_execute_direct + +**Parameter description**: Specifies whether non-system administrators are allowed to execute the EXECUTE DIRECT ON statement. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that any user is allowed to execute the EXECUTE DIRECT ON statement. +- **off** indicates that only the system administrator is allowed to execute the EXECUTE DIRECT ON statement. + +**Default value**: **off** + +## enable_access_server_directory + +**Parameter description**: Specifies whether the system administrator has the permissions to create and delete DIRECTORY objects. + +This parameter is a **SIGHUP** parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the system administrator has the permission to create and delete DIRECTORY objects. +- **off** indicates that the system administrator does not have the permissions to create and delete DIRECTORY objects. + +**Default value**: **off** + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> +> - For security purposes, only initial users can create and delete DIRECTORY objects by default. +> - If **enable_access_server_directory** is set to **on**, system administrators (including initial users) can create and delete DIRECTORY objects when **enableSeparationOfDuty** is set to **off**. When **enableSeparationOfDuty** is set to **on**, only the initial users can create and delete DIRECTORY objects. 
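+
+As a quick end-to-end illustration of the DML auditing parameters described above (a minimal sketch, not part of the official reference; it assumes a running MogDB instance and a user with sufficient privileges, such as the initial user), auditing can be switched on at runtime and the resulting records inspected with the built-in **pg_query_audit** function:
+
+```
+-- Enable auditing of INSERT/UPDATE/DELETE and SELECT (both are SIGHUP parameters).
+ALTER SYSTEM SET audit_dml_state = 1;
+ALTER SYSTEM SET audit_dml_state_select = 1;
+
+-- Run some statements, then query the audit records for a time window
+-- (the timestamps below are placeholders).
+SELECT time, type, result, username, detail_info
+FROM pg_query_audit('2024-10-16 00:00:00', '2024-10-16 23:59:59');
+```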
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/user-and-permission-audit.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/user-and-permission-audit.md index 0d0d5479..99a536fc 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/user-and-permission-audit.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/auditing/user-and-permission-audit.md @@ -1,83 +1,108 @@ ---- -title: User and Permission Audit -summary: User and Permission Audit -author: Zhang Cuiping -date: 2021-04-20 ---- - -# User and Permission Audit - -## audit_login_logout - -**Parameter description**: Specifies whether to audit the MogDB user's login (including login success and failure) and logout. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: an integer ranging from 0 to 7 - -- **0** indicates that the function of auditing users' logins and logouts is disabled. -- **1** indicates that only successful user logins are audited. -- **2** indicates that only failed user logins are audited. -- **3** indicates that successful and failed user logins are audited. -- **4** indicates that only user logouts are audited. -- **5** indicates that successful user logouts and logins are audited. -- **6** indicates that failed user logouts and logins are audited. -- **7** indicates that successful user logins, failed user logins, and logouts are audited. - -**Default value**: **7** - -## audit_database_process - -**Parameter description**: Specifies whether to audit the MogDB user's login (including login success and failure) and logout. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** **0** or **1** - -- **0** indicates that the function of auditing MogDB start, stop, recovery, and switchover operations of a database is disabled. -- **1** indicates that the function of auditing MogDB start, stop, recovery, and switchover operations of a database is enabled. - -**Default value**: **1** - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: -> -> When MogDB starts, DN performs the switchover process. Therefore, when DN starts, the type in the audit log is system_switch. - -## audit_user_locked - -**Parameter description**: Specifies whether to audit the MogDB user's locking and unlocking. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** **0** or **1** - -- **0** indicates that the function of auditing user's locking and unlocking is disabled. -- **1** indicates that the function of auditing user's locking and unlocking is enabled. - -**Default value**: **1** - -## audit_user_violation - -**Parameter description**: Specifies whether to audit the access violation operations of a user. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** **0** or **1** - -- **0** indicates that the function of auditing the access violation operations of a user is disabled. -- **1** indicates that the function of auditing the access violation operations of a user is enabled. 
- -**Default value**: **0** - -## audit_grant_revoke - -**Parameter description**: Specifies whether to audit the granting and reclaiming of the MogDB user's permission. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** **0** or **1** - -- **0** indicates that the function of auditing the granting and reclaiming of a user's permission is disabled. -- **1** indicates that the function of auditing the granting and reclaiming of a user's permission is enabled. - -**Default value**: **1** +--- +title: User and Permission Audit +summary: User and Permission Audit +author: Zhang Cuiping +date: 2021-04-20 +--- + +# User and Permission Audit + +## audit_login_logout + +**Parameter description**: Specifies whether to audit the MogDB user's login (including login success and failure) and logout. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: an integer ranging from 0 to 7 + +- **0** indicates that the function of auditing users' logins and logouts is disabled. +- **1** indicates that only successful user logins are audited. +- **2** indicates that only failed user logins are audited. +- **3** indicates that successful and failed user logins are audited. +- **4** indicates that only user logouts are audited. +- **5** indicates that successful user logouts and logins are audited. +- **6** indicates that failed user logouts and logins are audited. +- **7** indicates that successful user logins, failed user logins, and logouts are audited. + +**Default value**: **7** + +## audit_database_process + +**Parameter description**: Specifies whether to audit the start, stop, recovery, and switchover operations of the MogDB database. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** **0** or **1** + +- **0** indicates that the function of auditing MogDB start, stop, recovery, and switchover operations of a database is disabled. +- **1** indicates that the function of auditing MogDB start, stop, recovery, and switchover operations of a database is enabled. + +**Default value**: **1** + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: +> +> When MogDB starts, DN performs the switchover process. Therefore, when DN starts, the type in the audit log is system_switch. + +## audit_user_locked + +**Parameter description**: Specifies whether to audit the MogDB user's locking and unlocking. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** **0** or **1** + +- **0** indicates that the function of auditing user's locking and unlocking is disabled. +- **1** indicates that the function of auditing user's locking and unlocking is enabled. + +**Default value**: **1** + +## audit_user_violation + +**Parameter description**: Specifies whether to audit the access violation operations of a user. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). 
+ +**Value range:** **0** or **1** + +- **0** indicates that the function of auditing the access violation operations of a user is disabled. +- **1** indicates that the function of auditing the access violation operations of a user is enabled. + +**Default value**: **0** + +## audit_grant_revoke + +**Parameter description**: Specifies whether to audit the granting and reclaiming of the MogDB user's permission. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** **0** or **1** + +- **0** indicates that the function of auditing the granting and reclaiming of a user's permission is disabled. +- **1** indicates that the function of auditing the granting and reclaiming of a user's permission is enabled. + +**Default value**: **1** + +## full_audit_users + +**Parameter description**: Specifies the full audit user list. Audit logs are recorded for all auditable operations performed by the users in the list. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** a string. Separate multiple usernames with commas. + +**Default value**: Empty string + +## no_audit_client + +**Parameter description**: Specifies the list of client names and IP addresses that are exempt from auditing. Each item is in the format **client_name@IP**, the same as the **client_conninfo** field of the **pg_query_audit** function, for example, `cm_agent@127.0.0.1` or `gs_clean@127.0.0.1`. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** a string. Separate multiple configuration items with commas. + +**Default value**: Empty string + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **Note**: +> +> - When an executed SQL statement matches both the full_audit_users and no_audit_client configurations, the no_audit_client configuration takes precedence and no audit log is recorded. +> - Internal server-side tools and communication between database nodes also generate audit logs. For these lower-risk scenarios, you can configure no_audit_client to skip auditing, which saves audit log space and improves audit log query performance. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/automatic-vacuuming.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/automatic-vacuuming.md index cc1d613f..53eab0f4 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/automatic-vacuuming.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/automatic-vacuuming.md @@ -185,3 +185,16 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T **Value range**: an integer ranging from 0 to *INT_MAX*. The unit is ms. **Default value**: **5s** (5000 ms) + +## handle_toast_in_autovac + +**Parameter description**: Specifies whether toast tables are processed during automatic VACUUM operations. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range**: Boolean + +- on enables the automatic VACUUM operation on the toast table. 
+- off means to disable processing of the toast table by the automatic VACUUM operation. + +**Default value**: off diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/backend-compression.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/backend-compression.md index 86f9bff6..07a90bd9 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/backend-compression.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/backend-compression.md @@ -1,120 +1,180 @@ ---- -title: Backend Compression Parameters -summary: Backend Compression Parameters -author: zhang cuiping -date: 2023-04-07 ---- - -# Backend Compression Parameters - -The system starts backend compression threads to perform compression operations on compression tables. - -## Backend Compression - -### autocmpr_max_workers - -**Parameter description:** specifies the maximum number of backend compression threads that can run concurrently. - -This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range:** integer with a minimum value of 0 (no background compression), a theoretical maximum value of 262143, and a dynamic maximum value in practice. The formula is "262143 - value of `max_connections` - value of `job_queue_processes` - number of helper threads - number of background compressed launcher threads - 1", where the number of helper threads is set by the macro definition, the default value of the current version is 20, and the number of background compressed launcher threads is fixed to 1. - -**Default value:** **3** - -### autocmpr_naptime - -**Parameter description:** specifies the time interval of the operations of setting backend compression. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range:** integer, the unit is second. The minimum value is 1, and the maximum value is 604800. - -**Default value:** **300s** - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**Note**: -> -> - The unit can be specified as h, min, or s (default) if you modify it by running `gs_guc`. -> - The unit can be specified as h, min, or s (default) if you modify it by editing the `postgresql.conf` file. -> - The unit cannot be specified and the default one is s if the parameter is modified by running `ALTER SYSTEM SET autocmpr_naptime TO value`. - -## Cost-based Row-level Compression - -The row-level compression feature of MogDB uses an overhead-based flow control mechanism to reduce the impact of background compression on database I/O as much as possible. For example, background compression does not need to be done quickly, and it does not want to seriously affect the performance of other database operations. Overhead-based background compression provides a means for database administrators to achieve this goal. - -**Background information**: During the compression task execution, the system maintains an internal counter that tracks the approximate overhead of various I/O operations during compression. If the accumulated cost reaches the limit declared by `compress_cost_limit`, the thread that initiated the compression task will sleep for the amount of time specified by `compress_cost_delay`, then the system will reset the counter and continue the compression task. 
- -### **compress_cost_limit** - -**Parameter description:** sets a cost limit for the thread that initiates the compression task. - -This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range:** integer. The minimum value is 1, and the maximum value is 10000. - -**Default value:** **200** - -### **compress_cost_delay** - -**Parameter description:** specifies the time that the compressing task thread sleeps when the cost of compressing a task exceeds this value. - -This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -Note that in many systems, the effective resolution for sleep time is 10 ms. So setting `compress_cost_delay` to something other than a multiple of 10 has the same effect as setting it to the next 10. - -The value of this parameter is generally set to a small value, which is commonly set to 10 or 20 ms. When tuning row-level compression feature resource usage, it is better to tune other parameters than this one. - -**Value range:** integer. The unit is ms. The minimum value is 0, and the maximum value is 100. - -**Default value:** **0** - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**Note**: Compression tasks in MogDB include foreground compression and background compression. The foreground compression is mainly triggered by three operations: `COPY`, `VACUUM FULL` and `CLUSTER`, and this parameter affects the execution time of the business operation that triggers the foreground compression task. Since the above three operations are expected to be performed as quickly as possible, `compress_cost_delay` is set to 0 by default. - -### **autocmpr_cost_limit** - -**Parameter description:** sets the cost limit of backend compression threads. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range**: integer. The minimum value is -1, and the maximum value is 10000. -1 indicates that the value of `compress_cost_limit` is used. - -**Default value**: **-1** - -### **autocmpr_cost_delay** - -**Parameter description:** sets the cost delay of backend compression threads. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range:** integer. The unit is ms. The minimum value is -1, and the maximum value is 100. -1 indicates that the value of `compress_cost_delay` is used. - -**Default value:** **20** - -### **compress_cost_page_hit** - -**Parameter description:** obtains a predicted cost of a data page from shared cache. It indicates that a cache pool is locked to query the cost of the shared hash tables. - -This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range:** integer. The minimum value is 0, and the maximum value is 10000. - -**Default value:** **1** - -### **compress_cost_page_miss** - -**Parameter description:** reads the predicted cost of a data page from a disk. It indicates that a cache pool is locked to query the cost of the shared hash tables and reading data blocks from a disk. - -This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range:** integer. 
The minimum value is 0, and the maximum value is 10000. - -**Default value:** **10** - -### **compress_cost_page_dirty** - -**Parameter description:** indicates the predicted cost of compressing a data page. It indicates the cost of flushing the dirty page after a page is scanned and compressed. - -This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range:** integer. The minimum value is 0, and the maximum value is 10000. - -**Default value: 20** \ No newline at end of file +--- +title: Backend Compression Parameters +summary: Backend Compression Parameters +author: zhang cuiping +date: 2023-04-07 +--- + +# Backend Compression Parameters + +The system starts backend compression threads to perform compression operations on compressed tables. + +## Backend Compression + +### autocmpr_max_workers + +**Parameter description:** specifies the maximum number of backend compression threads that can run concurrently. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** integer with a minimum value of 0 (no background compression), a theoretical maximum value of 262143, and a dynamic maximum value in practice. The formula is "262143 - value of `max_connections` - value of `job_queue_processes` - number of helper threads - number of background compressed launcher threads - 1", where the number of helper threads is set by the macro definition, the default value of the current version is 20, and the number of background compressed launcher threads is fixed to 1. + +**Default value:** **3** + +### autocmpr_naptime + +**Parameter description:** specifies the interval at which backend compression operations are scheduled. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** integer, the unit is second. The minimum value is 1, and the maximum value is 604800. + +**Default value:** **300s** + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**Note**: +> +> - The unit can be specified as h, min, or s (default) if you modify it by running `gs_guc`. +> - The unit can be specified as h, min, or s (default) if you modify it by editing the `postgresql.conf` file. +> - The unit cannot be specified and the default one is s if the parameter is modified by running `ALTER SYSTEM SET autocmpr_naptime TO value`. + +## Cost-based Row-level Compression + +The row-level compression feature of MogDB uses a cost-based flow control mechanism to reduce the impact of background compression on database I/O as much as possible. Background compression, for example, does not need to finish quickly, but it should not seriously affect the performance of other database operations. Cost-based background compression gives database administrators a means to achieve this goal. + +**Background information**: During the compression task execution, the system maintains an internal counter that tracks the approximate overhead of various I/O operations during compression. If the accumulated cost reaches the limit declared by `compress_cost_limit`, the thread that initiated the compression task will sleep for the amount of time specified by `compress_cost_delay`, then the system will reset the counter and continue the compression task. 
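+
+As a rough illustration of the mechanism (a hedged sketch using the default costs documented below, not an official tuning recommendation): with `compress_cost_page_miss = 10`, scanning 20 pages from disk accumulates 200 cost units, so a thread running with `compress_cost_limit = 200` would pause once for `compress_cost_delay` milliseconds at that point, reset its counter, and continue. The foreground knobs are USERSET and can be tried per session:
+
+```
+-- Throttle foreground compression in the current session only.
+SET compress_cost_limit = 200;  -- sleep once ~200 cost units have accumulated
+SET compress_cost_delay = 10;   -- sleep 10 ms each time the limit is reached
+```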
+ +### **compress_cost_limit** + +**Parameter description:** sets a cost limit for the thread that initiates the compression task. + +This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** integer. The minimum value is 1, and the maximum value is 10000. + +**Default value:** **200** + +### **compress_cost_delay** + +**Parameter description:** specifies the time that the compression task thread sleeps each time the accumulated compression cost reaches `compress_cost_limit`. + +This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +Note that in many systems, the effective resolution for sleep time is 10 ms. So setting `compress_cost_delay` to something other than a multiple of 10 has the same effect as rounding it up to the next multiple of 10. + +This parameter is generally set to a small value, commonly 10 or 20 ms. When tuning row-level compression resource usage, it is better to tune other parameters than this one. + +**Value range:** integer. The unit is ms. The minimum value is 0, and the maximum value is 100. + +**Default value:** **0** + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**Note**: Compression tasks in MogDB include foreground compression and background compression. The foreground compression is mainly triggered by three operations: `COPY`, `VACUUM FULL` and `CLUSTER`, and this parameter affects the execution time of the business operation that triggers the foreground compression task. Since the above three operations are expected to be performed as quickly as possible, `compress_cost_delay` is set to 0 by default. + +### **autocmpr_cost_limit** + +**Parameter description:** sets the cost limit of backend compression threads. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range**: integer. The minimum value is -1, and the maximum value is 10000. -1 indicates that the value of `compress_cost_limit` is used. + +**Default value**: **-1** + +### **autocmpr_cost_delay** + +**Parameter description:** sets the cost delay of backend compression threads. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** integer. The unit is ms. The minimum value is -1, and the maximum value is 100. -1 indicates that the value of `compress_cost_delay` is used. + +**Default value:** **20** + +### **compress_cost_page_hit** + +**Parameter description:** specifies the estimated cost of reading a data page that is found in the shared buffer cache. It represents the cost of locking the buffer pool and looking up the shared hash table. + +This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** integer. The minimum value is 0, and the maximum value is 10000. + +**Default value:** **1** + +### **compress_cost_page_miss** + +**Parameter description:** specifies the estimated cost of reading a data page from disk. It represents the cost of locking the buffer pool, looking up the shared hash table, and reading the data block from disk. + +This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** integer. 
The minimum value is 0, and the maximum value is 10000. + +**Default value:** **10** + +### **compress_cost_page_dirty** + +**Parameter description:** specifies the estimated cost of compressing a data page. It represents the cost of flushing the dirty page after the page is scanned and compressed. + +This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** integer. The minimum value is 0, and the maximum value is 10000. + +**Default value:** **20** + +## autocompress + +**Parameter description:** Specifies whether to enable the autocompress subprocess. + +This parameter is a **SIGHUP** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** Boolean + +**Default value**: on + +## enable_backend_compress + +**Parameter description:** Controls whether background compression is enabled. + +This parameter is a **SIGHUP** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** Boolean + +**Default value**: on + +## enable_compression_check + +**Parameter description:** Controls whether compression checking is enabled. + +This parameter is a **POSTMASTER** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** Boolean + +**Default value**: on + +## enable_default_compression_table + +**Parameter description:** Controls whether new tables are created as compressed tables by default. + +This parameter is a **POSTMASTER** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** Boolean + +**Default value**: off + +## enable_default_index_compression + +**Parameter description:** Controls whether index compression is enabled by default. + +This parameter is a **POSTMASTER** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** Boolean + +**Default value**: off + +## enable_page_compression + +**Parameter description:** Controls whether page compression is enabled. + +This parameter is a **POSTMASTER** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** Boolean + +**Default value**: off \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/communication-library-parameters.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/communication-library-parameters.md index 1794016a..83615abf 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/communication-library-parameters.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/communication-library-parameters.md @@ -1,86 +1,125 @@ ---- -title: Communication Library Parameters -summary: Communication Library Parameters -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Communication Library Parameters - -This section describes parameter settings and value ranges for communication libraries. - -## tcp_keepalives_idle - -**Parameter description**: Specifies the interval for transmitting keepalive signals on an OS that supports the **TCP_KEEPIDLE** socket option. If no keepalive signal is transmitted, the connection is in idle mode. 
- -This parameter is a USERSET parameter. Set it based on instructions provided in **Table GUC Parameters**. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - If the OS does not support **TCP_KEEPIDLE**, set this parameter to **0**. -> - The parameter is ignored on an OS where connections are established using the Unix domain socket. -> - 0 selects the system default. -> - This parameter is not shared between different sessions, which means that this parameter may have different values in different sessions. -> - When showing this parameter, it is the parameter value in the current session connection, not the value of the guc copy. - -**Value range:** 0 to 3600. The unit is s. - -**Default value**: **0** - -## tcp_keepalives_interval - -**Parameter description:** Specifies the response time before retransmission on an OS that supports the **TCP_KEEPINTVL** socket option. - -This parameter is a USERSET parameter. Set it based on instructions provided in **Table GUC Parameters**. - -**Value range**: 0 to 180. The unit is s. - -**Default value**: **0** - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - If the OS does not support **TCP_KEEPINTVL**, set this parameter to **0**. -> - The parameter is ignored on an OS where connections are established using the Unix domain socket. -> - 0 selects the system default. -> - This parameter is not shared between different sessions, which means that this parameter may have different values in different sessions. -> - When showing this parameter, it is the parameter value in the current session connection, not the value of the guc copy. - -## tcp_keepalives_count - -**Parameter description**: Specifies the number of keepalive signals that can be waited before the MogDB server is disconnected from the client on an OS that supports the **TCP_KEEPCNT** socket option. - -This parameter is a USERSET parameter. Set it based on instructions provided in **Table GUC Parameters**. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - If the OS does not support **TCP_KEEPCNT**, set this parameter to **0**. -> - The parameter is ignored on an OS where connections are established using the Unix domain socket. -> - 0 selects the system default. -> - This parameter is not shared between different sessions, which means that this parameter may have different values in different sessions. -> - When showing this parameter, it is the parameter value in the current session connection, not the value of the guc copy. - -**Value range**: 0 to 100. **0** indicates that the connection is immediately broken if MogDB does not receive a keepalived signal from the client. - -**Default value:** **0** - -## comm_proxy_attr - -**Parameter description**: Specifies the parameters related to the communication proxy library. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - This parameter applies only to the centralized ARM standalone system running EulerOS 2.9. -> - This function takes effect when the thread pool is enabled, that is, **enable_thread_pool** is set to **on**. -> - When setting this parameter, you need to set the GUC parameter **local_bind_address** to the IP address of the NIC of the **libos_kni**. 
-> - **Parameter template**: comm_proxy_attr = '{enable_libnet:true, enable_dfx:false, numa_num:4, numa_bind:[[30,31],[62,63],[94,95],[126,127]]}'
-> - Parameters that need to be configured include:
->   - **enable_libnet**: whether to enable the user-mode protocol. The options are as follows: **true** and **false**.
->   - **enable_dfx**: whether to enable the communication proxy library view. The options are as follows: **true** and **false**.
->   - **numa_num**: number of NUMA nodes in the system. 2P and 4P servers are supported. The value can be: **4** or **8**.
->   - **numa_bind**: core binding parameter of the agent thread. Each numa has two CPUs. There are a total of **numa_num** groups. The value range is as follows: [0, Number of CPUs - 1].
-
-This parameter is a POSTMASTER parameter. Set it based on instructions provided in **Table GUC Parameters**.
-
-**Value range**: a string, consisting of one or more characters
-
-**Default value**: **none**
+---
+title: Communication Library Parameters
+summary: Communication Library Parameters
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Communication Library Parameters
+
+This section describes parameter settings and value ranges for communication libraries.
+
+## tcp_keepalives_idle
+
+**Parameter description**: Specifies the interval for transmitting keepalive signals on an OS that supports the **TCP_KEEPIDLE** socket option. If no keepalive signal is transmitted, the connection is in idle mode.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in [Table GUC Parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+>
+> - If the OS does not support **TCP_KEEPIDLE**, set this parameter to **0**.
+> - The parameter is ignored on an OS where connections are established using the Unix domain socket.
+> - 0 selects the system default.
+> - This parameter is not shared between different sessions, which means that this parameter may have different values in different sessions.
+> - When this parameter is queried, the value shown is the one in effect for the current session connection, not the value of the GUC copy.
+
+**Value range:** 0 to 3600. The unit is s.
+
+**Default value**: **0**
+
+## tcp_keepalives_interval
+
+**Parameter description:** Specifies the response time before retransmission on an OS that supports the **TCP_KEEPINTVL** socket option.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in [Table GUC Parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: 0 to 180. The unit is s.
+
+**Default value**: **0**
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+>
+> - If the OS does not support **TCP_KEEPINTVL**, set this parameter to **0**.
+> - The parameter is ignored on an OS where connections are established using the Unix domain socket.
+> - 0 selects the system default.
+> - This parameter is not shared between different sessions, which means that this parameter may have different values in different sessions.
+> - When this parameter is queried, the value shown is the one in effect for the current session connection, not the value of the GUC copy.
+
+## tcp_keepalives_count
+
+**Parameter description**: Specifies the number of keepalive signals that can be waited before the MogDB server is disconnected from the client on an OS that supports the **TCP_KEEPCNT** socket option.
+
+This parameter is a USERSET parameter.
Set it based on instructions provided in [Table GUC Parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+>
+> - If the OS does not support **TCP_KEEPCNT**, set this parameter to **0**.
+> - The parameter is ignored on an OS where connections are established using the Unix domain socket.
+> - 0 selects the system default.
+> - This parameter is not shared between different sessions, which means that this parameter may have different values in different sessions.
+> - When this parameter is queried, the value shown is the one in effect for the current session connection, not the value of the GUC copy.
+
+**Value range**: 0 to 100. **0** indicates that the connection is immediately broken if MogDB does not receive a keepalive signal from the client.
+
+**Default value:** **0**
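All three keepalive parameters are USERSET-level, so they can be inspected and tuned per session. A minimal sketch (the values are illustrative only):

```sql
-- Inspect the current keepalive settings for this session.
SHOW tcp_keepalives_idle;
SHOW tcp_keepalives_interval;
SHOW tcp_keepalives_count;

-- Probe an idle connection after 60 seconds, retry every 10 seconds,
-- and drop the connection after 5 unanswered probes.
SET tcp_keepalives_idle = 60;
SET tcp_keepalives_interval = 10;
SET tcp_keepalives_count = 5;
```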
+
+## tcp_user_timeout
+
+**Parameter description**: On operating systems that support the **TCP_USER_TIMEOUT** socket option, specifies the maximum amount of time that transmitted data may remain unacknowledged before the TCP connection is forcibly closed.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in [Table GUC Parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+>
+> - If the operating system does not support the TCP_USER_TIMEOUT option, the value of this parameter does not take effect and defaults to 0.
+> - This parameter is ignored on operating systems that connect via Unix domain sockets.
+
+**Value range**: an integer ranging from 0 to 3600000. The unit is ms. The value **0** means that the OS setting is used.
+
+**Default value:** **0**
+
+Note that the effect of this parameter differs between operating system kernels:
+
+- On aarch64 EulerOS (Linux kernel version: 4.19), the timeout is the value set for this parameter.
+
+- On x86 Euler2.5 (Linux kernel version: 3.10), the timeout is not the value of this parameter itself, but the upper limit of the "total Linux TCP retransmission time" interval into which the tcp_user_timeout value falls. For example, if tcp_user_timeout=40000, the total retransmission time is 51 seconds.
+
+  **Table 1** x86 Euler2.5 (Linux kernel version: 3.10) tcp_user_timeout parameter value description
+
+  | Linux TCP retransmission count | Linux TCP retransmission total time interval (seconds) | Example of tcp_user_timeout setting (milliseconds) | Actual Linux TCP retransmission total elapsed time (seconds) |
+  | :----------------------------- | :----------------------------------------------------- | :------------------------------------------------- | :----------------------------------------------------------- |
+  | 1 | (0.2,0.6] | 400 | 0.6 |
+  | 2 | (0.6,1.4] | 1000 | 1.4 |
+  | 3 | (1.4,3] | 2000 | 3 |
+  | 4 | (3,6.2] | 4000 | 6.2 |
+  | 5 | (6.2,12.6] | 10000 | 12.6 |
+  | 6 | (12.6,25.4] | 20000 | 25.4 |
+  | 7 | (25.4,51] | 40000 | 51 |
+  | 8 | (51,102.2] | 80000 | 102.2 |
+  | 9 | (102.2,204.6] | 150000 | 204.6 |
+  | 10 | (204.6,324.6] | 260000 | 324.6 |
+  | 11 | (324.6,444.6] | 400000 | 444.6 |
+
+Note: The interval between TCP retransmissions grows exponentially with the number of retransmissions. Once a retransmission interval reaches 120 seconds, each subsequent retransmission uses a fixed 120-second interval.
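As a hedged illustration, a SIGHUP-level parameter such as this one can usually be changed cluster-wide without a restart, assuming ALTER SYSTEM SET is available for SIGHUP parameters as elsewhere in this guide; the 30-second value is only an example:

```sql
-- Close connections whose sent data stays unacknowledged for 30 seconds.
ALTER SYSTEM SET tcp_user_timeout = 30000;
-- The new value becomes visible once the configuration reload is processed.
SHOW tcp_user_timeout;
```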
+
+## comm_proxy_attr
+
+**Parameter description**: Specifies the parameters related to the communication proxy library.
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+>
+> - This parameter applies only to the centralized ARM standalone system running EulerOS 2.9.
+> - This function takes effect when the thread pool is enabled, that is, **enable_thread_pool** is set to **on**.
+> - When setting this parameter, you need to set the GUC parameter **local_bind_address** to the IP address of the NIC of the **libos_kni**.
+> - **Parameter template**: comm_proxy_attr = '{enable_libnet:true, enable_dfx:false, numa_num:4, numa_bind:[[30,31],[62,63],[94,95],[126,127]]}'
+> - Parameters that need to be configured include:
+>   - **enable_libnet**: whether to enable the user-mode protocol. The options are as follows: **true** and **false**.
+>   - **enable_dfx**: whether to enable the communication proxy library view. The options are as follows: **true** and **false**.
+>   - **numa_num**: number of NUMA nodes in the system. 2P and 4P servers are supported. The value can be: **4** or **8**.
+>   - **numa_bind**: core binding parameter of the agent thread. Each numa has two CPUs. There are a total of **numa_num** groups. The value range is as follows: [0, Number of CPUs - 1].
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in [Table GUC Parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: a string, consisting of one or more characters
+
+**Default value**: **none**
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/connection-and-authentication.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/connection-and-authentication.md
index 4cfcb79e..ddd36293 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/connection-and-authentication.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/connection-and-authentication.md
@@ -1,12 +1,12 @@
----
-title: Connection and Authentication
-summary: Connection and Authentication
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Connection and Authentication
-
-- **[Connection Settings](connection-settings.md)**
-- **[Security and Authentication (postgresql.conf)](security-and-authentication.md)**
+---
+title: Connection and Authentication
+summary: Connection and Authentication
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Connection and Authentication
+
+- **[Connection Settings](connection-settings.md)**
+- **[Security and Authentication (postgresql.conf)](security-and-authentication.md)**
 - **[Communication Library Parameters](communication-library-parameters.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/connection-settings.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/connection-settings.md
index 34d14c74..ae96452d 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/connection-settings.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/connection-settings.md
@@ -9,6 +9,19 @@ date: 2021-04-20
 
 This section describes parameters related to client-server connection modes.
 
+## light_comm
+
+**Parameter description**: Controls whether the server uses lightweight communication, that is, lightweight locking and non-blocking socket-based communication.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**:
+
+- ON / TRUE: indicates that lightweight communication is used.
+- OFF / FALSE: indicates that lightweight communication is not used.
+
+**Default value**: OFF / FALSE
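A hedged sketch of checking and staging this setting; because light_comm is a POSTMASTER parameter, a staged change only takes effect after the instance is restarted:

```sql
-- Check whether lightweight communication is active on this instance.
SHOW light_comm;
-- Stage a change; it takes effect only after a restart.
ALTER SYSTEM SET light_comm = on;
```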
 ## listen_addresses
 
 **Parameter description**: Specifies the TCP/IP addresses that a server listens to for connections from the client. This parameter specifies the IP addresses used by the MogDB server to listen on, such as an IPv4 or IPv6 address (if supported). A server may have multiple NICs, and each NIC can be bound to multiple IP addresses. This parameter specifies the IP addresses to which the MogDB server is bound. The client can use an IP address specified in this parameter to connect to MogDB or send requests to it.
@@ -195,3 +208,55 @@ This parameter is a USERSET parameter used for O&M. You are advised not to chang
 > ```
 >
 > **driver_name** and **driver_version** are displayed by default. Whether **driver_path** and **os_user** are displayed is determined by users.
+
+## enable_dolphin_proto
+
+**Parameter description**: Specifies whether to enable the dolphin database protocol feature.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- on indicates that the dolphin database protocol is enabled.
+- off indicates that the dolphin database protocol is disabled.
+
+**Default value**: off
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+>
+> - MogDB has supported multiple database network protocols since version 3.1.0; the default is the MogDB database protocol.
+> - When the dolphin plugin is loaded and this parameter is enabled, B-compatible databases in MogDB can be accessed via the MySQL database protocol.
+> - The current dolphin plugin is compatible with MySQL 5.7.x client tools and with mysql-connector-java-5.1.47.
+
+## dolphin_server_port
+
+**Parameter description**: The TCP port number on which the dolphin protocol plugin listens.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Integer, 1024 to 65535
+
+**Default value**: 3308
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+>
+> - This feature is available when the dolphin plugin is loaded and the dolphin database protocol is enabled.
+> - When setting the port number, choose an unoccupied port that does not conflict with the port of the MogDB database protocol.
+
+## b_compatibility_user_host_auth
+
+**Parameter description**: Controls whether the creation of users such as `user@host` and `'user'@'host'` is allowed, and whether MySQL-compatible `user@host` authentication is enabled. For MySQL-compatible `user@host` authentication, set this parameter to on in the configuration file postgresql.conf.
+
+**Value range**: Boolean
+
+**Default value**: off
+
+**Example**:
+
+```sql
+MogDB=# show b_compatibility_user_host_auth;
+ b_compatibility_user_host_auth
+--------------------------------
+ off
+(1 row)
+```
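A hedged sketch of verifying the dolphin protocol settings from a SQL session; both parameters require the dolphin plugin and, being POSTMASTER parameters, a restart to change:

```sql
-- Confirm whether the MySQL-compatible protocol is enabled,
-- and on which port it listens (3308 by default).
SHOW enable_dolphin_proto;
SHOW dolphin_server_port;
```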
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md
index fe50b057..db04d381 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md
@@ -32,16 +32,6 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T
 > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
 > If the number of iterations is too small, the password storage security is reduced. If the number of iterations is too large, the performance deteriorates in scenarios involving password encryption, such as authentication and user creation. Set the number of iterations based on actual hardware conditions. You are advised to retain the default value.
 
-## session_authorization
-
-**Parameter description**: Specifies the user ID of the current session.
-
-This parameter is an internal parameter of the USERSET type and can be set only using the [SET SESSION AUTHORIZATION](../../../reference-guide/sql-syntax/SET-SESSION-AUTHORIZATION.md) syntax.
-
-**Value range**: a string
-
-**Default value**: **NULL**
-
 ## session_timeout
 
 **Parameter description**: Specifies the longest duration allowed when no operations are performed on a client after it is connected to the server.
@@ -52,6 +42,8 @@ This parameter is a USERSET parameter. Set it based on instructions provided in
 
 **Default value**: **10min**
 
+When installing the database, PTK optimizes the value of this parameter according to the server configuration. For more information, see [Recommended Value of GUC Parameters](https://docs.mogdb.io/en/ptk/v2.0/ref-recommend-guc).
+
 ## idle_in_transaction_session_timeout
 
 **Parameter description:** Specifies the maximum duration during which no operation is performed if the current session is in a transaction after the connection to the server is established.
@@ -81,6 +73,8 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided
 
 **Default value**: **on**
 
+When installing the database, PTK optimizes the value of this parameter according to the server configuration. For more information, see [Recommended Value of GUC Parameters](https://docs.mogdb.io/en/ptk/v2.0/ref-recommend-guc).
+
 ## require_ssl
 
 **Parameter description**: Specifies whether the server requires the SSL connection. This parameter is valid only when **[ssl](#ssl)** is set to **on**. Read [Using gsql to Connect to a Database](../../../administrator-guide/routine-maintenance/using-the-gsql-client-for-connection.md) before setting this parameter.
@@ -127,6 +121,16 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided
 
 **Default value**: **server.crt**
 
+## ssl_cert_notify_time
+
+**Parameter description**: Specifies how many days in advance a reminder is issued before the SSL server certificate expires.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: an integer ranging from 7 to 180. The unit is day.
+
+**Default value**: 90
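A hedged example of tightening the certificate-expiry reminder window (SIGHUP level, so a configuration reload rather than a restart applies it; 30 days is an arbitrary illustration):

```sql
-- Warn 30 days before the SSL server certificate expires.
ALTER SYSTEM SET ssl_cert_notify_time = 30;
SHOW ssl_cert_notify_time;
```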
+
 ## ssl_key_file
 
 **Parameter description**: Specifies the name of the SSL private key file. The path depends on the data directory.
@@ -137,6 +141,8 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided
 
 **Default value**: **server.key**
 
+When installing the database, PTK optimizes the value of this parameter according to the server configuration. For more information, see [Recommended Value of GUC Parameters](https://docs.mogdb.io/en/ptk/v2.0/ref-recommend-guc).
+
 ## ssl_ca_file
 
 **Parameter description**: Specifies the name of a file that contains CA information. The relative path is relative to the data directory.
@@ -205,6 +211,8 @@ If the initial user password is not specified during the installation process, t
 
 **Default value**: **off**
 
+When installing the database, PTK optimizes the value of this parameter according to the server configuration. For more information, see [Recommended Value of GUC Parameters](https://docs.mogdb.io/en/ptk/v2.0/ref-recommend-guc).
+
 ## password_policy
 
 **Parameter description**: Specifies whether to check the password complexity when you run the **CREATE ROLE/USER** or **ALTER ROLE/USER** command to create or modify an account of MogDB. For details about the password complexity check policy, see [Setting Password Security Policies](../../../security-guide/security/2-managing-users-and-their-permissions.md#setting-password-security-policies).
@@ -279,6 +287,8 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T
 
 **Default value**: **1d**
 
+When installing the database, PTK optimizes the value of this parameter according to the server configuration. For more information, see [Recommended Value of GUC Parameters](https://docs.mogdb.io/en/ptk/v2.0/ref-recommend-guc).
+
 ## failed_login_attempts
 
 **Parameter description**: Specifies the maximum number of incorrect password attempts before an account is locked. The account will be automatically unlocked after the time specified by **password_lock_time**. Only the sysadmin user can access the account. The automatic account locking policy applies in scenarios such as login and password modification using the **ALTER USER** command. For details about the policy, see [Setting Password Security Policies](../../../security-guide/security/2-managing-users-and-their-permissions.md#setting-password-security-policies).
@@ -313,6 +323,8 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T
 
 **Default value:** **2**
 
+When installing the database, PTK optimizes the value of this parameter according to the server configuration. For more information, see [Recommended Value of GUC Parameters](https://docs.mogdb.io/en/ptk/v2.0/ref-recommend-guc).
+
 ## password_min_length
 
 **Parameter description**: Specifies the minimum password length.
@@ -398,6 +410,8 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T
 
 **Default value**: **90**
 
+When installing the database, PTK optimizes the value of this parameter according to the server configuration. For more information, see [Recommended Value of GUC Parameters](https://docs.mogdb.io/en/ptk/v2.0/ref-recommend-guc).
+
 ## password_notify_time
 
 **Parameter description**: Specifies how many days in advance a user is notified before a password expires.
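A hedged sketch of combining the password-expiry and notification windows described above (both SIGHUP level; the values are illustrative only):

```sql
-- Passwords expire after 90 days; users are warned 7 days in advance.
ALTER SYSTEM SET password_effect_time = 90;
ALTER SYSTEM SET password_notify_time = 7;
```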
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-pool-parameters.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-pool-parameters.md
index 3ec89fac..7bd393ba 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-pool-parameters.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/connection-pool-parameters.md
@@ -1,47 +1,23 @@
----
-title: Connection Pool Parameters
-summary: Connection Pool Parameters
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Connection Pool Parameters
-
-When a connection pool is used to access the database, database connections are established and then stored in the memory as objects during system running. When you need to access the database, no new connection is established. Instead, an existing idle connection is selected from the connection pool. After you finish accessing the database, the database does not disable the connection but puts it back into the connection pool. The connection can be used for the next access request.
-
-## pooler_maximum_idle_time
-
-**Parameter description**: Specifies the maximum amount of time that the connections can remain idle in a pool before being removed. After that, the automatic connection clearing mechanism is triggered to reduce the number of connections on each node to the value of **minimum_pool_size**.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This parameter does not take effect in this version.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to *INT_MAX*. The smallest unit is m.
-
-**Default value**: **1h** (60 minutes)
-
-## minimum_pool_size
-
-**Parameter description**: Specifies the minimum number of remaining connections in the pool on each node after the automatic connection clearing is triggered. If this parameter is set to **0**, the automatic connection clearing is disabled.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This parameter does not take effect in this version.
-
-This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 1 to 65535
-
-**Default value**: **200**
-
-## cache_connection
-
-**Parameter description**: Specifies whether to reclaim the connections of a connection pool.
-
-This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md).
-
-**Value range**: Boolean
-
-- **on** indicates that the connections of a connection pool will be reclaimed.
-- **off** indicates that the connections of a connection pool will not be reclaimed.
-
-**Default value**: **on**
+---
+title: Connection Pool Parameters
+summary: Connection Pool Parameters
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Connection Pool Parameters
+
+When a connection pool is used to access the database, database connections are established and then stored in the memory as objects during system running. When you need to access the database, no new connection is established. Instead, an existing idle connection is selected from the connection pool. After you finish accessing the database, the database does not disable the connection but puts it back into the connection pool. The connection can be used for the next access request.
+
+## cache_connection
+
+**Parameter description**: Specifies whether to reclaim the connections of a connection pool.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that the connections of a connection pool will be reclaimed.
+- **off** indicates that the connections of a connection pool will not be reclaimed.
+
+**Default value**: **on**
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/data-import-export.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/data-import-export.md
new file mode 100644
index 00000000..da5b7296
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/data-import-export.md
@@ -0,0 +1,89 @@
+---
+title: Data Import and Export
+summary: Data Import and Export
+author: Guo Huan
+date: 2024-04-12
+---
+
+# Data Import and Export
+
+## safe_data_path
+
+**Parameter Description**: Sets the path prefix restriction for users other than the initial user. It currently restricts the paths used by COPY and advanced packages.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: String (less than 4096 characters)
+
+**Default Value**: NULL
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **Note**:
+>
+> - If there are symbolic link files under the `safe_data_path` directory, they will be processed according to the actual file path they point to. If the actual path is not under `safe_data_path`, an error will be reported.
+> - If there are hard link files under the `safe_data_path` directory, they can be used normally. For safety reasons, please use hard link files cautiously, avoid creating hard links in the `safe_data_path` directory that point outside the directory, and ensure that the permissions for the `safe_data_path` directory are minimized.
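A hedged illustration of confining server-side file access to one directory; the path is a placeholder, not a recommended location:

```sql
-- Only allow COPY and advanced-package file operations under this prefix.
ALTER SYSTEM SET safe_data_path = '/home/omm/safe_data';
SHOW safe_data_path;
```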
+
+## enable_copy_server_files
+
+**Parameter Description**: Specifies whether the permission to COPY server-side files is enabled.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Boolean
+
+- on indicates that the permission to COPY server-side files is enabled.
+
+- off indicates that the permission to COPY server-side files is disabled.
+
+**Default Value**: off
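A hedged sketch of the workflow this parameter gates; the table name and file path are hypothetical, and the file must lie under safe_data_path if one is set:

```sql
-- With enable_copy_server_files=on, a server-side COPY is permitted.
CREATE TABLE t_load(id int, name text);
COPY t_load FROM '/home/omm/safe_data/load.csv' WITH csv;
```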
+
+## enable_delta_store
+
+**Parameter Description**: To enhance the performance of single-row data import and solve disk redundancy issues for columnar storage, this parameter can be used to select whether to enable support for the columnar storage delta table functionality. When this parameter is enabled, data imported into a columnar storage table will be directed to delta table storage or main table CU storage based on the DELTAROW_THRESHOLD specified in the [CREATE TABLE](../sql-syntax/CREATE-TABLE.md) section when the table is defined. Data will enter the delta table when the data volume is less than DELTAROW_THRESHOLD.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Boolean
+
+- on indicates that the columnar storage delta table feature is enabled.
+
+- off indicates that the columnar storage delta table feature is not enabled.
+
+**Default Value**: off
+
+## partition_max_cache_size
+
+**Parameter Description**: To optimize bulk insertions into columnar storage partitioned tables, data is cached before being written to disk in bulk. The `partition_max_cache_size` can specify the size of the data cache area. Setting this value too high will consume more system memory resources; setting it too low will reduce the performance of bulk insertions into columnar storage partitioned tables.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**:
+
+Columnar storage partitioned tables: 4096 to INT_MAX / 2, with the minimum unit being KB.
+
+**Default Value**: 2GB
+
+## partition_mem_batch
+
+**Parameter Description**: To optimize bulk insertions into columnar storage partitioned tables, data is cached before being written to disk in bulk. The `partition_mem_batch` can specify the number of cache batches. Setting this value too high will consume more system memory resources; setting it too low will reduce the performance of bulk insertions into columnar storage partitioned tables.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**:
+
+Columnar storage partitioned tables: 1 to 65535.
+
+**Default Value**: 256
+
+## raise_errors_if_no_files
+
+**Parameter Description**: Specifies whether "the imported file is empty (zero records)" is distinguished from "the imported file does not exist" during import. If `raise_errors_if_no_files=TRUE`, an error is reported when the imported file does not exist.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Boolean
+
+- on indicates that a distinction is made between "the imported file is empty (zero records)" and "the imported file does not exist" during import.
+
+- off indicates that no distinction is made between "the imported file is empty (zero records)" and "the imported file does not exist" during import.
+
+**Default Value**: off
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/default-settings-of-client-connection/default-settings-of-client-connection.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/default-settings-of-client-connection/default-settings-of-client-connection.md
index 69e51bf9..faf55bd5 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/default-settings-of-client-connection/default-settings-of-client-connection.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/default-settings-of-client-connection/default-settings-of-client-connection.md
@@ -1,12 +1,12 @@
----
-title: Default Settings of Client Connection
-summary: Default Settings of Client Connection
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Default Settings of Client Connection
-
-- **[Statement Behavior](statement-behavior.md)**
-- **[Zone and Formatting](zone-and-formatting.md)**
+---
+title: Default Settings of Client Connection
+summary: Default Settings of Client Connection
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Default Settings of Client Connection
+
+- **[Statement Behavior](statement-behavior.md)**
+- **[Zone and Formatting](zone-and-formatting.md)**
 - **[Other Default Parameters](other-default-parameters.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/default-settings-of-client-connection/other-default-parameters.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/default-settings-of-client-connection/other-default-parameters.md
index 85311d64..6e6ad8a7 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/default-settings-of-client-connection/other-default-parameters.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/default-settings-of-client-connection/other-default-parameters.md
@@ -1,54 +1,54 @@
----
-title: Other Default Parameters
-summary: Other Default Parameters
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Other Default Parameters
-
-This section describes the default database loading parameters.
-
-## dynamic_library_path
-
-**Parameter description**: Specifies the path that the system will search for a shared database file that is dynamically loadable. When a dynamically loadable module needs to be opened and the file name specified in the **CREATE FUNCTION** or **LOAD** command does not have a directory component, the system will search this path for the required file.
-
-The value of **dynamic_library_path** must be a list of absolute paths separated by colons (:). When the name of a path starts with the special variable \$libdir, the variable will be replaced with the directory in which the module provided by the MogDB is installed.
For example: - -```bash -dynamic_library_path = '/usr/local/lib/mogdb:/opt/testgs/lib:$libdir' -``` - -This parameter is a SUSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: a string - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> If the value of this parameter is set to an empty character string, the automatic path search is turned off. - -**Default value**: **$libdir** - -## gin_fuzzy_search_limit - -**Parameter description**: Specifies the upper limit of the size of the set returned by GIN indexes. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: an integer ranging from 0 to 2147483647 - -**Default value**: **0** - -## local_preload_libraries - -**Parameter description**: Specifies one or more shared libraries that are to be preloaded at connection start. If multiple libraries are to be loaded, separate their names with commas (,). All library names are converted to lower case unless double-quoted. - -- Any user can change this option. Therefore, library files that can be loaded are restricted to those saved in the **plugins** subdirectory of the standard library installation directory. It is the database administrator's responsibility to ensure that libraries in this directory are all safe. Entries in **local_preload_libraries** can specify the library directory explicitly, for example, **$libdir/plugins/mylib**, or just specify the library name, for example, **mylib**. (**mylib** is equivalent to **$libdir/plugins/mylib**.) -- Unlike **shared_preload_libraries**, there are no differences in performance between loading a module at session start or doing this during the session. The intent of this feature is to allow debugging or performance-measurement libraries to be loaded into specific sessions without an explicit LOAD command. For example, debugging can be enabled under a given user name by setting this parameter to **ALTER USER SET**. -- If a specified library is not found, the connection attempt will fail. -- Every MogDB-supported library has a "magic block" that is checked to guarantee compatibility. For this reason, non-MogDB-supported libraries cannot be loaded in this way. - -This parameter is a BACKEND parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: a string - -**Default value**: empty +--- +title: Other Default Parameters +summary: Other Default Parameters +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Other Default Parameters + +This section describes the default database loading parameters. + +## dynamic_library_path + +**Parameter description**: Specifies the path that the system will search for a shared database file that is dynamically loadable. When a dynamically loadable module needs to be opened and the file name specified in the **CREATE FUNCTION** or **LOAD** command does not have a directory component, the system will search this path for the required file. + +The value of **dynamic_library_path** must be a list of absolute paths separated by colons (:). When the name of a path starts with the special variable \$libdir, the variable will be replaced with the directory in which the module provided by the MogDB is installed. 
For example:
+
+```bash
+dynamic_library_path = '/usr/local/lib/mogdb:/opt/testgs/lib:$libdir'
+```
+
+This parameter is a SUSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: a string
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+> If the value of this parameter is set to an empty character string, the automatic path search is turned off.
+
+**Default value**: **$libdir**
+
+## gin_fuzzy_search_limit
+
+**Parameter description**: Specifies the upper limit of the size of the set returned by GIN indexes.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: an integer ranging from 0 to 2147483647
+
+**Default value**: **0**
+
+## local_preload_libraries
+
+**Parameter description**: Specifies one or more shared libraries that are to be preloaded at connection start. If multiple libraries are to be loaded, separate their names with commas (,). All library names are converted to lower case unless double-quoted.
+
+- Any user can change this option. Therefore, library files that can be loaded are restricted to those saved in the **plugins** subdirectory of the standard library installation directory. It is the database administrator's responsibility to ensure that libraries in this directory are all safe. Entries in **local_preload_libraries** can specify the library directory explicitly, for example, **$libdir/plugins/mylib**, or just specify the library name, for example, **mylib**. (**mylib** is equivalent to **$libdir/plugins/mylib**.)
+- Unlike **shared_preload_libraries**, there are no differences in performance between loading a module at session start or doing this during the session. The intent of this feature is to allow debugging or performance-measurement libraries to be loaded into specific sessions without an explicit LOAD command. For example, debugging can be enabled under a given user name by setting this parameter to **ALTER USER SET**.
+- If a specified library is not found, the connection attempt will fail.
+- Every MogDB-supported library has a "magic block" that is checked to guarantee compatibility. For this reason, non-MogDB-supported libraries cannot be loaded in this way.
+
+This parameter is a BACKEND parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: a string
+
+**Default value**: empty
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/delimiter.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/delimiter.md
new file mode 100644
index 00000000..e7b75f4b
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/delimiter.md
@@ -0,0 +1,18 @@
+---
+title: Delimiter
+summary: Delimiter
+author: Guo Huan
+date: 2024-04-11
+---
+
+# Delimiter
+
+## delimiter_name
+
+**Parameter Description**: Stores the name of the delimiter.
+
+When the gsql client recognizes the delimiter, it immediately sends the SQL statements entered so far to the server for execution. This is useful when entering multiple statements that themselves contain semicolons: a special symbol can be designated as the statement terminator instead. This parameter is of the USERSET type; see the corresponding setting method in the [GUC parameter classification](appendix.md). This parameter can be set only in the gsql client, through the DELIMITER command.
+
+**Value Range**: String, with a length greater than 0
+
+**Default Value**: ";"
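A hedged sketch of typical gsql usage, assuming the DELIMITER command behaves as described above; "//" is an arbitrary choice:

```sql
-- Make "//" the terminator so embedded semicolons are not executed early.
DELIMITER //
SELECT 1; SELECT 2 //
-- Restore the default terminator.
DELIMITER ;
```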
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/developer-options.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/developer-options.md
index b29c5f1f..adf7cd9b 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/developer-options.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/developer-options.md
@@ -24,6 +24,20 @@ It is not recommended to modify the default value of this parameter. If it is se
 
 > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif)**NOTICE**: It is not recommended to modify the default value of this parameter. If it is set to on, the system tables may be damaged and the database may not start.
 
+## allow_create_sysobject
+
+**Parameter description**: Sets whether to allow the creation or modification of objects such as functions, stored procedures, synonyms, aggregate functions, and operators in a system schema. The system schemas here are the schemas that come with the database after initialization, excluding the public schema. The OIDs of system schemas are usually less than 16384.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+- on means that initial users and system administrators are allowed to create or modify objects such as functions, stored procedures, synonyms, and aggregate functions in system schemas, and that initial users are allowed to create operators there. For whether other users are allowed to create these objects, see the permission requirements of the corresponding schema.
+
+- off means that all users are prohibited from creating or modifying functions, stored procedures, synonyms, aggregate functions, operators, and other such objects in system schemas.
+
+**Default value**: on
+
 ## debug_assertions
 
 **Parameter description:** Specifies whether to enable various assertion checks. This parameter assists in debugging. If you are experiencing strange problems or crashes, set this parameter to **on** to identify programming defects. To use this parameter, the macro USE_ASSERT_CHECKING must be defined (through the configure option **--enable-cassert**) during the MogDB compilation.
@@ -281,6 +295,19 @@ This parameter is a USERSET parameter. Set it based on instructions provided in
 
 **Default value**: **off**
 
+## enable_csqual_pushdown
+
+**Parameter description**: Specifies whether filters are pushed down for rough check when a query is performed.
+
+This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+- on indicates that queries are performed with filters pushed down for rough check.
+- off indicates that filters are not pushed down for rough check during queries.
+
+**Default value**: on
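A hedged session-level toggle for comparing plans (SUSET, so administrator privileges are required; t1 is a hypothetical table):

```sql
-- Compare plans with and without rough-check filter pushdown.
SET enable_csqual_pushdown = off;
EXPLAIN SELECT * FROM t1 WHERE id < 100;
SET enable_csqual_pushdown = on;
```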
+
 ## string_hash_compatible
 
 **Parameter description**: Specifies whether to use the same method to calculate char-type hash values and varchar- or text-type hash values. Based on the setting of this parameter, you can determine whether a redistribution is required when a distribution column is converted from a char-type data distribution into a varchar- or text-type data distribution.
@@ -314,4 +341,137 @@ This parameter is a **USERSET** parameter. Set it based on instructions provided
 
 **Value range**: Boolean
 
-**Default value**: **off**
\ No newline at end of file
+**Default value**: **off**
+
+## ustore_attr
+
+**Parameter description:** Controls USTORE storage engine behavior such as table statistics collection, the rollback type, and run-time data verification for key modules (data pages, indexes, undo segments, redo replay, and so on). It is mainly used to assist R&D in locating problems.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: a string. The value is set as key-value pairs; the keys, their values, and their meanings are described below. Multiple key-value pairs are separated with ";", for example:
+
+ustore_attr='ustore_verify_level=FAST;ustore_verify_module=UPAGE:UBTREE:UNDO:REDO'.
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Description:** When setting ustore_attr, do not put spaces or other characters before or after the "=" between a key and its value. For example, with ustore_attr='ustore_verify_level = FAST;', the kernel parameter check will treat the value as invalid and the setting will fail.
+
+- ustore_verify_level: Controls the verification level.
+
+  **Value range**: a case-insensitive string; see the table below for details.
+
+  **Table 1** Meaning of ustore_verify_level values.
+
+  | parameter value | meaning |
+  | :-------------- | :----------------------------------------------------------- |
+  | NONE | Verification is disabled. This setting is recommended for performance testing scenarios. |
+  | FAST | Fast verification, with less content verified and minimal performance impact. |
+  | COMPLETE | Complete verification, with the most content verified and a larger performance impact. |
+
+  **Default value**: FAST
+
+- ustore_verify_module: Controls the modules to verify.
+
+  **Value range**: a string; set one or more of the values UPAGE, UBTREE, UNDO, and REDO, or ALL or NULL alone (case insensitive). When combining several of UPAGE, UBTREE, UNDO, and REDO, use ":" as the connector, for example ustore_verify_module=UPAGE:UBTREE:UNDO:REDO.
+
+  **Table 2** Meaning of ustore_verify_module values.
+
+  | parameter value | meaning |
+  | :-------------- | :----------------------------------------------------------- |
+  | UPAGE | Enables data page verification. |
+  | UBTREE | Enables UBTREE index verification. |
+  | UNDO | Enables rollback segment (undo) data verification. |
+  | REDO | Enables data page verification during the redo process. |
+  | ROACH | Enables data page verification for ROACH backups. |
+  | ALL | Enables verification for the UPAGE, UBTREE, UNDO, and REDO modules. |
+  | NULL | Disables verification for the UPAGE, UBTREE, UNDO, and REDO modules. |
+
+  **Default value**: UPAGE:UBTREE:UNDO
+
+- index_trace_level: Controls whether index tracing is enabled and at which level information is printed. When enabled, information about eligible index tuples is printed at the configured level during index scans.
+
+  **Value range**: a string, with values described in the table below.
+
+  **Default value**: NO
+
+  **Table 3** Meaning of index_trace_level values.
+
+  | parameter value | meaning |
+  | :-------------- | :----------------------------------------------------------- |
+  | NO | No additional information is printed. |
+  | NORMAL | Prints information about **visible index tuples**, including the index page number and offset of the current index tuple, the current tuple status, the TID and partOid of the current tuple, the xmin and xmax of the current tuple, and the current tuple content (if enable_log_tuple is enabled). |
+  | VISIBILITY | On top of NORMAL, additionally prints information about index tuples that do not pass the visibility check, and marks each tuple as visible or not. |
+  | SHOWHIKEY | On top of VISIBILITY, tries to print information about the HIKEY tuple on the page. |
+  | ALL | Prints information about all tuples on the scanned index pages. |
+
+- enable_log_tuple: Controls whether the contents of related tuples may be printed together with log-level alert messages, for troubleshooting and fault locating.
+
+  **Value range**: Boolean
+
+  **Default value**: off
+
+  Note: This parameter is deprecated.
+
+- enable_ustore_sync_rollback: Controls whether synchronous rollback is enabled for USTORE tables.
+
+  **Value range**: Boolean
+
+  **Default value**: true
+
+- enable_ustore_async_rollback: Controls whether asynchronous rollback is enabled for USTORE tables.
+
+  **Value range**: Boolean
+
+  **Default value**: true
+
+- enable_ustore_page_rollback: Controls whether page rollback is enabled for USTORE tables.
+
+  **Value range**: Boolean
+
+  **Default value**: true
+
+- enable_ustore_partial_seqscan: Controls whether partial scanning is enabled for USTORE tables.
+
+  **Value range**: Boolean
+
+  **Default value**: false
+
+- enable_candidate_buf_usage_count: Enables or disables cache usage counting statistics.
+
+  **Value range**: Boolean
+
+  **Default value**: false
+
+- ustats_tracker_naptime: Controls the period for collecting USTORE table statistics.
+
+  **Value range**: 1~INT_MAX/1000
+
+  **Default value**: 20, in seconds
+
+- umax_search_length_for_prune: Controls the maximum search depth of the USTORE table prune operation.
+
+  **Value range**: 1~INT_MAX/1000
+
+  **Default value**: 10, in times
+
+- ustore_unit_test: Specifies test parameters for R&D white-box testing.
+
+  **Value range**: String
+
+  **Default value**: null
+
+**Default value**: empty string
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-warning.gif) **NOTICE**:
+>
+> - The ustore_attr parameter should be set with caution; you are advised to modify it with the assistance of an engineer.
+> - This parameter only applies to USTORE storage engine tables on a centralized or standalone MogDB.
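A hedged session-level sketch using the key-value format shown above:

```sql
-- Fast verification for data pages and indexes only, in this session.
SET ustore_attr = 'ustore_verify_level=FAST;ustore_verify_module=UPAGE:UBTREE';
SHOW ustore_attr;
```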
+
+## ifnull_all_return_text
+
+**Parameter description**: When enabled, both **ifnull** and **coalesce** return the text type to ensure compatibility.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+**Default value**: off
\ No newline at end of file
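A hedged illustration of the effect, assuming a B-compatible database where **ifnull** is available; the result type shown is what the description implies:

```sql
SET ifnull_all_return_text = on;
-- With the parameter on, the result type is text rather than a numeric type.
SELECT pg_typeof(ifnull(NULL, 1));
```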
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/error-reporting-and-logging.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/error-reporting-and-logging.md
index 39f4d9db..8a5ebee6 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/error-reporting-and-logging.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/error-reporting-and-logging.md
@@ -1,13 +1,13 @@
----
-title: Error Reporting and Logging
-summary: Error Reporting and Logging
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Error Reporting and Logging
-
-- **[Logging Destination](logging-destination.md)**
-- **[Logging Time](logging-time.md)**
-- **[Logging Content](logging-content.md)**
+---
+title: Error Reporting and Logging
+summary: Error Reporting and Logging
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Error Reporting and Logging
+
+- **[Logging Destination](logging-destination.md)**
+- **[Logging Time](logging-time.md)**
+- **[Logging Content](logging-content.md)**
 - **[Using CSV Log Output](using-csv-log-output.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md
index c98dee1d..d401e4e6 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md
@@ -304,6 +304,16 @@ MogDB=# show logging_module;
 
 **Dependency**: The value of this parameter depends on the settings of **log_min_messages**.
 
+## max_error_count
+
+**Parameter description**: Specifies the maximum total number of error messages to display.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: 0 to 65535
+
+**Default value**: 64
+
 ## opfusion_debug_mode
 
 **Parameter description**: Checks whether simple queries are optimized for debugging. If this parameter is set to **log**, you can view the specific reasons why queries are not optimized in the execution plans of database nodes.
@@ -332,3 +342,16 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T
 
 - **off/false** indicates that output of VACUUM-related logs is disallowed.
 
 **Default value**: **off**
+
+## debug_select_o
+
+**Parameter description**: Controls whether logs of the SELECT transaction autocommit function are output, to make problem locating easier.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- on/true indicates that this function is enabled.
+- off/false indicates that this function is disabled.
+ +**Default value**: **off** diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/using-csv-log-output.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/using-csv-log-output.md index 54d6573f..504075b1 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/using-csv-log-output.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/error-reporting-and-logging/using-csv-log-output.md @@ -1,88 +1,88 @@ ---- -title: Using CSV Log Output -summary: Using CSV Log Output -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Using CSV Log Output - -**Prerequisites** - -- The [log_destination](logging-destination.md#log_destination) parameter is set to **csvlog**. -- The [logging_collector](logging-destination.md#logging_collector) parameter is set to **on**. - -**Definition of csvlog** - -Log lines are emitted in comma separated values (CSV) format. - -An example table definition for storing CSV-format log output is shown as follows: - -``` -CREATE TABLE mogdb_log -( -log_time timestamp(3) with time zone, -node_name text, -user_name text, -database_name text, -process_id bigint, -connection_from text, -"session_id" text, -session_line_num bigint, -command_tag text, -session_start_time timestamp with time zone, -virtual_transaction_id text, -transaction_id bigint, -query_id bigint, -module text, -error_severity text, -sql_state_code text, -message text, -detail text, -hint text, -internal_query text, -internal_query_pos integer, -context text, -query text, -query_pos integer, -location text, -application_name text -); -``` - -For details, see [Table 1](#meaning). - -**Table 1** Meaning of each csvlog field - -| Column | Description | Column | Description | -| :--------------------- | :------------------------------ | :----------------- | :----------------------------------------------------------- | -| log_time | Timestamp in milliseconds | module | Module to which the log belongs. | -| node_name | Node name | error_severity | ERRORSTATE code | -| user_name | Username | sql_state_code | SQLSTATE code | -| database_name | Database name | message | Error message | -| process_id | Process ID | detail | Detailed error message | -| connection_from | Port number of the client host | hint | Prompt message | -| session_id | Session ID | internal_query | Internal query (This field is used to query the information leading to errors if any.) | -| session_line_num | Number of lines in each session | internal_query_pos | Pointer for an internal query | -| command_tag | Command tag | context | Environment | -| session_start_time | Start time of a session | query | Character count at the position where errors occur | -| virtual_transaction_id | Regular transaction | query_pos | Pointer at the position where errors occur | -| transaction_id | Transaction ID | location | Position where errors occur in the MogDB source code if **log_error_verbosity** is set to **verbose** | -| query_id | Query ID | application_name | Application name | - -Run the following command to import a log file to this table: - -``` -COPY mogdb_log FROM '/opt/data/pg_log/logfile.csv' WITH csv; -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> The log name (**logfile.csv**) here needs to be replaced with the name of a log generated. 
-
-**Simplifying Input**
-
-Simplify importing CSV log files by performing the following operations:
-
-- Set [log_filename](logging-destination.md#log_filename) and [log_rotation_age](logging-destination.md#log_rotation_age) to provide a consistent, predictable naming solution for log files. By doing this, you can predict when an individual log file is complete and ready to be imported.
-- Set [log_rotation_size](logging-destination.md#log_rotation_size) to **0** to disable size-based log rollback, as it makes the log file name difficult to predict.
-- Set [log_truncate_on_rotation](logging-destination.md#log_truncate_on_rotation) to **on** so that old log data cannot be mixed with the new one in the same file.
+---
+title: Using CSV Log Output
+summary: Using CSV Log Output
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Using CSV Log Output
+
+**Prerequisites**
+
+- The [log_destination](logging-destination.md#log_destination) parameter is set to **csvlog**.
+- The [logging_collector](logging-destination.md#logging_collector) parameter is set to **on**.
+
+**Definition of csvlog**
+
+Log lines are emitted in comma-separated values (CSV) format.
+
+The following is an example table definition for storing CSV-format log output:
+
+```
+CREATE TABLE mogdb_log
+(
+log_time timestamp(3) with time zone,
+node_name text,
+user_name text,
+database_name text,
+process_id bigint,
+connection_from text,
+"session_id" text,
+session_line_num bigint,
+command_tag text,
+session_start_time timestamp with time zone,
+virtual_transaction_id text,
+transaction_id bigint,
+query_id bigint,
+module text,
+error_severity text,
+sql_state_code text,
+message text,
+detail text,
+hint text,
+internal_query text,
+internal_query_pos integer,
+context text,
+query text,
+query_pos integer,
+location text,
+application_name text
+);
+```
+
+For details, see [Table 1](#meaning).
+
+**Table 1** Meaning of each csvlog field
+
+| Column | Description | Column | Description |
+| :--------------------- | :------------------------------ | :----------------- | :----------------------------------------------------------- |
+| log_time | Timestamp in milliseconds | module | Module to which the log belongs |
+| node_name | Node name | error_severity | Error severity |
+| user_name | Username | sql_state_code | SQLSTATE code |
+| database_name | Database name | message | Error message |
+| process_id | Process ID | detail | Detailed error message |
+| connection_from | Host name and port number of the client | hint | Prompt message |
+| session_id | Session ID | internal_query | Internal query (This field is used to query the information leading to errors if any.) |
+| session_line_num | Line number within the session | internal_query_pos | Character position of the error in the internal query |
+| command_tag | Command tag | context | Error context |
+| session_start_time | Start time of a session | query | Text of the query that caused the error |
+| virtual_transaction_id | Virtual transaction ID | query_pos | Character position of the error in the query text |
+| transaction_id | Transaction ID | location | Position where errors occur in the MogDB source code if **log_error_verbosity** is set to **verbose** |
+| query_id | Query ID | application_name | Application name |
+
+Run the following command to import a log file to this table:
+
+```
+COPY mogdb_log FROM '/opt/data/pg_log/logfile.csv' WITH csv;
+```
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+> Replace the log file name (**logfile.csv**) with the name of an actually generated log file.
+
+**Simplifying Input**
+
+Simplify importing CSV log files by performing the following operations (a usage sketch follows this list):
+
+- Set [log_filename](logging-destination.md#log_filename) and [log_rotation_age](logging-destination.md#log_rotation_age) to provide a consistent, predictable naming scheme for log files. By doing this, you can predict when an individual log file is complete and ready to be imported.
+- Set [log_rotation_size](logging-destination.md#log_rotation_size) to **0** to disable size-based log rotation, as it makes the log file name difficult to predict.
+- Set [log_truncate_on_rotation](logging-destination.md#log_truncate_on_rotation) to **on** so that old log data is not mixed with new data in the same file.
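+
+A minimal usage sketch, assuming the settings above are in place and that a completed CSV log file named **postgresql-2024-10-16_000000.csv** (a hypothetical name produced by a time-based **log_filename** pattern) exists in the log directory:
+
+```
+-- Import one completed CSV log file; the file name below is hypothetical.
+COPY mogdb_log FROM '/opt/data/pg_log/postgresql-2024-10-16_000000.csv' WITH csv;
+
+-- Imported logs can then be analyzed with ordinary SQL, for example:
+SELECT log_time, error_severity, message
+FROM mogdb_log
+WHERE error_severity = 'ERROR'
+ORDER BY log_time;
+```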
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/fault-tolerance.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/fault-tolerance.md
index c86b5f31..745a32bb 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/fault-tolerance.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/fault-tolerance.md
@@ -71,16 +71,6 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided
 
 **Default value**: **8kB**
 
-## max_cn_temp_file_size
-
-**Parameter description**: Specifies the maximum number of temporary files that can be used by the the primary node of the database during automatic SQL statement retries. The value **0** indicates that no temporary file is used.
-
-This parameter is a **SIGHUP** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to 10485760. The unit is kB.
-
-**Default value**: **5 GB**
-
 ## retry_ecode_list
 
 **Parameter description**: Specifies the list of SQL error types that support automatic retry.
@@ -117,3 +107,29 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided
 
 - **authentication** indicates that the remote read function is enabled and certificate authentication is required.
 
 **Default value**: **authentication**
+
+## data_sync_failed_ignore
+
+**Parameter description**: Controls whether the pagewriter discards the items that are pending synchronization when an fsync operation fails.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+- **on**: An fsync failure triggers a core dump and stops the database.
+- **off**: A failed fsync is retried without producing a core dump, and the database can be stopped normally.
+
+**Default value**: **off**
+
+## damage_page_ignore
+
+**Parameter description**: Controls whether pages found to be corrupted during redo are ignored so that the database can be forced to start.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+- **on**: Damaged pages are ignored and the database is forced to start.
+- **off**: Damaged pages are not ignored, and the database fails to start when one is encountered.
+
+**Default value**: **off**
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/flashback.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/flashback.md
index 1a360806..d3593cc1 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/flashback.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/flashback.md
@@ -1,81 +1,66 @@
----
-title: Flashback
-summary: Flashback
-author: Zhang Cuiping
-date: 2021-11-08
----
-
-# Flashback
-
-This section describes parameters related to the flashback function.
-
-## enable_recyclebin
-
-**Parameter description**: Specifies whether the recycle bin is enabled or disabled in real time.
-
-This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: Boolean
-
-**Default value**: **off**
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-warning.gif) **Caution:** The recycle bin does not support Astore and supports only Ustore.
-
-## timecapsule_mode
-
-**Parameter description**: Specifies whether to enable the flashback function.
-
-This parameter is a **SIGHUP** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: enumerated values
-
-- **none** indicates that the flashback function is disabled.
-- **heap** indicates that only the flashback function of the Heap engine is enabled.
-- **ustore** indicates that only the flashback function of the UStore engine is enabled.
-- **all** indicates that the flashback function is enabled for both the Heap and UStore engines.
-
-**Default value**: **none**
-
-## recyclebin_retention_time
-
-**Parameter description**: Specifies the retention period of objects in the recycle bin. The objects will be automatically deleted after the retention period expires.
-
-This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 1 to 2147483647. The unit is s.
-
-Default value: **15 min** (900s)
-
-## version_retention_age
-
-**Parameter description**: Specifies the number of transactions retained in the old version. If the number of transactions exceeds the value of this parameter, the old version will be recycled and cleared.
-
-This parameter is a **SIGHUP** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to 576460752303423487. **0** means no delay.
-
-**Default value**: **0**
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** This parameter has been deprecated.
-
-## vacuum_defer_cleanup_age
-
-**Parameter description**: Specifies the number of transactions by which **VACUUM** will defer the cleanup of invalid row-store table records, so that **VACUUM** and **VACUUM FULL** do not clean up deleted tuples immediately. You can also set this parameter to configure the retention period of the flashback function in the old version.
-
-This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to 1000000. **0** means no delay. The value range needs to be extended to 100 million.
-
-**Default value**: **0**
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** This parameter can be ignored when you use the Ustore engine to flash back. It serves the Astore flashback function of the earlier version and has other functions. The flashback function is not used in this version.
-
-## undo_retention_time
-
-**Parameter description**: Specifies the period for retaining undo logs of earlier versions.
-
-This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to 2147483647. The unit is second.
-
+---
+title: Flashback
+summary: Flashback
+author: Zhang Cuiping
+date: 2021-11-08
+---
+
+# Flashback
+
+This section describes parameters related to the flashback function.
+
+## enable_recyclebin
+
+**Parameter description**: Specifies whether the recycle bin is enabled or disabled in real time.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+**Default value**: **off**
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-warning.gif) **Caution:** The recycle bin does not support Astore and supports only Ustore.
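+
+A minimal sketch of the recycle bin in action, assuming **enable_recyclebin** is **on**; the table name is hypothetical, and the table must use the Ustore storage engine:
+
+```
+-- Create a Ustore table, drop it, and then restore it from the recycle bin.
+CREATE TABLE recyclebin_demo (id int) WITH (storage_type = ustore);
+DROP TABLE recyclebin_demo;
+
+-- Flash back the dropped table from the recycle bin.
+TIMECAPSULE TABLE recyclebin_demo TO BEFORE DROP;
+```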
+
+## recyclebin_retention_time
+
+**Parameter description**: Specifies the retention period of objects in the recycle bin. The objects will be automatically deleted after the retention period expires.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from 1 to 2147483647. The unit is s.
+
+**Default value**: **15 min** (900s)
+
+## version_retention_age
+
+**Parameter description**: Specifies the number of transactions retained in the old version. If the number of transactions exceeds the value of this parameter, the old version will be recycled and cleared.
+
+This parameter is a **SIGHUP** parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from 0 to 576460752303423487. **0** means no delay.
+
+**Default value**: **0**
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** This parameter has been deprecated.
+
+## vacuum_defer_cleanup_age
+
+**Parameter description**: Specifies the number of transactions by which **VACUUM** will defer the cleanup of invalid row-store table records, so that **VACUUM** and **VACUUM FULL** do not clean up deleted tuples immediately. You can also set this parameter to configure the retention period of the flashback function in the old version.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from 0 to 1000000. **0** means no delay. (The value range is planned to be extended to 100 million.)
+
+**Default value**: **0**
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** This parameter can be ignored when you use the Ustore engine for flashback. It served the Astore flashback function in earlier versions and also has other uses; the Astore flashback function is not used in this version.
+
+## undo_retention_time
+
+**Parameter description**: Specifies the period for retaining undo logs of earlier versions.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from 0 to 2147483647. The unit is second.
+
 **Default value**: **0**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/global-temporary-table.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/global-temporary-table.md
index 8153aa7a..d8019277 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/global-temporary-table.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/global-temporary-table.md
@@ -1,31 +1,44 @@
----
-title: Global Temporary Table
-summary: Global Temporary Table
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Global Temporary Table
-
-## max_active_global_temporary_table
-
-**Parameter description**: Specifies whether global temporary tables can be created.
-
-This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to 1000000
-
-- **0**: The global temporary table function is disabled.
-- > 0: The global temporary table function is enabled.
-
-**Default value**: **1000**
-
-## vacuum_gtt_defer_check_age
-
-**Parameter description**: Checks the differences between the global temporary table relfrozenxid and the ordinary table after VACUUM is executed. WARNING is generated if the difference value exceeds the specified parameter value. Use the default value for this parameter.
-
-This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to 1000000
-
-**Default value**: **10000**
+---
+title: Global Temporary Table
+summary: Global Temporary Table
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Global Temporary Table
+
+## max_active_global_temporary_table
+
+**Parameter description**: Specifies the maximum number of active global temporary tables, which also determines whether global temporary tables can be created.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from 0 to 1000000
+
+- **0**: The global temporary table function is disabled.
+- **> 0**: The global temporary table function is enabled.
+
+**Default value**: **1000**
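+
+A minimal sketch, assuming **max_active_global_temporary_table** is greater than **0**; the table name is hypothetical:
+
+```
+-- Rows in a global temporary table are private to the session that inserts them.
+CREATE GLOBAL TEMPORARY TABLE gtt_demo (id int, note text) ON COMMIT PRESERVE ROWS;
+INSERT INTO gtt_demo VALUES (1, 'visible only in this session');
+SELECT * FROM gtt_demo;
+```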
+
+## vacuum_gtt_defer_check_age
+
+**Parameter description**: Specifies the permitted difference between the relfrozenxid of a global temporary table and that of an ordinary table after VACUUM is executed. A WARNING is generated if the difference exceeds the specified value. Use the default value for this parameter.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: an integer ranging from 0 to 1000000
+
+**Default value**: **10000**
+
+## enable_gtt_concurrent_truncate
+
+**Parameter description**: Specifies whether **TRUNCATE TABLE** on a global temporary table can run concurrently with DML on that table, and whether two **TRUNCATE TABLE** operations on the same global temporary table can run concurrently.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+- **on/true**: The concurrent operations described above are supported.
+- **off/false**: The concurrent operations described above are not supported.
+
+**Default value**: **on**
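+
+A minimal sketch, assuming **enable_gtt_concurrent_truncate** is **on** and reusing the hypothetical **gtt_demo** table from the example above:
+
+```
+-- Because each session sees only its own rows, this TRUNCATE clears only the
+-- current session's data, and with enable_gtt_concurrent_truncate set to on
+-- it can run while another session executes DML or TRUNCATE on the same table.
+TRUNCATE TABLE gtt_demo;
+```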
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/guc-parameter-list.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/guc-parameter-list.md
index a3fb1fd8..297f8bcc 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/guc-parameter-list.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/guc-parameter-list.md
@@ -7,648 +7,806 @@ date: 2022-05-26
 
 # GUC Parameter List
 
-| GUC Parameter Index |
-| ------------------------------------------------------------ |
-| [acce_min_datasize_per_thread](reserved-parameters.md) |
-| [advance_xlog_file_num](developer-options.md#advance_xlog_file_num) |
-| [alarm_report_interval](alarm-detection.md#alarm_report_interval) |
-| [allocate_mem_cost](../../reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md#allocate_mem_cost) |
-| [allow_concurrent_tuple_update](MogDB-transaction.md#allow_concurrent_tuple_update) |
-| [allow_system_table_mods](developer-options.md#allow_system_table_mods) |
-| [analysis_options](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#analysis_options) |
-| [application_name](../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#application_name) |
-| [archive_command](../../reference-guide/guc-parameters/write-ahead-log/archiving.md#archive_command) |
-| [archive_dest](../../reference-guide/guc-parameters/write-ahead-log/archiving.md#archive_dest) |
-| [archive_mode](../../reference-guide/guc-parameters/write-ahead-log/archiving.md#archive_mode) |
-| [archive_timeout](../../reference-guide/guc-parameters/write-ahead-log/archiving.md#archive_timeout) |
-| [array_nulls](../../reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md#array_nulls) |
-| [asp_flush_mode](system-performance-snapshot.md#asp_flush_mode) |
-| [asp_flush_rate](system-performance-snapshot.md#asp_flush_rate) |
-| [asp_log_filename](system-performance-snapshot.md#asp_log_filename) |
-| [asp_retention_days](system-performance-snapshot.md#asp_retention_days) |
-| [asp_sample_interval](system-performance-snapshot.md#asp_sample_interval) |
-| [asp_sample_num](system-performance-snapshot.md#asp_sample_num) |
-| [audit_copy_exec](../../reference-guide/guc-parameters/auditing/operation-audit.md#audit_copy_exec) |
-| [audit_data_format](../../reference-guide/guc-parameters/auditing/audit-switch.md#audit_data_format) |
-| [audit_database_process](../../reference-guide/guc-parameters/auditing/user-and-permission-audit.md#audit_database_process) |
-| [audit_directory](../../reference-guide/guc-parameters/auditing/audit-switch.md#audit_directory) |
-| [audit_dml_state](../../reference-guide/guc-parameters/auditing/operation-audit.md#audit_dml_state) |
-| [audit_dml_state_select](../../reference-guide/guc-parameters/auditing/operation-audit.md#audit_dml_state_select) |
-| [audit_enabled](../../reference-guide/guc-parameters/auditing/audit-switch.md#audit_enabled) |
-| [audit_file_remain_threshold](../../reference-guide/guc-parameters/auditing/audit-switch.md#audit_file_remain_threshold) |
-| [audit_file_remain_time](../../reference-guide/guc-parameters/auditing/audit-switch.md#audit_file_remain_time) |
-| [audit_function_exec](../../reference-guide/guc-parameters/auditing/operation-audit.md#audit_function_exec) |
-| [audit_grant_revoke](../../reference-guide/guc-parameters/auditing/user-and-permission-audit.md#audit_grant_revoke) |
-| [audit_login_logout](../../reference-guide/guc-parameters/auditing/user-and-permission-audit.md#audit_login_logout) |
-| [audit_resource_policy](../../reference-guide/guc-parameters/auditing/audit-switch.md#audit_resource_policy) |
-| [audit_rotation_interval](../../reference-guide/guc-parameters/auditing/audit-switch.md#audit_rotation_interval) |
-| [audit_rotation_size](../../reference-guide/guc-parameters/auditing/audit-switch.md#audit_rotation_size) |
-| [audit_set_parameter](../../reference-guide/guc-parameters/auditing/operation-audit.md#audit_set_parameter) |
-| [audit_space_limit](../../reference-guide/guc-parameters/auditing/audit-switch.md#audit_space_limit) |
-| [audit_system_object](../../reference-guide/guc-parameters/auditing/operation-audit.md#audit_system_object) |
-| [audit_thread_num](../../reference-guide/guc-parameters/auditing/audit-switch.md#audit_thread_num) |
-| [audit_user_locked](../../reference-guide/guc-parameters/auditing/user-and-permission-audit.md#audit_user_locked) |
-| [audit_user_violation](../../reference-guide/guc-parameters/auditing/user-and-permission-audit.md#audit_user_violation) |
-| [audit_xid_info](../../reference-guide/guc-parameters/auditing/operation-audit.md#audit_xid_info) |
-| [auth_iteration_count](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#auth_iteration_count) |
-| [authentication_timeout](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#authentication_timeout) |
-| [auto_explain_level](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#auto_explain_level) |
-| [autoanalyze](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#autoanalyze) |
-| [autoanalyze_timeout](automatic-vacuuming.md#autoanalyze_timeout) |
-| [autovacuum](automatic-vacuuming.md#autovacuum) |
-| [autovacuum_analyze_scale_factor](automatic-vacuuming.md#autovacuum_analyze_scale_factor) |
-| [autovacuum_analyze_threshold](automatic-vacuuming.md#autovacuum_analyze_threshold) |
-| [autovacuum_freeze_max_age](automatic-vacuuming.md#autovacuum_freeze_max_age) |
-| [autovacuum_io_limits](automatic-vacuuming.md#autovacuum_io_limits) |
-| [autovacuum_max_workers](automatic-vacuuming.md#autovacuum_max_workers) |
-| [autovacuum_mode](automatic-vacuuming.md#autovacuum_mode) |
-| [autovacuum_naptime](automatic-vacuuming.md#autovacuum_naptime) |
-| [autovacuum_vacuum_cost_delay](automatic-vacuuming.md#autovacuum_vacuum_cost_delay) |
-| [autovacuum_vacuum_cost_limit](automatic-vacuuming.md#autovacuum_vacuum_cost_limit) |
-| [autovacuum_vacuum_scale_factor](automatic-vacuuming.md#autovacuum_vacuum_scale_factor) |
-| [autovacuum_vacuum_threshold](automatic-vacuuming.md#autovacuum_vacuum_threshold) |
-| [available_zone](../../reference-guide/guc-parameters/ha-replication/sending-server.md#available_zone) |
-| [backend_flush_after](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#backend_flush_after) |
-| [backend_version](reserved-parameters.md) |
-| [backslash_quote](../../reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md#backslash_quote) |
-| [backtrace_min_messages](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-time.md#backtrace_min_messages) |
-| 
[backwrite_quantity](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#backwrite_quantity) | -| [basebackup_timeout](miscellaneous-parameters.md#basebackup_timeout) | -| [bbox_blanklist_items](load-management.md#bbox_blanklist_items) | -| [bbox_dump_count](load-management.md#bbox_dump_count) | -| [bbox_dump_path](load-management.md#bbox_dump_path) | -| [behavior_compat_options](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options) | -| [bgwriter_delay](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#bgwriter_delay) | -| [bgwriter_flush_after](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#bgwriter_flush_after) | -| [bgwriter_lru_maxpages](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#bgwriter_lru_maxpages) | -| [bgwriter_lru_multiplier](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#bgwriter_lru_multiplier) | -| [bgwriter_thread_num](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#bgwriter_thread_num) | -| [block_encryption_mode](../../reference-guide/guc-parameters/security-configuration.md#block_encryption_mode) | -| [block_size](miscellaneous-parameters.md#block_size) | -| [bulk_read_ring_size](../../reference-guide/guc-parameters/resource-consumption/memory.md#bulk_read_ring_size) | -| [bulk_write_ring_size](../../reference-guide/guc-parameters/resource-consumption/memory.md#bulk_write_ring_size) | -| [bytea_output](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#bytea_output) | -| [cache_connection](connection-pool-parameters.md#cache_connection) | -| [candidate_buf_percent_target](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#candidate_buf_percent_target) | -| [catchup2normal_wait_time](../../reference-guide/guc-parameters/ha-replication/primary-server.md#catchup2normal_wait_time) | -| [cgroup_name](load-management.md#cgroup_name) | -| [check_function_bodies](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#check_function_bodies) | -| [check_implicit_conversions](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#check_implicit_conversions) | -| [checkpoint_completion_target](../../reference-guide/guc-parameters/write-ahead-log/checkpoints.md#checkpoint_completion_target) | -| [checkpoint_flush_after](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#checkpoint_flush_after) | -| [checkpoint_segments](../../reference-guide/guc-parameters/write-ahead-log/checkpoints.md#checkpoint_segments) | -| [checkpoint_timeout](../../reference-guide/guc-parameters/write-ahead-log/checkpoints.md#checkpoint_timeout) | -| [checkpoint_wait_timeout](../../reference-guide/guc-parameters/write-ahead-log/checkpoints.md#checkpoint_wait_timeout) | -| [checkpoint_warning](../../reference-guide/guc-parameters/write-ahead-log/checkpoints.md#checkpoint_warning) | -| [client_encoding](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#client_encoding) | -| [client_min_messages](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-time.md#client_min_messages) | -| [cn_send_buffer_size](fault-tolerance.md#cn_send_buffer_size) | -| 
[codegen_cost_threshold](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#codegen_cost_threshold) | -| [codegen_strategy](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#codegen_strategy) | -| [comm_proxy_attr](../../reference-guide/guc-parameters/connection-and-authentication/communication-library-parameters.md#comm_proxy_attr) | -| [commit_delay](../../reference-guide/guc-parameters/write-ahead-log/settings.md#commit_delay) | -| [commit_siblings](../../reference-guide/guc-parameters/write-ahead-log/settings.md#commit_siblings) | -| [config_file](file-location.md#config_file) | -| [connection_alarm_rate](alarm-detection.md#connection_alarm_rate) | -| [connection_info](../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#connection_info) | -| [constraint_exclusion](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#constraint_exclusion) | -| [convert_string_to_digit](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#convert_string_to_digit) | -| [cost_param](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#cost_param) | -| [cost_weight_index](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#cost_weight_index) | -| [cpu_collect_timer](load-management.md#cpu_collect_timer) | -| [cpu_index_tuple_cost](../../reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md#cpu_index_tuple_cost) | -| [cpu_operator_cost](../../reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md#cpu_operator_cost) | -| [cpu_tuple_cost](../../reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md#cpu_tuple_cost) | -| [cross_cluster_replconninfoN](../../reference-guide/guc-parameters/ha-replication/sending-server.md#cross_cluster_replconninfon) | -| [cstore_backwrite_max_threshold](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#cstore_backwrite_max_threshold) | -| [cstore_backwrite_quantity](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#cstore_backwrite_quantity) | -| [cstore_buffers](../../reference-guide/guc-parameters/resource-consumption/memory.md#cstore_buffers) | -| [cstore_insert_mode](reserved-parameters.md) | -| [cstore_prefetch_quantity](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#cstore_prefetch_quantity) | -| [current_schema](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#current_schema) | -| [cursor_tuple_fraction](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#cursor_tuple_fraction) | -| [data_directory](file-location.md#data_directory) | -| [data_replicate_buffer_size](../../reference-guide/guc-parameters/ha-replication/primary-server.md#data_replicate_buffer_size) | -| [data_sync_retry](fault-tolerance.md#data_sync_retry) | -| [datanode_heartbeat_interval](miscellaneous-parameters.md#datanode_heartbeat_interval) | -| [DateStyle](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#datestyle) | -| [db4ai_snapshot_mode](AI-features.md#db4ai_snapshot_mode) | -| [db4ai_snapshot_version_delimiter](AI-features.md#db4ai_snapshot_version_delimiter) | -| [db4ai_snapshot_version_separator](AI-features.md#db4ai_snapshot_version_separator) | -| 
[dcf_compress_algorithm](DCF-parameters-settings.md#dcf_compress_algorithm) | -| [dcf_compress_level](DCF-parameters-settings.md#dcf_compress_level) | -| [dcf_config](DCF-parameters-settings.md#dcf_config) | -| [dcf_connect_timeout](DCF-parameters-settings.md#dcf_connect_timeout) | -| [dcf_data_path](DCF-parameters-settings.md#dcf_data_path) | -| [dcf_election_switch_threshold](DCF-parameters-settings.md#dcf_election_switch_threshold) | -| [dcf_election_timeout](DCF-parameters-settings.md#dcf_election_timeout) | -| [dcf_enable_auto_election_priority](DCF-parameters-settings.md#dcf_enable_auto_election_priority) | -| [dcf_flow_control_cpu_threshold](DCF-parameters-settings.md#dcf_flow_control_cpu_threshold) | -| [dcf_flow_control_disk_rawait_threshold](DCF-parameters-settings.md#dcf_flow_control_disk_rawait_threshold) | -| [dcf_flow_control_net_queue_message_num_threshold](DCF-parameters-settings.md#dcf_flow_control_net_queue_message_num_threshold) | -| [dcf_log_backup_file_count](DCF-parameters-settings.md#dcf_log_backup_file_count) | -| [dcf_log_file_permission](DCF-parameters-settings.md#dcf_log_file_permission) | -| [dcf_log_level](DCF-parameters-settings.md#dcf_log_level) | -| [dcf_log_path](DCF-parameters-settings.md#dcf_log_path) | -| [dcf_log_path_permission](DCF-parameters-settings.md#dcf_log_path_permission) | -| [dcf_majority_groups](../../reference-guide/guc-parameters/DCF-parameters-settings.md#dcf_majority_groups) | -| [dcf_max_log_file_size](DCF-parameters-settings.md#dcf_max_log_file_size) | -| [dcf_max_workers](DCF-parameters-settings.md#dcf_max_workers) | -| [dcf_mec_agent_thread_num](DCF-parameters-settings.md#dcf_mec_agent_thread_num) | -| [dcf_mec_batch_size](DCF-parameters-settings.md#dcf_mec_batch_size) | -| [dcf_mec_channel_num](DCF-parameters-settings.md#dcf_mec_channel_num) | -| [dcf_mec_fragment_size](DCF-parameters-settings.md#dcf_mec_fragment_size) | -| [dcf_mec_pool_max_size](DCF-parameters-settings.md#dcf_mec_pool_max_size) | -| [dcf_mec_reactor_thread_num](DCF-parameters-settings.md#dcf_mec_reactor_thread_num) | -| [dcf_mem_pool_init_size](DCF-parameters-settings.md#dcf_mem_pool_init_size) | -| [dcf_mem_pool_max_size](DCF-parameters-settings.md#dcf_mem_pool_max_size) | -| [dcf_node_id](DCF-parameters-settings.md#dcf_node_id) | -| [dcf_rep_append_thread_num](DCF-parameters-settings.md#dcf_rep_append_thread_num) | -| [dcf_run_mode](DCF-parameters-settings.md#dcf_run_mode) | -| [dcf_socket_timeout](DCF-parameters-settings.md#dcf_socket_timeout) | -| [dcf_ssl](DCF-parameters-settings.md#dcf_ssl) | -| [dcf_stg_pool_init_size](DCF-parameters-settings.md#dcf_stg_pool_init_size) | -| [dcf_stg_pool_max_size](DCF-parameters-settings.md#dcf_stg_pool_max_size) | -| [dcf_truncate_threshold](DCF-parameters-settings.md#dcf_truncate_threshold) | -| [deadlock_timeout](lock-management.md#deadlock_timeout) | -| [debug_assertions](developer-options.md#debug_assertions) | -| [debug_pretty_print](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#debug_pretty_print) | -| [debug_print_parse](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#debug_print_parse) | -| [debug_print_plan](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#debug_print_plan) | -| [debug_print_rewritten](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#debug_print_rewritten) | -| 
[default_limit_rows](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#default_limit_rows) | -| [default_statistics_target](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#default_statistics_target) | -| [default_tablespace](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#default_tablespace) | -| [default_text_search_config](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#default_text_search_config) | -| [default_transaction_deferrable](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#default_transaction_deferrable) | -| [default_transaction_isolation](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#default_transaction_isolation) | -| [default_transaction_read_only](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#default_transaction_read_only) | -| [defer_csn_cleanup_time](automatic-vacuuming.md#defer_csn_cleanup_time) | -| [dirty_page_percent_max](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#dirty_page_percent_max) | -| [disable_memory_protect](load-management.md#disable_memory_protect) | -| [dw_file_num](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#dw_file_num) | -| [dw_file_size](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#dw_file_size) | -| [dynamic_library_path](../../reference-guide/guc-parameters/default-settings-of-client-connection/other-default-parameters.md#dynamic_library_path) | -| [effective_cache_size](../../reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md#effective_cache_size) | -| [effective_io_concurrency](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#effective_io_concurrency) | -| [elastic_search_ip_addr](security-configuration.md#elastic_search_ip_addr) | -| [emit_illegal_bind_chars](miscellaneous-parameters.md#emit_illegal_bind_chars) | -| [enable_absolute_tablespace](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_absolute_tablespace) | -| [enable_access_server_directory](../../reference-guide/guc-parameters/auditing/operation-audit.md#enable_access_server_directory) | -| [enable_adio_debug](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#enable_adio_debug) | -| [enable_adio_function](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#enable_adio_function) | -| [enable_alarm](alarm-detection.md#enable_alarm) | -| [enable_analyze_check](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_analyze_check) | -| [enable_asp](system-performance-snapshot.md#enable_asp) | -| [enable_auto_clean_unique_sql](query.md#enable_auto_clean_unique_sql) | -| [enable_auto_explain](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_auto_explain) | -| [enable_batch_dispatch](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#enable_batch_dispatch) | -| [enable_bbox_dump](load-management.md#enable_bbox_dump) | -| [enable_beta_features](../../reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md#enable_beta_features) | -| 
[enable_beta_opfusion](developer-options.md#enable_beta_opfusion) | -| [enable_bitmapscan](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_bitmapscan) | -| [enable_bloom_filter](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_bloom_filter) | -| [enable_cbm_tracking](backup-and-restoration-parameter.md#enable_cbm_tracking) | -| [enable_change_hjcost](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_change_hjcost) | -| [enable_codegen](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_codegen) | -| [enable_codegen_print](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_codegen_print) | -| [enable_compress_spill](developer-options.md#enable_compress_spill) | -| [enable_consider_usecount](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#enable_consider_usecount) | -| [enable_constraint_optimization](reserved-parameters.md) | -| [enable_data_replicate](../../reference-guide/guc-parameters/ha-replication/primary-server.md#enable_data_replicate) | -| [enable_debug_vacuum](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#enable_debug_vacuum) | -| [enable_default_cfunc_libpath](file-location.md#enable_default_cfunc_libpath) | -| [enable_default_ustore_table](miscellaneous-parameters.md#enable_default_ustore_table) | -| [enable_defer_calculate_snapshot](MogDB-transaction.md#enable_defer_calculate_snapshot) | -| [enable_double_write](../../reference-guide/guc-parameters/write-ahead-log/checkpoints.md#enable_double_write) | -| [enable_early_free](../../reference-guide/guc-parameters/resource-consumption/memory.md#enable_early_free) | -| [enable_extrapolation_stats](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_extrapolation_stats) | -| [enable_fast_allocate](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#enable_fast_allocate) | -| [enable_fast_numeric](developer-options.md#enable_fast_numeric) | -| [enable_ffic_log](load-management.md#enable_ffic_log) | -| [enable_force_vector_engine](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_force_vector_engine) | -| [enable_functional_dependency](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_functional_dependency) | -| [enable_global_plancache](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_global_plancache) | -| [enable_global_stats](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_global_stats) | -| [enable_global_syscache](global-syscache-parameters.md#enable_global_syscache) | -| [enable_hadoop_env](reserved-parameters.md) | -| [enable_hashagg](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_hashagg) | -| [enable_hashjoin](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_hashjoin) | -| [enable_hdfs_predicate_pushdown](reserved-parameters.md) | -| [enable_hypo_index](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_hypo_index) | -| [enable_incremental_catchup](../../reference-guide/guc-parameters/ha-replication/primary-server.md#enable_incremental_catchup) | -| 
[enable_incremental_checkpoint](../../reference-guide/guc-parameters/write-ahead-log/checkpoints.md#enable_incremental_checkpoint) | -| [enable_index_nestloop](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_index_nestloop) | -| [enable_indexonlyscan](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_indexonlyscan) | -| [enable_indexscan](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_indexscan) | -| [enable_instance_metric_persistent](load-management.md#enable_instance_metric_persistent) | -| [enable_instr_cpu_timer](query.md#enable_instr_cpu_timer) | -| [enable_instr_rt_percentile](query.md#enable_instr_rt_percentile) | -| [enable_instr_track_wait](wait-events.md#enable_instr_track_wait) | -| [enable_kill_query](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_kill_query) | -| [enable_logical_io_statistics](load-management.md#enable_logical_io_statistics) | -| [enable_material](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_material) | -| [enable_memory_context_contro](../../reference-guide/guc-parameters/resource-consumption/memory.md#enable_memory_context_control) | -| [enable_memory_limit](../../reference-guide/guc-parameters/resource-consumption/memory.md#enable_memory_limit) | -| [enable_mergejoin](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_mergejoin) | -| [enable_mix_replication](../../reference-guide/guc-parameters/ha-replication/primary-server.md#enable_mix_replication) | -| [enable_nestloop](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_nestloop) | -| [enable_nodegroup_debug](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_nodegroup_debug) | -| [enable_nonsysadmin_execute_direct](../../reference-guide/guc-parameters/auditing/operation-audit.md#enable_nonsysadmin_execute_direct) | -| [enable_online_ddl_waitlock](lock-management.md#enable_online_ddl_waitlock) | -| [enable_opfusion](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_opfusion) | -| [enable_orc_cache](reserved-parameters.md) | -| [enable_page_lsn_check](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#enable_page_lsn_check) | -| [enable_partition_opfusion](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_partition_opfusion) | -| [enable_partitionwise](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_partitionwise) | -| [enable_pbe_optimization](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_pbe_optimization) | -| [enable_prevent_job_task_startup](scheduled-task.md#enable_prevent_job_task_startup) | -| [enable_recyclebin](flashback.md#enable_recyclebin) | -| [enable_resource_record](load-management.md#enable_resource_record) | -| [enable_resource_track](load-management.md#enable_resource_track) | -| [enable_save_datachanged_timestamp](../../reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md#enable_save_datachanged_timestamp) | -| [enable_security_policy](security-configuration.md#enable_security_policy) | -| [enable_seqscan](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_seqscan) | -| 
[enable_set_variables_b_format](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#enable_set_variables_b_format) | -| [enable_show_any_tuples](MogDB-transaction.md#enable_show_any_tuples) | -| [enable_slot_log](../../reference-guide/guc-parameters/ha-replication/sending-server.md#enable_slot_log) | -| [enable_sonic_hashagg](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_sonic_hashagg) | -| [enable_sonic_hashjoin](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_sonic_hashjoin) | -| [enable_sonic_optspill](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_sonic_optspill) | -| [enable_sort](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_sort) | -| [enable_startwith_debug](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_startwith_debug) | -| [enable_stmt_track](query.md#enable_stmt_track) | -| [enable_stream_replication](../../reference-guide/guc-parameters/ha-replication/primary-server.md#enable_stream_replication) | -| [enable_tde](security-configuration.md#enable_tde) | -| [enable_time_report](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#enable_time_report) | -| [enable_thread_pool](thread-pool.md#enable_thread_pool) | -| [enable_tidscan](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_tidscan) | -| [enable_upgrade_merge_lock_mode](miscellaneous-parameters.md#enable_upgrade_merge_lock_mode) | -| [enable_user_metric_persisten](load-management.md#enable_user_metric_persistent) | -| [enable_valuepartition_pruning](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_valuepartition_prunin) | -| [enable_vector_engine](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_vector_engine) | -| [enable_wal_shipping_compression](../../reference-guide/guc-parameters/ha-replication/sending-server.md#enable_wal_shipping_compression) | -| [enable_wdr_snapshot](system-performance-snapshot.md#enable_wdr_snapshot) | -| [enable_xlog_prune](../../reference-guide/guc-parameters/write-ahead-log/checkpoints.md#enable_xlog_prune) | -| [enableSeparationOfDuty](../../reference-guide/guc-parameters/auditing/operation-audit.md#enableseparationofduty) | -| [enforce_a_behavior](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enforce_a_behavior) | -| [escape_string_warning](../../reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md#escape_string_warning) | -| [event_source](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#event_source) | -| [exit_on_error](fault-tolerance.md#exit_on_error) | -| [expected_computing_nodegroup](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#expected_computing_nodegroup) | -| [explain_dna_file](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#explain_dna_file) | -| [explain_perf_mode](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#explain_perf_mode) | -| [external_pid_file](file-location.md#external_pid_file) | -| [extra_float_digits](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#extra_float_digits) | -| 
[failed_login_attempts](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#failed_login_attempts) | -| [fast_extend_file_size](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#fast_extend_file_size) | -| [fault_mon_timeout](lock-management.md#fault_mon_timeout) | -| [FencedUDFMemoryLimit](guc-user-defined-functions.md#fencedudfmemorylimit) | -| [force_bitmapand](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#force_bitmapand) | -| [force_promote](../../reference-guide/guc-parameters/write-ahead-log/settings.md#force_promote) | -| [from_collapse_limit](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#from_collapse_limit) | -| [fsync](../../reference-guide/guc-parameters/write-ahead-log/settings.md#fsync) | -| [full_page_writes](../../reference-guide/guc-parameters/write-ahead-log/settings.md#full_page_writes) | -| [geqo](../../reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md#geqo) | -| [geqo_effort](../../reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md#geqo_effort) | -| [geqo_generations](../../reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md#geqo_generations) | -| [geqo_pool_size](../../reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md#geqo_pool_size) | -| [geqo_seed](../../reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md#geqo_seed) | -| [geqo_selection_bias](../../reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md#geqo_selection_bias) | -| [geqo_threshold](../../reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md#geqo_threshold) | -| [gin_fuzzy_search_limit](../../reference-guide/guc-parameters/default-settings-of-client-connection/other-default-parameters.md#gin_fuzzy_search_limit) | -| [gin_pending_list_limit](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#gin_pending_list_limit) | -| [global_syscache_threshold](global-syscache-parameters.md#global_syscache_threshold) | -| [gpc_clean_timeout](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#gpc_clean_timeout) | -| [group_concat_max_len](../../reference-guide/guc-parameters/miscellaneous-parameters.md#group_concat_max_len) | -| [gs_clean_timeout](lock-management.md#gs_clean_timeout) | -| [ha_module_debug](../../reference-guide/guc-parameters/ha-replication/primary-server.md#ha_module_debug) | -| [hadr_max_size_for_xlog_receiver](backup-and-restoration-parameter.md#hadr_max_size_for_xlog_receiver) | -| [hashagg_table_size](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#hashagg_table_size) | -| [hba_file](file-location.md#hba_file) | -| [hll_default_log2explicit](HyperLogLog.md#hll_default_log2explicit) | -| [hll_default_log2m](HyperLogLog.md#hll_default_log2m) | -| [hll_default_log2sparse](HyperLogLog.md#hll_default_log2sparse) | -| [hll_duplicate_check](HyperLogLog.md#hll_duplicate_check) | -| [hot_standby](../../reference-guide/guc-parameters/ha-replication/standby-server.md#hot_standby) | -| [hot_standby_feedback](../../reference-guide/guc-parameters/ha-replication/standby-server.md#hot_standby_feedback) | -| [ident_file](file-location.md#ident_file) | -| 
[idle_in_transaction_session_timeout](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#idle_in_transaction_session_timeout) | -| [ignore_checksum_failure](developer-options.md#ignore_checksum_failure) | -| [ignore_system_indexes](developer-options.md#ignore_system_indexes) | -| [incremental_checkpoint_timeout](../../reference-guide/guc-parameters/write-ahead-log/checkpoints.md#incremental_checkpoint_timeout) | -| [inplace_upgrade_next_system_object_oids](upgrade-parameters.md#inplace_upgrade_next_system_object_oids) | -| [instance_metric_retention_time](load-management.md#instance_metric_retention_time) | -| [instr_rt_percentile_interval](query.md#instr_rt_percentile_interval) | -| [instr_unique_sql_count](query.md#instr_unique_sql_count) | -| [instr_unique_sql_track_type](query.md#instr_unique_sql_track_type) | -| [integer_datetimes](miscellaneous-parameters.md#integer_datetimes) | -| [IntervalStyle](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#intervalstyle) | -| [io_control_unit](load-management.md#io_control_unit) | -| [io_limits](load-management.md#io_limits) | -| [io_priority](load-management.md#io_priority) | -| [isinplaceupgrade](upgrade-parameters.md#isinplaceupgrade) | -| [is_sysadmin](security-configuration.md#is_sysadmin) | -| [job_queue_processes](scheduled-task.md#job_queue_processes) | -| [join_collapse_limit](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#join_collapse_limit) | -| [keep_sync_window](../../reference-guide/guc-parameters/ha-replication/primary-server.md#keep_sync_window) | -| [krb_caseins_users](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#krb_caseins_users) | -| [krb_server_keyfile](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#krb_server_keyfile) | -| [krb_srvname](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#krb_srvname) | -| [lastval_supported](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#lastval_supported) | -| [lc_collate](miscellaneous-parameters.md#lc_collate) | -| [lc_ctype](miscellaneous-parameters.md#lc_ctype) | -| [lc_messages](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#lc_messages) | -| [lc_monetary](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#lc_monetary) | -| [lc_numeric](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#lc_numeric) | -| [lc_time](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#lc_time) | -| [listen_addresses](../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#listen_addresses) | -| [lo_compat_privileges](../../reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md#lo_compat_privileges) | -| [local_bind_address](../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#local_bind_address) | -| [local_preload_libraries](../../reference-guide/guc-parameters/default-settings-of-client-connection/other-default-parameters.md#local_preload_libraries) | -| 
[local_syscache_threshold](../../reference-guide/guc-parameters/resource-consumption/memory.md#local_syscache_threshold) | -| [lockwait_timeout](lock-management.md#lockwait_timeout) | -| [log_autovacuum_min_duration](automatic-vacuuming.md#log_autovacuum_min_duration) | -| [log_checkpoints](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#log_checkpoints) | -| [log_connections](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#log_connections) | -| [log_destination](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#log_destination) | -| [log_directory](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#log_directory) | -| [log_disconnections](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#log_disconnections) | -| [log_duration](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#log_duration) | -| [log_error_verbosity](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#log_error_verbosity) | -| [log_executor_stats](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#log_executor_stats) | -| [log_file_mode](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#log_file_mode) | -| [log_filename](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#log_filename) | -| [log_hostname](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#log_hostname) | -| [log_line_prefix](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#log_line_prefix) | -| [log_lock_waits](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#log_lock_waits) | -| [log_min_duration_statement](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-time.md#log_min_duration_statement) | -| [log_min_error_statement](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-time.md#log_min_error_statement) | -| [log_min_messages](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-time.md#log_min_messages) | -| [log_pagewriter](developer-options.md#log_pagewriter) | -| [log_parser_stats](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#log_parser_stats) | -| [log_planner_stats](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#log_planner_stats) | -| [log_rotation_age](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#log_rotation_age) | -| [log_rotation_size](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#log_rotation_size) | -| [log_statement](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#log_statement) | -| [log_statement_stats](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#log_statement_stats) | -| [log_temp_files](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#log_temp_files) | -| [log_timezone](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#log_timezone) | -| [log_truncate_on_rotation](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#log_truncate_on_rotation) | -| 
[logging_collector](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#logging_collector) | -| [logging_module](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#logging_module) | -| [maintenance_work_mem](../../reference-guide/guc-parameters/resource-consumption/memory.md#maintenance_work_mem) | -| [max_active_global_temporary_table](global-temporary-table.md#max_active_global_temporary_table) | -| [max_cached_tuplebufs](../../reference-guide/guc-parameters/ha-replication/sending-server.md#max_cached_tuplebufs) | -| [max_changes_in_memory](../../reference-guide/guc-parameters/ha-replication/sending-server.md#max_changes_in_memory) | -| [max_cn_temp_file_size](fault-tolerance.md#max_cn_temp_file_size) | -| [max_compile_functions](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#max_compile_functions) | -| [max_concurrent_autonomous_transactions](miscellaneous-parameters.md#max_concurrent_autonomous_transactions) | -| [max_connections](../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#max_connections) | -| [max_files_per_process](../../reference-guide/guc-parameters/resource-consumption/kernel-resource-usage.md#max_files_per_process) | -| [max_function_args](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#max_function_args) | -| [max_identifier_length](miscellaneous-parameters.md#max_identifier_length) | -| [max_index_keys](miscellaneous-parameters.md#max_index_keys) | -| [max_inner_tool_connections](../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#max_inner_tool_connections) | -| [max_io_capacity](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#max_io_capacity) | -| [max_loaded_cudesc](../../reference-guide/guc-parameters/resource-consumption/memory.md#max_loaded_cudesc) | -| [max_locks_per_transaction](lock-management.md#max_locks_per_transaction) | -| [max_logical_replication_workers](../../reference-guide/guc-parameters/ha-replication/standby-server.md#max_logical_replication_workers) | -| [max_pred_locks_per_transaction](lock-management.md#max_pred_locks_per_transaction) | -| [max_prepared_transactions](../../reference-guide/guc-parameters/resource-consumption/memory.md#max_prepared_transactions) | -| [max_process_memory](../../reference-guide/guc-parameters/resource-consumption/memory.md#max_process_memory) | -| [max_query_retry_times](fault-tolerance.md#max_query_retry_times) | -| [max_recursive_times](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#max_recursive_times) | -| [max_redo_log_size](../../reference-guide/guc-parameters/write-ahead-log/checkpoints.md#max_redo_log_size) | -| [max_replication_slots](../../reference-guide/guc-parameters/ha-replication/sending-server.md#max_replication_slots) | -| [max_size_for_xlog_prune](../../reference-guide/guc-parameters/write-ahead-log/checkpoints.md#max_size_for_xlog_prune) | -| [max_stack_depth](../../reference-guide/guc-parameters/resource-consumption/memory.md#max_stack_depth) | -| [max_standby_archive_delay](../../reference-guide/guc-parameters/ha-replication/standby-server.md#max_standby_archive_delay) | -| [max_standby_streaming_delay](../../reference-guide/guc-parameters/ha-replication/standby-server.md#max_standby_streaming_delay) | -| 
-| [max_sync_workers_per_subscription](../../reference-guide/guc-parameters/ha-replication/standby-server.md#max_sync_workers_per_subscription) |
-| [max_undo_workers](rollback-parameters.md#max_undo_workers) |
-| [max_user_defined_exception](developer-options.md#max_user_defined_exception) |
-| [max_wal_senders](../../reference-guide/guc-parameters/ha-replication/sending-server.md#max_wal_senders) |
-| [memory_detail_tracking](load-management.md#memory_detail_tracking) |
-| [memory_fault_percent](load-management.md#memory_fault_percent) |
-| [memory_tracking_mode](load-management.md#memory_tracking_mode) |
-| [memorypool_enable](../../reference-guide/guc-parameters/resource-consumption/memory.md#memorypool_enable) |
-| [memorypool_size](../../reference-guide/guc-parameters/resource-consumption/memory.md#memorypool_size) |
-| [minimum_pool_size](connection-pool-parameters.md#minimum_pool_size) |
-| [modify_initial_password](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#modify_initial_password) |
-| [most_available_sync](../../reference-guide/guc-parameters/ha-replication/primary-server.md#most_available_sync) |
-| [ngram_gram_size](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#ngram_gram_size) |
-| [ngram_grapsymbol_ignore](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#ngram_grapsymbol_ignore) |
-| [ngram_punctuation_ignore](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#ngram_punctuation_ignore) |
-| [nls_timestamp_format](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#nls_timestamp_format) |
-| [num_internal_lock_partitions](lock-management.md#num_internal_lock_partitions) |
-| [numa_distribute_mode](developer-options.md#numa_distribute_mode) |
-| [omit_encoding_error](fault-tolerance.md#omit_encoding_error) |
-| [operation_mode](backup-and-restoration-parameter.md#operation_mode) |
-| [opfusion_debug_mode](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md#opfusion_debug_mode) |
-| [pagewriter_sleep](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#pagewriter_sleep) |
-| [pagewriter_thread_num](../../reference-guide/guc-parameters/resource-consumption/background-writer.md#pagewriter_thread_num) |
-| [parallel_recovery_batch](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#parallel_recovery_batch) |
-| [parallel_recovery_dispatch_algorithm](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#parallel_recovery_dispatch_algorithm) |
-| [parallel_recovery_timeout](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#parallel_recovery_timeout) |
-| [partition_iterator_elimination](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#partition_iterator_elimination) |
-| [partition_lock_upgrade_timeout](lock-management.md#partition_lock_upgrade_timeout) |
-| [partition_page_estimation](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#partition_page_estimation) |
-| [password_effect_time](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_effect_time) |
-| [password_encryption_type](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_encryption_type) |
-| [password_lock_time](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_lock_time) |
-| [password_max_length](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_max_length) |
-| [password_min_digital](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_min_digital) |
-| [password_min_length](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_min_length) |
-| [password_min_lowercase](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_min_lowercase) |
-| [password_min_special](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_min_special) |
-| [password_min_uppercase](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_min_uppercase) |
-| [password_notify_time](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_notify_time) |
-| [password_policy](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_policy) |
-| [password_reuse_max](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_reuse_max) |
-| [password_reuse_time](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#password_reuse_time) |
-| [pca_shared_buffer](../../reference-guide/guc-parameters/parameters-related-to-efficient-data-compression-algorithms.md#pca_shared_buffer) |
-| [percentile](query.md#percentile) |
-| [pgxc_node_name](MogDB-transaction.md#pgxc_node_name) |
-| [plan_cache_mode](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#plan_cache_mode) |
-| [plan_mode_seed](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#plan_mode_seed) |
-| [pldebugger_timeout](developer-options.md#pldebugger_timeout) |
-| [pljava_vmoptions](guc-user-defined-functions.md#pljava_vmoptions) |
-| [plog_merge_age](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-time.md#plog_merge_age) |
-| [plpgsql.variable_conflict](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#plpgsqlvariable_conflict) |
-| [plsql_show_all_error](developer-options.md#plsql_show_all_error) |
-| [pooler_maximum_idle_time](connection-pool-parameters.md#pooler_maximum_idle_time) |
-| [port](../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#port) |
-| [post_auth_delay](developer-options.md#post_auth_delay) |
-| [pre_auth_delay](developer-options.md#pre_auth_delay) |
-| [prefetch_quantity](../../reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md#prefetch_quantity) |
-| [primary_slotname](../../reference-guide/guc-parameters/ha-replication/standby-server.md#primary_slotname) |
-| [psort_work_mem](../../reference-guide/guc-parameters/resource-consumption/memory.md#psort_work_mem) |
-| [qrw_inlist2join_optmode](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#qrw_inlist2join_optmode) |
-| [query_band](load-management.md#query_band) |
-| [query_dop](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#query_dop) |
-| [query_max_mem](../../reference-guide/guc-parameters/resource-consumption/memory.md#query_max_mem) |
-| [query_mem](../../reference-guide/guc-parameters/resource-consumption/memory.md#query_mem) |
-| [quote_all_identifiers](../../reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md#quote_all_identifiers) |
-| [random_page_cost](../../reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md#random_page_cost) |
-| [recovery_max_workers](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#recovery_max_workers) |
-| [recovery_min_apply_delay](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#recovery_min_apply_delay) |
-| [recovery_parallelism](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#recovery_parallelism) |
-| [recovery_parse_workers](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#recovery_parse_workers) |
-| [recovery_redo_workers](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#recovery_redo_workers) |
-| [recovery_time_target](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#recovery_time_target) |
-| [recyclebin_retention_time](flashback.md#recyclebin_retention_time) |
-| [redo_bind_cpu_attr](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#redo_bind_cpu_attr) |
-| [remote_read_mode](fault-tolerance.md#remote_read_mode) |
-| [remotetype](developer-options.md#remotetype) |
-| [replconninfoN](../../reference-guide/guc-parameters/ha-replication/sending-server.md#replconninfon) |
-| [replication_type](MogDB-transaction.md#replication_type) |
-| [RepOriginId](../../reference-guide/guc-parameters/replication-parameters-of-two-database-instances.md#RepOriginId) |
-| [require_ssl](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#require_ssl) |
-| [reserve_space_for_nullable_atts](miscellaneous-parameters.md#reserve_space_for_nullable_atts) |
-| [resource_track_cost](load-management.md#resource_track_cost) |
-| [resource_track_duration](load-management.md#resource_track_duration) |
-| [resource_track_level](load-management.md#resource_track_level) |
-| [resource_track_log](developer-options.md#resource_track_log) |
-| [restart_after_crash](fault-tolerance.md#restart_after_crash) |
-| [retry_ecode_list](fault-tolerance.md#retry_ecode_list) |
-| [rewrite_rule](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#rewrite_rule) |
-| [schedule_splits_threshold](reserved-parameters.md) |
-| [search_path](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#search_path) |
-| [segment_buffers](../../reference-guide/guc-parameters/resource-consumption/memory.md#segment_buffers) |
-| [segment_size](miscellaneous-parameters.md#segment_size) |
-| [seq_page_cost](../../reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md#seq_page_cost) |
-| [server_encoding](miscellaneous-parameters.md#server_encoding) |
-| [server_version](miscellaneous-parameters.md#server_version) |
-| [server_version_num](miscellaneous-parameters.md#server_version_num) |
-| [session_authorization](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#session_authorization) |
-| [session_history_memory](load-management.md#session_history_memory) |
-| [session_replication_role](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#session_replication_role) |
-| [session_respool](load-management.md#session_respool) |
-| [session_statistics_memory](load-management.md#session_statistics_memory) |
-| [session_timeout](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#session_timeout) |
-| [shared_buffers](../../reference-guide/guc-parameters/resource-consumption/memory.md#shared_buffers) |
-| [shared_preload_libraries](../../reference-guide/guc-parameters/resource-consumption/kernel-resource-usage.md#shared_preload_libraries) |
-| [show_acce_estimate_detail](developer-options.md#show_acce_estimate_detail) |
-| [skew_option](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#skew_option) |
-| [sql_beta_feature](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#sql_beta_feature) |
-| [sql_compatibility](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#sql_compatibility) |
-| [sql_ignore_strategy](../../reference-guide/guc-parameters/miscellaneous-parameters.md#sql_ignore_strategy) |
-| [sql_inheritance](../../reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md#sql_inheritance) |
-| [sql_use_spacelimit](../../reference-guide/guc-parameters/resource-consumption/disk-space.md#sql_use_spacelimit) |
-| [ssl](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#ssl) |
-| [ssl_ca_file](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#ssl_ca_file) |
-| [ssl_cert_file](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#ssl_cert_file) |
-| [ssl_ciphers](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#ssl_ciphers) |
-| [ssl_crl_file](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#ssl_crl_file) |
-| [ssl_key_file](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#ssl_key_file) |
-| [ssl_renegotiation_limit](../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#ssl_renegotiation_limit) |
-| [standard_conforming_strings](../../reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md#standard_conforming_strings) |
-| [standby_shared_buffers_fract](../../reference-guide/guc-parameters/resource-consumption/memory.md#standby_shared_buffers_fraction) |
-| [statement_timeout](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#statement_timeout) |
-| [stats_temp_directory](../../reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md#stats_temp_directory) |
-| [string_hash_compatible](developer-options.md#string_hash_compatible) |
-| [support_batch_bind](developer-options.md#support_batch_bind) |
-| [support_extended_features](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#support_extended_features) |
-| [sync_config_strategy](../../reference-guide/guc-parameters/ha-replication/primary-server.md#sync_config_strategy) |
-| [synchronize_seqscans](../../reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md#synchronize_seqscans) |
-| [synchronous_commit](../../reference-guide/guc-parameters/write-ahead-log/settings.md#synchronous_commit) |
-| [synchronous_standby_names](../../reference-guide/guc-parameters/ha-replication/primary-server.md#synchronous_standby_names) |
-| [sysadmin_reserved_connection](../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#sysadmin_reserved_connections) |
-| [syslog_facility](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#syslog_facility) |
-| [syslog_ident](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#syslog_ident) |
-| [table_skewness_warning_rows](alarm-detection.md#table_skewness_warning_rows) |
-| [table_skewness_warning_threshold](alarm-detection.md#table_skewness_warning_threshold) |
-| [tcp_keepalives_count](../../reference-guide/guc-parameters/connection-and-authentication/communication-library-parameters.md#tcp_keepalives_count) |
-| [tcp_keepalives_idle](../../reference-guide/guc-parameters/connection-and-authentication/communication-library-parameters.md#tcp_keepalives_idle) |
-| [tcp_keepalives_interval](../../reference-guide/guc-parameters/connection-and-authentication/communication-library-parameters.md#tcp_keepalives_interval) |
-| [td_compatible_truncation](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#td_compatible_truncation) |
-| [tde_cmk_id](security-configuration.md#tde_cmk_id) |
-| [temp_buffers](../../reference-guide/guc-parameters/resource-consumption/memory.md#temp_buffers) |
-| [temp_file_limit](../../reference-guide/guc-parameters/resource-consumption/disk-space.md#temp_file_limit) |
-| [temp_tablespaces](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#temp_tablespaces) |
-| [thread_pool_attr](thread-pool.md#thread_pool_attr) |
-| [TimeZone](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#timezone) |
-| [timezone_abbreviations](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#timezone_abbreviations) |
-| [topsql_retention_time](load-management.md#topsql_retention_time) |
-| [trace_notify](developer-options.md#trace_notify) |
-| [trace_recovery_messages](developer-options.md#trace_recovery_messages) |
-| [trace_sort](developer-options.md#trace_sort) |
-| [track_activities](../../reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md#track_activities) |
-| [track_activity_query_size](../../reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md#track_activity_query_size) |
-| [track_counts](../../reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md#track_counts) |
-| [track_functions](../../reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md#track_functions) |
-| [track_io_timing](../../reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md#track_io_timing) |
-| [track_sql_count](../../reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md#track_sql_count) |
-| [track_stmt_details_size](query.md#track_stmt_details_size) |
-| [track_stmt_retention_time](query.md#track_stmt_retention_time) |
-| [track_stmt_session_slot](query.md#track_stmt_session_slot) |
-| [track_stmt_standby_chain_size](../../reference-guide/guc-parameters/query.md#track_stmt_standby_chain_size) |
-| [track_stmt_stat_level](query.md#track_stmt_stat_level) |
-| [track_thread_wait_status_interval](../../reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md#track_thread_wait_status_interval) |
-| [transaction_deferrable](MogDB-transaction.md#transaction_deferrable) |
-| [transaction_isolation](MogDB-transaction.md#transaction_isolation) |
-| [transaction_pending_time](load-management.md#transaction_pending_time) |
-| [transaction_read_only](MogDB-transaction.md#transaction_read_only) |
-| [transform_null_equals](../../reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#transform_null_equals) |
-| [transparent_encrypt_kms_region](miscellaneous-parameters.md#transparent_encrypt_kms_region) |
-| [transparent_encrypt_kms_url](miscellaneous-parameters.md#transparent_encrypt_kms_url) |
-| [transparent_encrypted_string](miscellaneous-parameters.md#transparent_encrypted_string) |
-| [try_vector_engine_strategy](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#try_vector_engine_strategy) |
-| [udf_memory_limit](guc-user-defined-functions.md#udf_memory_limit) |
-| [UDFWorkerMemHardLimit](guc-user-defined-functions.md#udfworkermemhardlimit) |
-| [uncontrolled_memory_context](../../reference-guide/guc-parameters/resource-consumption/memory.md#uncontrolled_memory_context) |
-| [undo_limit_size_per_transaction](rollback-parameters.md#undo_limit_size_per_transaction) |
-| [undo_retention_time](flashback.md#undo_retention_time) |
-| [undo_space_limit_size](rollback-parameters.md#undo_space_limit_size) |
-| [undo_zone_count](reserved-parameters.md) |
-| [unix_socket_directory](../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#unix_socket_directory) |
-| [unix_socket_group](../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#unix_socket_group) |
-| [unix_socket_permissions](../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#unix_socket_permissions) |
-| [update_lockwait_timeout](lock-management.md#update_lockwait_timeout) |
-| [upgrade_mode](upgrade-parameters.md#upgrade_mode) |
-| [use_elastic_search](security-configuration.md#use_elastic_search) |
-| [use_workload_manager](load-management.md#use_workload_manager) |
-| [user_metric_retention_time](load-management.md#user_metric_retention_time) |
-| [ustore_attr](miscellaneous-parameters.md#ustore_attr) |
-| [vacuum_cost_delay](../../reference-guide/guc-parameters/resource-consumption/cost-based-vacuum-delay.md#vacuum_cost_delay) |
-| [vacuum_cost_limit](../../reference-guide/guc-parameters/resource-consumption/cost-based-vacuum-delay.md#vacuum_cost_limit) |
-| [vacuum_cost_page_dirty](../../reference-guide/guc-parameters/resource-consumption/cost-based-vacuum-delay.md#vacuum_cost_page_dirty) |
-| [vacuum_cost_page_hit](../../reference-guide/guc-parameters/resource-consumption/cost-based-vacuum-delay.md#vacuum_cost_page_hit) |
-| [vacuum_cost_page_miss](../../reference-guide/guc-parameters/resource-consumption/cost-based-vacuum-delay.md#vacuum_cost_page_miss) |
-| [vacuum_defer_cleanup_age](../../reference-guide/guc-parameters/ha-replication/primary-server.md#vacuum_defer_cleanup_age) |
-| [vacuum_freeze_min_age](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#vacuum_freeze_min_age) |
-| [vacuum_freeze_table_age](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#vacuum_freeze_table_age) |
-| [vacuum_gtt_defer_check_age](global-temporary-table.md#vacuum_gtt_defer_check_age) |
-| [var_eq_const_selectivity](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#var_eq_const_selectivity) |
-| [version_retention_age](flashback.md#version_retention_age) |
-| [wait_dummy_time](../../reference-guide/guc-parameters/ha-replication/primary-server.md#wait_dummy_time) |
-| [wal_block_size](../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_block_size) |
-| [wal_buffers](../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_buffers) |
-| [wal_file_init_num](../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_file_init_num) |
-| [wal_flush_delay](../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_flush_delay) |
-| [wal_flush_timeout](../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_flush_timeout) |
-| [wal_keep_segments](../../reference-guide/guc-parameters/ha-replication/sending-server.md#wal_keep_segments) |
-| [wal_level](../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_level) |
-| [wal_log_hints](../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_log_hints) |
-| [wal_receiver_buffer_size](../../reference-guide/guc-parameters/ha-replication/standby-server.md#wal_receiver_buffer_size) |
-| [wal_receiver_connect_retries](../../reference-guide/guc-parameters/ha-replication/standby-server.md#wal_receiver_connect_retries) |
-| [wal_receiver_connect_timeout](../../reference-guide/guc-parameters/ha-replication/standby-server.md#wal_receiver_connect_timeout) |
-| [wal_receiver_status_interval](../../reference-guide/guc-parameters/ha-replication/standby-server.md#wal_receiver_status_interval) |
-| [wal_receiver_timeout](../../reference-guide/guc-parameters/ha-replication/standby-server.md#wal_receiver_timeout) |
-| [wal_segment_size](../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_segment_size) |
-| [wal_sender_timeout](../../reference-guide/guc-parameters/ha-replication/sending-server.md#wal_sender_timeout) |
-| [wal_sync_method](../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_sync_method) |
-| [wal_writer_delay](../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_writer_delay) |
-| [walsender_max_send_size](../../reference-guide/guc-parameters/ha-replication/primary-server.md#walsender_max_send_size) |
-| [walwriter_cpu_bind](../../reference-guide/guc-parameters/write-ahead-log/settings.md#walwriter_cpu_bind) |
-| [walwriter_sleep_threshold](../../reference-guide/guc-parameters/write-ahead-log/settings.md#walwriter_sleep_threshold) |
-| [wdr_snapshot_interval](system-performance-snapshot.md#wdr_snapshot_interval) |
-| [wdr_snapshot_query_timeout](system-performance-snapshot.md#wdr_snapshot_query_timeout) |
-| [wdr_snapshot_retention_days](system-performance-snapshot.md#wdr_snapshot_retention_days) |
-| [work_mem](../../reference-guide/guc-parameters/resource-consumption/memory.md#work_mem) |
-| [xc_maintenance_mode](MogDB-transaction.md#xc_maintenance_mode) |
-| [xloginsert_locks](lock-management.md#xloginsert_locks) |
-| [xmlbinary](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#xmlbinary) |
-| [xmloption](../../reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md#xmloption) |
-| [zero_damaged_pages](developer-options.md#zero_damaged_pages) |
+| GUC Parameter Index | Introduced Version |
+| ------------------------------------------------------------ | ------------------------------------------------------------ |
+| [acce_min_datasize_per_thread](reserved-parameters.md) | |
+| [acceleration_with_compute_pool](./miscellaneous-parameters.md#acceleration_with_compute_pool) | |
+| [adaptive_hashagg_allow_spill](./query-planning/other-optimizer-options.md#adaptive_hashagg_allow_spill) | |
+| [adaptive_hashagg_min_rows](./query-planning/other-optimizer-options.md#adaptive_hashagg_min_rows) | |
+| [adaptive_hashagg_reduce_ratio_threshold](./query-planning/other-optimizer-options.md#adaptive_hashagg_reduce_ratio_threshold) | |
+| [advance_xlog_file_num](developer-options.md#advance_xlog_file_num) | |
+| [alarm_component](./alarm-detection.md#alarm_component) | |
+| [alarm_report_interval](alarm-detection.md#alarm_report_interval) | |
+| [allocate_mem_cost](query-planning/optimizer-cost-constants.md#allocate_mem_cost) | |
+| [allow_concurrent_tuple_update](MogDB-transaction.md#allow_concurrent_tuple_update) | |
+| [allow_create_sysobject](./developer-options.md#allow_create_sysobject) | |
+| [allow_system_table_mods](developer-options.md#allow_system_table_mods) | |
+| [analysis_options](query-planning/other-optimizer-options.md#analysis_options) | |
+| [application_name](connection-and-authentication/connection-settings.md#application_name) | |
+| [archive_command](write-ahead-log/archiving.md#archive_command) | |
+| [archive_dest](write-ahead-log/archiving.md#archive_dest) | |
+| [archive_interval](write-ahead-log/archiving.md#archive_interval) | |
+| [archive_mode](write-ahead-log/archiving.md#archive_mode) | |
+| [archive_timeout](write-ahead-log/archiving.md#archive_timeout) | |
+| [array_nulls](version-and-platform-compatibility/compatibility-with-earlier-versions.md#array_nulls) | |
+| [asp_flush_mode](system-performance-snapshot.md#asp_flush_mode) | |
+| [asp_flush_rate](system-performance-snapshot.md#asp_flush_rate) | |
+| [asp_log_directory](./query.md#asp_log_directory) | |
+| [asp_log_filename](system-performance-snapshot.md#asp_log_filename) | |
+| [asp_retention_days](system-performance-snapshot.md#asp_retention_days) | |
+| [asp_sample_interval](system-performance-snapshot.md#asp_sample_interval) | |
+| [asp_sample_num](system-performance-snapshot.md#asp_sample_num) | |
+| [async_submit](./MogDB-transaction.md#async_submit) | |
+| [audit_copy_exec](auditing/operation-audit.md#audit_copy_exec) | |
+| [audit_data_format](auditing/audit-switch.md#audit_data_format) | |
+| [audit_database_process](auditing/user-and-permission-audit.md#audit_database_process) | |
+| [audit_directory](auditing/audit-switch.md#audit_directory) | |
+| [audit_dml_state](auditing/operation-audit.md#audit_dml_state) | |
+| [audit_dml_state_select](auditing/operation-audit.md#audit_dml_state_select) | |
+| [audit_enabled](auditing/audit-switch.md#audit_enabled) | |
+| [audit_file_remain_threshold](auditing/audit-switch.md#audit_file_remain_threshold) | |
+| [audit_file_remain_time](auditing/audit-switch.md#audit_file_remain_time) | |
+| [audit_function_exec](auditing/operation-audit.md#audit_function_exec) | |
+| [audit_grant_revoke](auditing/user-and-permission-audit.md#audit_grant_revoke) | |
+| [audit_login_logout](auditing/user-and-permission-audit.md#audit_login_logout) | |
+| [audit_resource_policy](auditing/audit-switch.md#audit_resource_policy) | |
+| [audit_rotation_interval](auditing/audit-switch.md#audit_rotation_interval) | |
+| [audit_rotation_size](auditing/audit-switch.md#audit_rotation_size) | |
+| [audit_set_parameter](auditing/operation-audit.md#audit_set_parameter) | |
+| [audit_space_limit](auditing/audit-switch.md#audit_space_limit) | |
+| [audit_system_function_exec](./auditing/operation-audit.md#audit_system_function_exec) | |
+| [audit_system_object](auditing/operation-audit.md#audit_system_object) | |
+| [audit_thread_num](auditing/audit-switch.md#audit_thread_num) | |
+| [audit_user_locked](auditing/user-and-permission-audit.md#audit_user_locked) | |
+| [audit_user_violation](auditing/user-and-permission-audit.md#audit_user_violation) | |
+| [audit_xid_info](auditing/operation-audit.md#audit_xid_info) | |
+| [auth_iteration_count](connection-and-authentication/security-and-authentication.md#auth_iteration_count) | |
+| [authentication_timeout](connection-and-authentication/security-and-authentication.md#authentication_timeout) | |
+| [auto_explain_level](query-planning/other-optimizer-options.md#auto_explain_level) | |
+| [autoanalyze](query-planning/other-optimizer-options.md#autoanalyze) | |
+| [autoanalyze_timeout](automatic-vacuuming.md#autoanalyze_timeout) | |
+| [autocmpr_cost_delay](./backend-compression.md#autocmpr_cost_delay) | |
+| [autocmpr_cost_limit](./backend-compression.md#autocmpr_cost_limit) | |
+| [autocmpr_max_workers](./backend-compression.md#autocmpr_max_workers) | |
+| [autocmpr_naptime](./backend-compression.md#autocmpr_naptime) | |
+| [autocompress](./backend-compression.md#autocompress) | |
+| [autovacuum](automatic-vacuuming.md#autovacuum) | |
+| [autovacuum_analyze_scale_factor](automatic-vacuuming.md#autovacuum_analyze_scale_factor) | |
+| [autovacuum_analyze_threshold](automatic-vacuuming.md#autovacuum_analyze_threshold) | |
+| [autovacuum_freeze_max_age](automatic-vacuuming.md#autovacuum_freeze_max_age) | |
+| [autovacuum_io_limits](automatic-vacuuming.md#autovacuum_io_limits) | |
+| [autovacuum_max_workers](automatic-vacuuming.md#autovacuum_max_workers) | |
+| [autovacuum_mode](automatic-vacuuming.md#autovacuum_mode) | |
+| [autovacuum_naptime](automatic-vacuuming.md#autovacuum_naptime) | |
+| [autovacuum_vacuum_cost_delay](automatic-vacuuming.md#autovacuum_vacuum_cost_delay) | |
+| [autovacuum_vacuum_cost_limit](automatic-vacuuming.md#autovacuum_vacuum_cost_limit) | |
+| [autovacuum_vacuum_scale_factor](automatic-vacuuming.md#autovacuum_vacuum_scale_factor) | |
+| [autovacuum_vacuum_threshold](automatic-vacuuming.md#autovacuum_vacuum_threshold) | |
+| [available_zone](ha-replication/sending-server.md#available_zone) | |
+| [b_compatibility_user_host_auth](./connection-and-authentication/connection-settings.md#b_compatibility_user_host_auth) | |
+| [b_format_behavior_compat_options](./version-and-platform-compatibility/platform-and-client-compatibility.md#b_format_behavior_compat_options) | |
+| [backend_flush_after](resource-consumption/asynchronous-io-operations.md#backend_flush_after) | |
+| [backslash_quote](version-and-platform-compatibility/compatibility-with-earlier-versions.md#backslash_quote) | |
+| [backtrace_min_messages](error-reporting-and-logging/logging-time.md#backtrace_min_messages) | |
+| [backwrite_quantity](resource-consumption/asynchronous-io-operations.md#backwrite_quantity) | |
+| [basebackup_timeout](miscellaneous-parameters.md#basebackup_timeout) | |
+| [bbox_blanklist_items](load-management.md#bbox_blanklist_items) | |
+| [bbox_dump_count](load-management.md#bbox_dump_count) | |
+| [bbox_dump_path](load-management.md#bbox_dump_path) | |
+| [behavior_compat_options](version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options) | |
+| [best_agg_plan](./query-planning/other-optimizer-options.md#best_agg_plan) | |
+| [bgwriter_delay](resource-consumption/background-writer.md#bgwriter_delay) | |
+| [bgwriter_flush_after](resource-consumption/asynchronous-io-operations.md#bgwriter_flush_after) | |
+| [bgwriter_lru_maxpages](resource-consumption/background-writer.md#bgwriter_lru_maxpages) | |
+| [bgwriter_lru_multiplier](resource-consumption/background-writer.md#bgwriter_lru_multiplier) | |
+| [block_encryption_mode](security-configuration.md#block_encryption_mode) | |
+| [block_size](miscellaneous-parameters.md#block_size) | |
+| [bulk_read_ring_size](resource-consumption/memory.md#bulk_read_ring_size) | |
+| [bulk_write_ring_size](resource-consumption/memory.md#bulk_write_ring_size) | |
+| [bypass_dram](./multi-level-cache-management-parameters.md#bypass_dram) | |
+| [bypass_nvm](./multi-level-cache-management-parameters.md#bypass_nvm) | |
+| [bytea_output](default-settings-of-client-connection/statement-behavior.md#bytea_output) | |
+| [cache_connection](connection-pool-parameters.md#cache_connection) | |
+| [candidate_buf_percent_target](resource-consumption/background-writer.md#candidate_buf_percent_target) | |
+| [catchup2normal_wait_time](ha-replication/primary-server.md#catchup2normal_wait_time) | |
+| [cgroup_name](load-management.md#cgroup_name) | |
+| [check_function_bodies](default-settings-of-client-connection/statement-behavior.md#check_function_bodies) | |
+| [check_implicit_conversions](query-planning/optimizer-method-configuration.md#check_implicit_conversions) | |
+| [checkpoint_completion_target](write-ahead-log/checkpoints.md#checkpoint_completion_target) | |
+| [checkpoint_flush_after](resource-consumption/asynchronous-io-operations.md#checkpoint_flush_after) | |
+| [checkpoint_segments](write-ahead-log/checkpoints.md#checkpoint_segments) | |
+| [checkpoint_target_time](./resource-consumption/background-writer.md#checkpoint_target_time) | 5.0.8 - [Enhancement of Dirty Pages Flushing Performance](../../characteristic-description/high-performance/enhancement-of-dirty-pages-flushing-performance.md) |
+| [checkpoint_timeout](write-ahead-log/checkpoints.md#checkpoint_timeout) | |
+| [checkpoint_wait_timeout](write-ahead-log/checkpoints.md#checkpoint_wait_timeout) | |
+| [checkpoint_warning](write-ahead-log/checkpoints.md#checkpoint_warning) | |
+| [client_encoding](default-settings-of-client-connection/zone-and-formatting.md#client_encoding) | |
+| [client_min_messages](error-reporting-and-logging/logging-time.md#client_min_messages) | |
+| [cluster_run_mode](./miscellaneous-parameters.md#cluster_run_mode) | |
+| [cn_send_buffer_size](fault-tolerance.md#cn_send_buffer_size) | |
+| [codegen_cost_threshold](query-planning/other-optimizer-options.md#codegen_cost_threshold) | |
+| [codegen_strategy](query-planning/other-optimizer-options.md#codegen_strategy) | |
+| [comm_proxy_attr](connection-and-authentication/communication-library-parameters.md#comm_proxy_attr) | |
+| [commit_delay](write-ahead-log/settings.md#commit_delay) | |
+| [commit_siblings](write-ahead-log/settings.md#commit_siblings) | |
+| [compress_cost_delay](./backend-compression.md#compress_cost_delay) | |
+| [compress_cost_limit](./backend-compression.md#compress_cost_limit) | |
+| [compress_cost_page_dirty](./backend-compression.md#compress_cost_page_dirty) | |
+| [compress_cost_page_hit](./backend-compression.md#compress_cost_page_hit) | |
+| [compress_cost_page_miss](./backend-compression.md#compress_cost_page_miss) | |
+| [config_file](file-location.md#config_file) | |
+| [connection_alarm_rate](alarm-detection.md#connection_alarm_rate) | |
+| [connection_info](connection-and-authentication/connection-settings.md#connection_info) | |
+| [constraint_exclusion](query-planning/other-optimizer-options.md#constraint_exclusion) | |
+| [convert_string_to_digit](version-and-platform-compatibility/platform-and-client-compatibility.md#convert_string_to_digit) | |
+| [cost_param](query-planning/other-optimizer-options.md#cost_param) | |
+| [cost_weight_index](query-planning/optimizer-method-configuration.md#cost_weight_index) | |
+| [cpu_collect_timer](load-management.md#cpu_collect_timer) | |
+| [cpu_index_tuple_cost](query-planning/optimizer-cost-constants.md#cpu_index_tuple_cost) | |
+| [cpu_operator_cost](query-planning/optimizer-cost-constants.md#cpu_operator_cost) | |
+| [cpu_tuple_cost](query-planning/optimizer-cost-constants.md#cpu_tuple_cost) | |
+| [cross_cluster_replconninfoN](ha-replication/sending-server.md#cross_cluster_replconninfon) | |
+| [cstore_backwrite_max_threshold](resource-consumption/asynchronous-io-operations.md#cstore_backwrite_max_threshold) | |
+| [cstore_backwrite_quantity](resource-consumption/asynchronous-io-operations.md#cstore_backwrite_quantity) | |
+| [cstore_buffers](resource-consumption/memory.md#cstore_buffers) | |
+| [cstore_insert_mode](reserved-parameters.md) | |
+| [cstore_prefetch_quantity](resource-consumption/asynchronous-io-operations.md#cstore_prefetch_quantity) | |
+| [current_logic_cluster](./load-management.md#current_logic_cluster) | |
+| [current_schema](default-settings-of-client-connection/statement-behavior.md#current_schema) | |
+| [cursor_tuple_fraction](query-planning/other-optimizer-options.md#cursor_tuple_fraction) | |
+| [damage_page_ignore](fault-tolerance.md#damage_page_ignore) | 5.0.6 - [Corrupt Files Handling](../../characteristic-description/maintainability/corrupt-files-handling.md) |
+| [data_directory](file-location.md#data_directory) | |
+| [data_replicate_buffer_size](ha-replication/primary-server.md#data_replicate_buffer_size) | |
+| [data_sync_failed_ignore](fault-tolerance.md#data_sync_failed_ignore) | 5.0.6 - [Corrupt Files Handling](../../characteristic-description/maintainability/corrupt-files-handling.md) |
+| [data_sync_retry](fault-tolerance.md#data_sync_retry) | |
+| [datanode_heartbeat_interval](miscellaneous-parameters.md#datanode_heartbeat_interval) | |
+| [DateStyle](default-settings-of-client-connection/zone-and-formatting.md#datestyle) | |
+| [db4ai_snapshot_mode](AI-features.md#db4ai_snapshot_mode) | |
+| [db4ai_snapshot_version_delimiter](AI-features.md#db4ai_snapshot_version_delimiter) | |
+| [db4ai_snapshot_version_separator](AI-features.md#db4ai_snapshot_version_separator) | |
+| [dcf_compress_algorithm](DCF-parameters-settings.md#dcf_compress_algorithm) | |
+| [dcf_compress_level](DCF-parameters-settings.md#dcf_compress_level) | |
+| [dcf_config](DCF-parameters-settings.md#dcf_config) | |
+| [dcf_connect_timeout](DCF-parameters-settings.md#dcf_connect_timeout) | |
+| [dcf_data_path](DCF-parameters-settings.md#dcf_data_path) | |
+| [dcf_election_switch_threshold](DCF-parameters-settings.md#dcf_election_switch_threshold) | |
+| [dcf_election_timeout](DCF-parameters-settings.md#dcf_election_timeout) | |
+| [dcf_enable_auto_election_priority](DCF-parameters-settings.md#dcf_enable_auto_election_priority) | |
+| [dcf_flow_control_cpu_threshold](DCF-parameters-settings.md#dcf_flow_control_cpu_threshold) | |
+| [dcf_flow_control_disk_rawait_threshold](DCF-parameters-settings.md#dcf_flow_control_disk_rawait_threshold) | |
+| [dcf_flow_control_net_queue_message_num_threshold](DCF-parameters-settings.md#dcf_flow_control_net_queue_message_num_threshold) | |
+| [dcf_log_backup_file_count](DCF-parameters-settings.md#dcf_log_backup_file_count) | |
+| [dcf_log_file_permission](DCF-parameters-settings.md#dcf_log_file_permission) | |
+| [dcf_log_level](DCF-parameters-settings.md#dcf_log_level) | |
+| [dcf_log_path](DCF-parameters-settings.md#dcf_log_path) | |
+| [dcf_log_path_permission](DCF-parameters-settings.md#dcf_log_path_permission) | |
+| [dcf_majority_groups](DCF-parameters-settings.md#dcf_majority_groups) | |
+| [dcf_max_log_file_size](DCF-parameters-settings.md#dcf_max_log_file_size) | |
+| [dcf_max_workers](DCF-parameters-settings.md#dcf_max_workers) | |
+| [dcf_mec_agent_thread_num](DCF-parameters-settings.md#dcf_mec_agent_thread_num) | |
+| [dcf_mec_batch_size](DCF-parameters-settings.md#dcf_mec_batch_size) | |
+| [dcf_mec_channel_num](DCF-parameters-settings.md#dcf_mec_channel_num) | |
+| [dcf_mec_fragment_size](DCF-parameters-settings.md#dcf_mec_fragment_size) | |
+| [dcf_mec_pool_max_size](DCF-parameters-settings.md#dcf_mec_pool_max_size) | |
+| [dcf_mec_reactor_thread_num](DCF-parameters-settings.md#dcf_mec_reactor_thread_num) | |
+| [dcf_mem_pool_init_size](DCF-parameters-settings.md#dcf_mem_pool_init_size) | |
+| [dcf_mem_pool_max_size](DCF-parameters-settings.md#dcf_mem_pool_max_size) | |
+| [dcf_node_id](DCF-parameters-settings.md#dcf_node_id) | |
+| [dcf_rep_append_thread_num](DCF-parameters-settings.md#dcf_rep_append_thread_num) | |
+| [dcf_run_mode](DCF-parameters-settings.md#dcf_run_mode) | |
+| [dcf_socket_timeout](DCF-parameters-settings.md#dcf_socket_timeout) | |
+| [dcf_ssl](DCF-parameters-settings.md#dcf_ssl) | |
+| [dcf_stg_pool_init_size](DCF-parameters-settings.md#dcf_stg_pool_init_size) | |
+| [dcf_stg_pool_max_size](DCF-parameters-settings.md#dcf_stg_pool_max_size) | |
+| [dcf_truncate_threshold](DCF-parameters-settings.md#dcf_truncate_threshold) | |
+| [deadlock_timeout](lock-management.md#deadlock_timeout) | |
+| [debug_assertions](developer-options.md#debug_assertions) | |
+| [debug_pretty_print](error-reporting-and-logging/logging-content.md#debug_pretty_print) | |
+| [debug_print_parse](error-reporting-and-logging/logging-content.md#debug_print_parse) | |
+| [debug_print_plan](error-reporting-and-logging/logging-content.md#debug_print_plan) | |
+| [debug_print_rewritten](error-reporting-and-logging/logging-content.md#debug_print_rewritten) | |
+| [debug_select_o](error-reporting-and-logging/logging-content.md#debug_select_o) | 5.0.8 |
+| [default_limit_rows](query-planning/optimizer-method-configuration.md#default_limit_rows) | |
+| [default_statistics_target](query-planning/other-optimizer-options.md#default_statistics_target) | |
+| [default_tablespace](default-settings-of-client-connection/statement-behavior.md#default_tablespace) | |
+| [default_text_search_config](default-settings-of-client-connection/zone-and-formatting.md#default_text_search_config) | |
+| [default_transaction_deferrable](default-settings-of-client-connection/statement-behavior.md#default_transaction_deferrable) | |
+| [default_transaction_isolation](default-settings-of-client-connection/statement-behavior.md#default_transaction_isolation) | |
+| [default_transaction_read_only](default-settings-of-client-connection/statement-behavior.md#default_transaction_read_only) | |
+| [default_with_oids](./version-and-platform-compatibility/compatibility-with-earlier-versions.md#default_with_oids) | |
+| [defer_csn_cleanup_time](automatic-vacuuming.md#defer_csn_cleanup_time) | |
+| [delimiter_name](./delimiter.md#delimiter_name) | |
+| [dfs_partition_directory_length](./miscellaneous-parameters.md#dfs_partition_directory_length) | |
+| [dirty_page_percent_max](resource-consumption/background-writer.md#dirty_page_percent_max) | |
+| [disable_memory_protect](load-management.md#disable_memory_protect) | |
+| [dolphin_server_port](./connection-and-authentication/connection-settings.md#dolphin_server_port) | |
+| [dw_file_num](resource-consumption/background-writer.md#dw_file_num) | |
+| [dw_file_size](resource-consumption/background-writer.md#dw_file_size) | |
+| [dynamic_library_path](default-settings-of-client-connection/other-default-parameters.md#dynamic_library_path) | |
+| [effective_cache_size](query-planning/optimizer-cost-constants.md#effective_cache_size) | |
+| [effective_io_concurrency](resource-consumption/asynchronous-io-operations.md#effective_io_concurrency) | |
+| [elastic_search_ip_addr](security-configuration.md#elastic_search_ip_addr) | |
+| [emit_illegal_bind_chars](miscellaneous-parameters.md#emit_illegal_bind_chars) | 5.0.2 - [Error When Writing Illegal Characters](../../characteristic-description/maintainability/error-when-writing-illegal-characters.md) |
+| [enable_absolute_tablespace](query-planning/optimizer-method-configuration.md#enable_absolute_tablespace) | |
+| [enable_accept_empty_str](./query-planning/other-optimizer-options.md#enable_accept_empty_str) | |
+| [enable_access_server_directory](auditing/operation-audit.md#enable_access_server_directory) | |
+| [enable_adaptive_hashagg](./query-planning/optimizer-method-configuration.md#enable_adaptive_hashagg) | |
+| [enable_adio_debug](resource-consumption/asynchronous-io-operations.md#enable_adio_debug) | |
+| [enable_adio_function](resource-consumption/asynchronous-io-operations.md#enable_adio_function) | |
+| [enable_ai_stats](./AI-features.md#enable_ai_stats) | |
+| [enable_alarm](alarm-detection.md#enable_alarm) | |
+| [enable_analyze_check](query-planning/other-optimizer-options.md#enable_analyze_check) | |
+| [enable_asp](system-performance-snapshot.md#enable_asp) | |
+| [enable_auto_clean_unique_sql](query.md#enable_auto_clean_unique_sql) | |
+| [enable_auto_explain](query-planning/other-optimizer-options.md#enable_auto_explain) | |
+| [enable_availablezone](./ha-replication/sending-server.md#enable_availablezone) | |
+| [enable_backend_compress](./backend-compression.md#enable_backend_compress) | |
+| [enable_batch_dispatch](write-ahead-log/log-replay.md#enable_batch_dispatch) | |
+| [enable_bbox_dump](load-management.md#enable_bbox_dump) | |
+| [enable_beta_features](version-and-platform-compatibility/compatibility-with-earlier-versions.md#enable_beta_features) | |
+| [enable_beta_opfusion](developer-options.md#enable_beta_opfusion) | |
+| [enable_bitmapscan](query-planning/optimizer-method-configuration.md#enable_bitmapscan) | |
+| [enable_bloom_filter](query-planning/other-optimizer-options.md#enable_bloom_filter) | |
+| [enable_broadcast](./query-planning/optimizer-method-configuration.md#enable_broadcast) | |
+| [enable_cachedplan_mgr](./AI-features.md#enable_cachedplan_mgr) | |
+| [enable_cbm_tracking](backup-and-restoration-parameter.md#enable_cbm_tracking) | |
+| [enable_change_hjcost](query-planning/optimizer-method-configuration.md#enable_change_hjcost) | |
+| [enable_codegen](query-planning/other-optimizer-options.md#enable_codegen) | |
+| [enable_codegen_print](query-planning/other-optimizer-options.md#enable_codegen_print) | |
+| [enable_compress_hll](./HyperLogLog.md#enable_compress_hll) | |
+| [enable_compress_spill](developer-options.md#enable_compress_spill) | |
+| [enable_compression_check](./backend-compression.md#enable_compression_check) | |
+| [enable_consider_usecount](resource-consumption/background-writer.md#enable_consider_usecount) | |
+| [enable_constraint_optimization](reserved-parameters.md) | |
+| [enable_copy_server_files](./data-import-export.md#enable_copy_server_files) | |
+| [enable_csqual_pushdown](./developer-options.md#enable_csqual_pushdown) | |
+| [enable_custom_parser](./version-and-platform-compatibility/platform-and-client-compatibility.md#enable_custom_parser) | |
+| [enable_data_replicate](ha-replication/primary-server.md#enable_data_replicate) | |
+| [enable_date_operator_sub_oracle](./version-and-platform-compatibility/platform-and-client-compatibility.md#enable_date_operator_sub_oracle) | 5.0.0 - [Support Subtracting Two Date Types To Return Numeric Type](../../characteristic-description/compatibility/support-subtracting-two-date-types-to-return-numeric-type.md) |
+| [enable_dcf](./DCF-parameters-settings.md#enable_dcf) | |
+| [enable_debug_vacuum](error-reporting-and-logging/logging-content.md#enable_debug_vacuum) | |
+| [enable_default_cfunc_libpath](file-location.md#enable_default_cfunc_libpath) | |
+| [enable_default_compression_table](./backend-compression.md#enable_default_compression_table) | |
+| [enable_default_index_compression](./backend-compression.md#enable_default_index_compression) | |
+| [enable_default_ustore_table](miscellaneous-parameters.md#enable_default_ustore_table) | |
+| [enable_defer_calculate_snapshot](MogDB-transaction.md#enable_defer_calculate_snapshot) | |
+| [enable_delta_store](./data-import-export.md#enable_delta_store) | |
+| [enable_ddl_logical_record](./ha-replication/sending-server.md#enable_ddl_logical_record) | 5.0.8 - [Logical Decoding Support for DDL](../../developer-guide/logical-replication/logical-decoding/logical-decoding-support-for-DDL.md) |
+| [enable_dolphin_proto](./connection-and-authentication/connection-settings.md#enable_dolphin_proto) | |
+| [enable_double_write](write-ahead-log/checkpoints.md#enable_double_write) | |
+| [enable_early_free](resource-consumption/memory.md#enable_early_free) | |
+| [enable_event_trigger_a_mode](./miscellaneous-parameters.md#enable_event_trigger_a_mode) | |
+| [enable_expr_fusion](./query-planning/optimizer-method-configuration.md#enable_expr_fusion) | |
+| [enable_extrapolation_stats](query-planning/other-optimizer-options.md#enable_extrapolation_stats) | |
+| [enable_fast_allocate](resource-consumption/asynchronous-io-operations.md#enable_fast_allocate) | |
+| [enable_fast_numeric](developer-options.md#enable_fast_numeric) | |
+| [enable_ffic_log](load-management.md#enable_ffic_log) | |
+| [enable_force_vector_engine](query-planning/other-optimizer-options.md#enable_force_vector_engine) | |
+| [enable_functional_dependency](query-planning/other-optimizer-options.md#enable_functional_dependency) | |
+| [enable_global_plancache](query-planning/other-optimizer-options.md#enable_global_plancache) | |
+| [enable_global_stats](query-planning/other-optimizer-options.md#enable_global_stats) | |
+| [enable_global_syscache](global-syscache-parameters.md#enable_global_syscache) | |
+| [enable_gtt_concurrent_truncate](./global-temporary-table.md#enable_gtt_concurrent_truncate) | |
+| [enable_hadoop_env](reserved-parameters.md) | |
+| [enable_hashagg](query-planning/optimizer-method-configuration.md#enable_hashagg) | |
+| [enable_hashjoin](query-planning/optimizer-method-configuration.md#enable_hashjoin) | |
+| [enable_hdfs_predicate_pushdown](reserved-parameters.md) | |
+| [enable_heap_async_prefetch](thread-pool.md#enable_heap_async_prefetch) | 5.0.8 - [Sequential Scan Prefetch](../../characteristic-description/high-performance/seqscan-prefetch.md) |
+| [enable_hypo_index](query-planning/other-optimizer-options.md#enable_hypo_index) | |
+| [enable_incremental_catchup](ha-replication/primary-server.md#enable_incremental_catchup) | |
+| [enable_incremental_checkpoint](write-ahead-log/checkpoints.md#enable_incremental_checkpoint) | |
+| [enable_incremental_sort](./query-planning/optimizer-method-configuration.md#enable_incremental_sort) | 3.1.0 - [Sorting Operator Optimization](../../characteristic-description/high-performance/ordering-operator-optimization.md) |
+| [enable_index_nestloop](query-planning/optimizer-method-configuration.md#enable_index_nestloop) | |
+| [enable_indexonlyscan](query-planning/optimizer-method-configuration.md#enable_indexonlyscan) | |
+| [enable_indexscan](query-planning/optimizer-method-configuration.md#enable_indexscan) | |
+| [enable_indexscan_optimization](./query-planning/other-optimizer-options.md#enable_indexscan_optimization) | |
+| [enable_inner_unique_opt](./query-planning/optimizer-method-configuration.md#enable_inner_unique_opt) | |
+| [enable_instance_metric_persistent](load-management.md#enable_instance_metric_persistent) | |
+| [enable_instr_cpu_timer](query.md#enable_instr_cpu_timer) | |
+| [enable_instr_rt_percentile](query.md#enable_instr_rt_percentile) | |
+| [enable_instr_track_wait](wait-events.md#enable_instr_track_wait) | |
+| [enable_ios](thread-pool.md#enable_ios) | 5.0.8 - [Sequential Scan Prefetch](../../characteristic-description/high-performance/seqscan-prefetch.md) |
+| [enable_kill_query](query-planning/optimizer-method-configuration.md#enable_kill_query) | |
+| [enable_logical_io_statistics](load-management.md#enable_logical_io_statistics) | |
+| [enable_material](query-planning/optimizer-method-configuration.md#enable_material) | |
+| [enable_memory_context_control](resource-consumption/memory.md#enable_memory_context_control) | |
+| [enable_memory_limit](resource-consumption/memory.md#enable_memory_limit) | |
+| [enable_mergejoin](query-planning/optimizer-method-configuration.md#enable_mergejoin) | |
+| [enable_mergeinto_subqueryalias](./version-and-platform-compatibility/platform-and-client-compatibility.md#enable_mergeinto_subqueryalias) | 5.0.8 |
+| [enable_mix_replication](ha-replication/primary-server.md#enable_mix_replication) | |
+| [enable_multitable_update](./version-and-platform-compatibility/platform-and-client-compatibility.md#enable_multitable_update) | 5.0.8 |
+| [enable_nestloop](query-planning/optimizer-method-configuration.md#enable_nestloop) | |
+| [enable_nonsysadmin_execute_direct](auditing/operation-audit.md#enable_nonsysadmin_execute_direct) | |
+| [enable_nvm](./multi-level-cache-management-parameters.md#enable_nvm) | |
+| [enable_online_ddl_waitlock](lock-management.md#enable_online_ddl_waitlock) | |
+| [enable_opfusion](query-planning/other-optimizer-options.md#enable_opfusion) | |
+| [enable_orc_cache](reserved-parameters.md) | |
+| [enable_page_compression](./backend-compression.md#enable_page_compression) | |
+| [enable_page_lsn_check](write-ahead-log/log-replay.md#enable_page_lsn_check) | |
+| [enable_partition_opfusion](query-planning/other-optimizer-options.md#enable_partition_opfusion) | |
+| [enable_partitionwise](query-planning/other-optimizer-options.md#enable_partitionwise) | |
+| [enable_pbe_optimization](query-planning/other-optimizer-options.md#enable_pbe_optimization) | |
+| [enable_prevent_job_task_startup](scheduled-task.md#enable_prevent_job_task_startup) | |
+| [enable_recyclebin](flashback.md#enable_recyclebin) | |
+| [enable_remote_excute](writer-statement-parameters-supported-by-standby-server.md#enable_remote_excute) | |
+| [enable_resource_record](load-management.md#enable_resource_record) | |
+| [enable_resource_track](load-management.md#enable_resource_track) | |
+| [enable_save_confirmed_lsn](./ha-replication/primary-server.md#enable_save_confirmed_lsn) | |
+| [enable_save_datachanged_timestamp](statistics-during-the-database-running/query-and-index-statistics-collector.md#enable_save_datachanged_timestamp) | |
+| [enable_security_policy](security-configuration.md#enable_security_policy) | |
+| [enable_segment](./miscellaneous-parameters.md#enable_segment) | |
+| [enable_seqscan](query-planning/optimizer-method-configuration.md#enable_seqscan) | |
+| [enable_seqscan_fusion](./miscellaneous-parameters.md#enable_seqscan_fusion) | |
+| [enable_set_variables_b_format](version-and-platform-compatibility/platform-and-client-compatibility.md#enable_set_variables_b_format) | |
+| [enable_show_any_tuples](MogDB-transaction.md#enable_show_any_tuples) | |
+| [enable_slot_log](ha-replication/sending-server.md#enable_slot_log) | |
+| [enable_slow_query_log](./query.md#enable_slow_query_log) | |
+| [enable_sonic_hashagg](query-planning/other-optimizer-options.md#enable_sonic_hashagg) | |
+| [enable_sonic_hashjoin](query-planning/other-optimizer-options.md#enable_sonic_hashjoin) | |
+| [enable_sonic_optspill](query-planning/other-optimizer-options.md#enable_sonic_optspill) | |
+| [enable_sort](query-planning/optimizer-method-configuration.md#enable_sort) | |
+| [enable_sse42](./query-planning/other-optimizer-options.md#enable_sse42) | |
+| [enable_startwith_debug](query-planning/other-optimizer-options.md#enable_startwith_debug) | |
+| [enable_stmt_track](query.md#enable_stmt_track) | |
+| [enable_stream_replication](ha-replication/primary-server.md#enable_stream_replication) | |
+| [enable_tde](security-configuration.md#enable_tde) | |
+| [enable_thread_pool](thread-pool.md#enable_thread_pool) | |
+| [enable_tidrangescan](./miscellaneous-parameters.md#enable_tidrangescan) | |
+| [enable_tidscan](query-planning/optimizer-method-configuration.md#enable_tidscan) | |
+| [enable_time_report](write-ahead-log/log-replay.md#enable_time_report) | |
+| [enable_uheap_async_prefetch](thread-pool.md#enable_uheap_async_prefetch) | 5.0.8 - [Sequential Scan Prefetch](../../characteristic-description/high-performance/seqscan-prefetch.md) |
+| [enable_upgrade_merge_lock_mode](miscellaneous-parameters.md#enable_upgrade_merge_lock_mode) | |
+| [enable_user_metric_persistent](load-management.md#enable_user_metric_persistent) | |
+| [enable_ustore](miscellaneous-parameters.md#enable_ustore) | |
+| [enable_valuepartition_pruning](query-planning/optimizer-method-configuration.md#enable_valuepartition_pruning) | |
+| [enable_vector_engine](query-planning/optimizer-method-configuration.md#enable_vector_engine) | |
+| [enable_wal_shipping_compression](ha-replication/sending-server.md#enable_wal_shipping_compression) | |
+| [enable_walrcv_reply_dueto_commit](./write-ahead-log/log-replay.md#enable_walrcv_reply_dueto_commit) | |
+| [enable_wdr_snapshot](system-performance-snapshot.md#enable_wdr_snapshot) | |
+| [enable_xlog_prune](write-ahead-log/checkpoints.md#enable_xlog_prune) | |
+| [enableSeparationOfDuty](auditing/operation-audit.md#enableseparationofduty) | |
+| [enforce_a_behavior](query-planning/optimizer-method-configuration.md#enforce_a_behavior) | |
+| [escape_string_warning](version-and-platform-compatibility/compatibility-with-earlier-versions.md#escape_string_warning) | |
+| [event_source](error-reporting-and-logging/logging-destination.md#event_source) | |
+| [exit_on_error](fault-tolerance.md#exit_on_error) | |
+| [explain_dna_file](query-planning/other-optimizer-options.md#explain_dna_file) | |
+| [explain_perf_mode](query-planning/other-optimizer-options.md#explain_perf_mode) | |
+| [external_pid_file](file-location.md#external_pid_file) | |
+| [extra_float_digits](default-settings-of-client-connection/zone-and-formatting.md#extra_float_digits) | |
+| [extreme_flush_dirty_page](./resource-consumption/background-writer.md#extreme_flush_dirty_page) | 5.0.8 - [Enhancement of Dirty Pages Flushing Performance](../../characteristic-description/high-performance/enhancement-of-dirty-pages-flushing-performance.md) |
+| [failed_login_attempts](connection-and-authentication/security-and-authentication.md#failed_login_attempts) | |
+| [fast_extend_file_size](resource-consumption/asynchronous-io-operations.md#fast_extend_file_size) | |
+| [fault_mon_timeout](lock-management.md#fault_mon_timeout) | |
+| [FencedUDFMemoryLimit](guc-user-defined-functions.md#fencedudfmemorylimit) | |
+| [force_bitmapand](query-planning/optimizer-method-configuration.md#force_bitmapand) | |
+| [force_promote](write-ahead-log/settings.md#force_promote) | |
+| [force_tidrangescan](./miscellaneous-parameters.md#force_tidrangescan) | |
+| [from_collapse_limit](query-planning/other-optimizer-options.md#from_collapse_limit) | |
+| [fsync](write-ahead-log/settings.md#fsync) | |
+| [full_audit_users](./auditing/user-and-permission-audit.md#full_audit_users) | |
+| [full_page_writes](write-ahead-log/settings.md#full_page_writes) | |
+| [geqo](query-planning/genetic-query-optimizer.md#geqo) | |
+| [geqo_effort](query-planning/genetic-query-optimizer.md#geqo_effort) | |
+| [geqo_generations](query-planning/genetic-query-optimizer.md#geqo_generations) | |
+| [geqo_pool_size](query-planning/genetic-query-optimizer.md#geqo_pool_size) | |
+| [geqo_seed](query-planning/genetic-query-optimizer.md#geqo_seed) | |
+| [geqo_selection_bias](query-planning/genetic-query-optimizer.md#geqo_selection_bias) | |
[geqo_threshold](query-planning/genetic-query-optimizer.md#geqo_threshold) | | +| [gin_fuzzy_search_limit](default-settings-of-client-connection/other-default-parameters.md#gin_fuzzy_search_limit) | | +| [gin_pending_list_limit](default-settings-of-client-connection/statement-behavior.md#gin_pending_list_limit) | | +| [global_syscache_threshold](global-syscache-parameters.md#global_syscache_threshold) | | +| [gpc_clean_timeout](query-planning/other-optimizer-options.md#gpc_clean_timeout) | | +| [group_concat_max_len](miscellaneous-parameters.md#group_concat_max_len) | | +| [gs_clean_timeout](lock-management.md#gs_clean_timeout) | | +| [ha_module_debug](ha-replication/primary-server.md#ha_module_debug) | | +| [hadr_max_size_for_xlog_receiver](backup-and-restoration-parameter.md#hadr_max_size_for_xlog_receiver) | | +| [hadr_recovery_point_target](./ha-replication/primary-server.md#hadr_recovery_point_target) | | +| [hadr_recovery_time_target](./ha-replication/primary-server.md#hadr_recovery_time_target) | | +| [hadr_super_user_record_path](./ha-replication/primary-server.md#hadr_super_user_record_path) | | +| [handle_toast_in_autovac](./automatic-vacuuming.md#handle_toast_in_autovac) | | +| [hash_agg_total_cost_ratio](./query-planning/optimizer-cost-constants.md#hash_agg_total_cost_ratio) | | +| [hash_join_total_cost_ratio](./query-planning/optimizer-cost-constants.md#hash_join_total_cost_ratio) | | +| [hashagg_table_size](query-planning/other-optimizer-options.md#hashagg_table_size) | | +| [hba_file](file-location.md#hba_file) | | +| [hll_default_expthresh](./HyperLogLog.md#hll_default_expthresh) | | +| [hll_default_log2explicit](HyperLogLog.md#hll_default_log2explicit) | | +| [hll_default_log2m](HyperLogLog.md#hll_default_log2m) | | +| [hll_default_log2sparse](HyperLogLog.md#hll_default_log2sparse) | | +| [hll_default_regwidth](./HyperLogLog.md#hll_default_regwidth) | | +| [hll_default_sparseon](./HyperLogLog.md#hll_default_sparseon) | | +| [hll_duplicate_check](HyperLogLog.md#hll_duplicate_check) | | +| [hll_max_sparse](./HyperLogLog.md#hll_max_sparse) | | +| [hot_standby](ha-replication/standby-server.md#hot_standby) | | +| [hot_standby_feedback](ha-replication/standby-server.md#hot_standby_feedback) | | +| [ident_file](file-location.md#ident_file) | | +| [idle_in_transaction_session_timeout](connection-and-authentication/security-and-authentication.md#idle_in_transaction_session_timeout) | | +| [ifnull_all_return_text](./developer-options.md#ifnull_all_return_text) | | +| [ignore_checksum_failure](developer-options.md#ignore_checksum_failure) | | +| [ignore_system_indexes](developer-options.md#ignore_system_indexes) | | +| [incremental_checkpoint_timeout](write-ahead-log/checkpoints.md#incremental_checkpoint_timeout) | | +| [instance_metric_retention_time](./load-management.md#instance_metric_retention_time) | | +| [instr_rt_percentile_interval](query.md#instr_rt_percentile_interval) | | +| [instr_unique_sql_count](query.md#instr_unique_sql_count) | | +| [instr_unique_sql_track_type](query.md#instr_unique_sql_track_type) | | +| [integer_datetimes](miscellaneous-parameters.md#integer_datetimes) | | +| [IntervalStyle](default-settings-of-client-connection/zone-and-formatting.md#intervalstyle) | | +| [io_control_unit](load-management.md#io_control_unit) | | +| [io_limits](load-management.md#io_limits) | | +| [io_priority](load-management.md#io_priority) | | +| [ios_batch_read_size](thread-pool.md#ios_batch_read_size) | 5.0.8 - [Sequential Scan 
Prefetch](../../characteristic-description/high-performance/seqscan-prefetch.md) | +| [ios_status_update_gap](thread-pool.md#ios_status_update_gap) | 5.0.8 - [Sequential Scan Prefetch](../../characteristic-description/high-performance/seqscan-prefetch.md) | +| [ios_worker_num](thread-pool.md#ios_worker_num) | 5.0.8 - [Sequential Scan Prefetch](../../characteristic-description/high-performance/seqscan-prefetch.md) | +| [job_queue_processes](scheduled-task.md#job_queue_processes) | | +| [join_collapse_limit](query-planning/other-optimizer-options.md#join_collapse_limit) | | +| [keep_sync_window](ha-replication/primary-server.md#keep_sync_window) | | +| [krb_caseins_users](connection-and-authentication/security-and-authentication.md#krb_caseins_users) | | +| [krb_server_keyfile](connection-and-authentication/security-and-authentication.md#krb_server_keyfile) | | +| [krb_srvname](connection-and-authentication/security-and-authentication.md#krb_srvname) | | +| [lastval_supported](version-and-platform-compatibility/platform-and-client-compatibility.md#lastval_supported) | | +| [lc_collate](miscellaneous-parameters.md#lc_collate) | | +| [lc_ctype](miscellaneous-parameters.md#lc_ctype) | | +| [lc_messages](default-settings-of-client-connection/zone-and-formatting.md#lc_messages) | | +| [lc_monetary](default-settings-of-client-connection/zone-and-formatting.md#lc_monetary) | | +| [lc_numeric](default-settings-of-client-connection/zone-and-formatting.md#lc_numeric) | | +| [lc_time](default-settings-of-client-connection/zone-and-formatting.md#lc_time) | | +| [light_comm](./connection-and-authentication/connection-settings.md#light_comm) | | +| [listen_addresses](connection-and-authentication/connection-settings.md#listen_addresses) | | +| [lo_compat_privileges](version-and-platform-compatibility/compatibility-with-earlier-versions.md#lo_compat_privileges) | | +| [local_bind_address](connection-and-authentication/connection-settings.md#local_bind_address) | | +| [local_preload_libraries](default-settings-of-client-connection/other-default-parameters.md#local_preload_libraries) | | +| [local_syscache_threshold](resource-consumption/memory.md#local_syscache_threshold) | | +| [lockwait_timeout](lock-management.md#lockwait_timeout) | | +| [log_autovacuum_min_duration](automatic-vacuuming.md#log_autovacuum_min_duration) | | +| [log_checkpoints](error-reporting-and-logging/logging-content.md#log_checkpoints) | | +| [log_connections](error-reporting-and-logging/logging-content.md#log_connections) | | +| [log_destination](error-reporting-and-logging/logging-destination.md#log_destination) | | +| [log_directory](error-reporting-and-logging/logging-destination.md#log_directory) | | +| [log_disconnections](error-reporting-and-logging/logging-content.md#log_disconnections) | | +| [log_duration](error-reporting-and-logging/logging-content.md#log_duration) | | +| [log_error_verbosity](error-reporting-and-logging/logging-content.md#log_error_verbosity) | | +| [log_executor_stats](query-planning/other-optimizer-options.md#log_executor_stats) | | +| [log_file_mode](error-reporting-and-logging/logging-destination.md#log_file_mode) | | +| [log_filename](error-reporting-and-logging/logging-destination.md#log_filename) | | +| [log_hostname](error-reporting-and-logging/logging-content.md#log_hostname) | | +| [log_line_prefix](error-reporting-and-logging/logging-content.md#log_line_prefix) | | +| [log_lock_waits](error-reporting-and-logging/logging-content.md#log_lock_waits) | | +| 
[log_min_duration_statement](error-reporting-and-logging/logging-time.md#log_min_duration_statement) | | +| [log_min_error_statement](error-reporting-and-logging/logging-time.md#log_min_error_statement) | | +| [log_min_messages](error-reporting-and-logging/logging-time.md#log_min_messages) | | +| [log_pagewriter](developer-options.md#log_pagewriter) | | +| [log_parser_stats](query-planning/other-optimizer-options.md#log_parser_stats) | | +| [log_planner_stats](query-planning/other-optimizer-options.md#log_planner_stats) | | +| [log_rotation_age](error-reporting-and-logging/logging-destination.md#log_rotation_age) | | +| [log_rotation_size](error-reporting-and-logging/logging-destination.md#log_rotation_size) | | +| [log_statement](error-reporting-and-logging/logging-content.md#log_statement) | | +| [log_statement_stats](query-planning/other-optimizer-options.md#log_statement_stats) | | +| [log_temp_files](error-reporting-and-logging/logging-content.md#log_temp_files) | | +| [log_timezone](error-reporting-and-logging/logging-content.md#log_timezone) | | +| [log_truncate_on_rotation](error-reporting-and-logging/logging-destination.md#log_truncate_on_rotation) | | +| [logging_collector](error-reporting-and-logging/logging-destination.md#logging_collector) | | +| [logging_module](error-reporting-and-logging/logging-content.md#logging_module) | | +| [logical_decode_options_default](./ha-replication/sending-server.md#logical_decode_options_default) | | +| [logical_sender_timeout](./ha-replication/sending-server.md#logical_sender_timeout) | | +| [maintenance_work_mem](resource-consumption/memory.md#maintenance_work_mem) | | +| [max_active_global_temporary_table](global-temporary-table.md#max_active_global_temporary_table) | | +| [max_cached_tuplebufs](ha-replication/sending-server.md#max_cached_tuplebufs) | | +| [max_changes_in_memory](ha-replication/sending-server.md#max_changes_in_memory) | | +| [max_compile_functions](default-settings-of-client-connection/statement-behavior.md#max_compile_functions) | | +| [max_concurrent_autonomous_transactions](miscellaneous-parameters.md#max_concurrent_autonomous_transactions) | | +| [max_connections](connection-and-authentication/connection-settings.md#max_connections) | | +| [max_error_count](./error-reporting-and-logging/logging-content.md#max_error_count) | | +| [max_files_per_process](resource-consumption/kernel-resource-usage.md#max_files_per_process) | | +| [max_function_args](version-and-platform-compatibility/platform-and-client-compatibility.md#max_function_args) | | +| [max_identifier_length](miscellaneous-parameters.md#max_identifier_length) | | +| [max_index_keys](miscellaneous-parameters.md#max_index_keys) | | +| [max_inner_tool_connections](connection-and-authentication/connection-settings.md#max_inner_tool_connections) | | +| [max_io_capacity](resource-consumption/background-writer.md#max_io_capacity) | | +| [max_keep_log_seg](./ha-replication/sending-server.md#max_keep_log_seg) | | +| [max_loaded_cudesc](resource-consumption/memory.md#max_loaded_cudesc) | | +| [max_locks_per_transaction](lock-management.md#max_locks_per_transaction) | | +| [max_logical_replication_workers](ha-replication/standby-server.md#max_logical_replication_workers) | | +| [max_pred_locks_per_transaction](lock-management.md#max_pred_locks_per_transaction) | | +| [max_prepared_transactions](resource-consumption/memory.md#max_prepared_transactions) | | +| [max_process_memory](resource-consumption/memory.md#max_process_memory) | | +| 
[max_query_retry_times](fault-tolerance.md#max_query_retry_times) | | +| [max_recursive_times](query-planning/optimizer-method-configuration.md#max_recursive_times) | | +| [max_redo_log_size](write-ahead-log/checkpoints.md#max_redo_log_size) | | +| [max_replication_slots](ha-replication/sending-server.md#max_replication_slots) | | +| [max_requests_per_worker](thread-pool.md#max_requests_per_worker) | 5.0.8 - [Sequential Scan Prefetch](../../characteristic-description/high-performance/seqscan-prefetch.md) | +| [max_resource_package](./miscellaneous-parameters.md#max_resource_package) | | +| [max_size_for_xlog_prune](write-ahead-log/checkpoints.md#max_size_for_xlog_prune) | | +| [max_stack_depth](resource-consumption/memory.md#max_stack_depth) | | +| [max_standby_archive_delay](ha-replication/standby-server.md#max_standby_archive_delay) | | +| [max_standby_streaming_delay](ha-replication/standby-server.md#max_standby_streaming_delay) | | +| [max_sync_workers_per_subscription](ha-replication/standby-server.md#max_sync_workers_per_subscription) | | +| [max_undo_workers](rollback-parameters.md#max_undo_workers) | | +| [max_user_defined_exception](developer-options.md#max_user_defined_exception) | | +| [max_wal_senders](ha-replication/sending-server.md#max_wal_senders) | | +| [memory_detail_tracking](load-management.md#memory_detail_tracking) | | +| [memory_fault_percent](load-management.md#memory_fault_percent) | | +| [memory_trace_level](./resource-consumption/memory.md#memory_trace_level) | | +| [memory_tracking_mode](load-management.md#memory_tracking_mode) | | +| [memorypool_enable](resource-consumption/memory.md#memorypool_enable) | | +| [memorypool_size](resource-consumption/memory.md#memorypool_size) | | +| [merge_join_total_cost_ratio](./query-planning/optimizer-cost-constants.md#merge_join_total_cost_ratio) | | +| [min_table_block_num_enable_ios](thread-pool.md#min_table_block_num_enable_ios) | 5.0.8 - [Sequential Scan Prefetch](../../characteristic-description/high-performance/seqscan-prefetch.md) | +| [min_uheap_table_block_num_enable_ios](thread-pool.md#min_uheap_table_block_num_enable_ios) | 5.0.8 - [Sequential Scan Prefetch](../../characteristic-description/high-performance/seqscan-prefetch.md) | +| [modify_initial_password](connection-and-authentication/security-and-authentication.md#modify_initial_password) | | +| [most_available_sync](ha-replication/primary-server.md#most_available_sync) | | +| [multi_stats_type](./AI-features.md#multi_stats_type) | | +| [nestloop_total_cost_ratio](./query-planning/optimizer-cost-constants.md#nestloop_total_cost_ratio) | | +| [ngram_gram_size](query-planning/other-optimizer-options.md#ngram_gram_size) | | +| [ngram_grapsymbol_ignore](query-planning/other-optimizer-options.md#ngram_grapsymbol_ignore) | | +| [ngram_punctuation_ignore](query-planning/other-optimizer-options.md#ngram_punctuation_ignore) | | +| [nls_timestamp_format](version-and-platform-compatibility/platform-and-client-compatibility.md#nls_timestamp_format) | | +| [no_audit_client](./auditing/user-and-permission-audit.md#no_audit_client) | | +| [num_internal_lock_partitions](lock-management.md#num_internal_lock_partitions) | | +| [numa_distribute_mode](developer-options.md#numa_distribute_mode) | | +| [nvm_buffers](./multi-level-cache-management-parameters.md#nvm_buffers) | | +| [nvm_file_path](./multi-level-cache-management-parameters.md#nvm_file_path) | | +| [omit_encoding_error](fault-tolerance.md#omit_encoding_error) | | +| 
[operation_mode](backup-and-restoration-parameter.md#operation_mode) | | +| [opfusion_debug_mode](error-reporting-and-logging/logging-content.md#opfusion_debug_mode) | | +| [ora_dblink_col_case_sensitive](./version-and-platform-compatibility/platform-and-client-compatibility.md#ora_dblink_col_case_sensitive) | | +| [pagewriter_sleep](resource-consumption/background-writer.md#pagewriter_sleep) | | +| [pagewriter_thread_num](resource-consumption/background-writer.md#pagewriter_thread_num) | | +| [parallel_recovery_batch](write-ahead-log/log-replay.md#parallel_recovery_batch) | | +| [parallel_recovery_dispatch_algorithm](write-ahead-log/log-replay.md#parallel_recovery_dispatch_algorithm) | | +| [parallel_recovery_timeout](write-ahead-log/log-replay.md#parallel_recovery_timeout) | | +| [parallel_scan_gap](thread-pool.md#parallel_scan_gap) | 5.0.8 - [Sequential Scan Prefetch](../../characteristic-description/high-performance/seqscan-prefetch.md) | +| [partition_lock_upgrade_timeout](lock-management.md#partition_lock_upgrade_timeout) | | +| [partition_max_cache_size](./data-import-export.md#partition_max_cache_size) | | +| [partition_mem_batch](./data-import-export.md#partition_mem_batch) | | +| [partition_page_estimation](query-planning/other-optimizer-options.md#partition_page_estimation) | | +| [password_effect_time](connection-and-authentication/security-and-authentication.md#password_effect_time) | | +| [password_encryption_type](connection-and-authentication/security-and-authentication.md#password_encryption_type) | | +| [password_lock_time](connection-and-authentication/security-and-authentication.md#password_lock_time) | | +| [password_max_length](connection-and-authentication/security-and-authentication.md#password_max_length) | | +| [password_min_digital](connection-and-authentication/security-and-authentication.md#password_min_digital) | | +| [password_min_length](connection-and-authentication/security-and-authentication.md#password_min_length) | | +| [password_min_lowercase](connection-and-authentication/security-and-authentication.md#password_min_lowercase) | | +| [password_min_special](connection-and-authentication/security-and-authentication.md#password_min_special) | | +| [password_min_uppercase](connection-and-authentication/security-and-authentication.md#password_min_uppercase) | | +| [password_notify_time](connection-and-authentication/security-and-authentication.md#password_notify_time) | | +| [password_policy](connection-and-authentication/security-and-authentication.md#password_policy) | | +| [password_reuse_max](connection-and-authentication/security-and-authentication.md#password_reuse_max) | | +| [password_reuse_time](connection-and-authentication/security-and-authentication.md#password_reuse_time) | | +| [pca_shared_buffer](parameters-related-to-efficient-data-compression-algorithms.md#pca_shared_buffer) | | +| [percentile](query.md#percentile) | | +| [perf_directory](./query.md#perf_directory) | | +| [pgxc_node_name](MogDB-transaction.md#pgxc_node_name) | | +| [plan_cache_mode](query-planning/other-optimizer-options.md#plan_cache_mode) | | +| [plan_cache_type_validation](./miscellaneous-parameters.md#plan_cache_type_validation) | | +| [plan_mode_seed](query-planning/other-optimizer-options.md#plan_mode_seed) | | +| [pldebugger_timeout](developer-options.md#pldebugger_timeout) | | +| [pljava_vmoptions](guc-user-defined-functions.md#pljava_vmoptions) | | +| [plog_merge_age](error-reporting-and-logging/logging-time.md#plog_merge_age) | | +| 
[plsql_compile_check_options](./version-and-platform-compatibility/platform-and-client-compatibility.md#plsql_compile_check_options) | | +| [plsql_show_all_error](developer-options.md#plsql_show_all_error) | | +| [port](connection-and-authentication/connection-settings.md#port) | | +| [post_auth_delay](developer-options.md#post_auth_delay) | | +| [pre_auth_delay](developer-options.md#pre_auth_delay) | | +| [prefetch_protect_time](thread-pool.md#prefetch_protect_time) | 5.0.8 - [Sequential Scan Prefetch](../../characteristic-description/high-performance/seqscan-prefetch.md) | +| [prefetch_quantity](resource-consumption/asynchronous-io-operations.md#prefetch_quantity) | | +| [primary_slotname](ha-replication/standby-server.md#primary_slotname) | | +| [proc_inparam_immutable](./version-and-platform-compatibility/platform-and-client-compatibility.md#proc_inparam_immutable) | 5.0.0 - [Support For Constants In Package As Default Values](../../characteristic-description/compatibility/support-for-constants-in-package-as-default-values.md) | +| [protect_standby](ha-replication/standby-server.md#protect_standby) | 5.0.6 | +| [psort_work_mem](resource-consumption/memory.md#psort_work_mem) | | +| [qrw_inlist2join_optmode](query-planning/optimizer-method-configuration.md#qrw_inlist2join_optmode) | | +| [query_band](load-management.md#query_band) | | +| [query_dop](query-planning/other-optimizer-options.md#query_dop) | | +| [query_log_directory](./query.md#query_log_directory) | | +| [query_log_file](./query.md#query_log_file) | | +| [query_max_mem](resource-consumption/memory.md#query_max_mem) | | +| [query_mem](resource-consumption/memory.md#query_mem) | | +| [quote_all_identifiers](version-and-platform-compatibility/compatibility-with-earlier-versions.md#quote_all_identifiers) | | +| [raise_errors_if_no_files](./data-import-export.md#raise_errors_if_no_files) | | +| [random_page_cost](query-planning/optimizer-cost-constants.md#random_page_cost) | | +| [recovery_max_workers](write-ahead-log/log-replay.md#recovery_max_workers) | | +| [recovery_min_apply_delay](write-ahead-log/log-replay.md#recovery_min_apply_delay) | | +| [recovery_parallelism](write-ahead-log/log-replay.md#recovery_parallelism) | | +| [recovery_parse_workers](write-ahead-log/log-replay.md#recovery_parse_workers) | | +| [recovery_redo_workers](write-ahead-log/log-replay.md#recovery_redo_workers) | | +| [recovery_time_target](write-ahead-log/log-replay.md#recovery_time_target) | | +| [recyclebin_retention_time](flashback.md#recyclebin_retention_time) | | +| [redo_bind_cpu_attr](write-ahead-log/log-replay.md#redo_bind_cpu_attr) | | +| [remote_read_mode](fault-tolerance.md#remote_read_mode) | | +| [remotetype](developer-options.md#remotetype) | | +| [repl_auth_mode](./ha-replication/sending-server.md#repl_auth_mode) | | +| [repl_uuid](./ha-replication/sending-server.md#repl_uuid) | | +| [replconninfoN](ha-replication/sending-server.md#replconninfon) | | +| [replication_type](MogDB-transaction.md#replication_type) | | +| [RepOriginId](replication-parameters-of-two-database-instances.md#RepOriginId) | | +| [require_ssl](connection-and-authentication/security-and-authentication.md#require_ssl) | | +| [reserve_space_for_nullable_atts](miscellaneous-parameters.md#reserve_space_for_nullable_atts) | | +| [resilience_memory_reject_percent](./resource-consumption/memory.md#resilience_memory_reject_percent) | | +| [resilience_threadpool_reject_cond](./thread-pool.md#resilience_threadpool_reject_cond) | | +| 
[resource_track_cost](load-management.md#resource_track_cost) | | +| [resource_track_duration](load-management.md#resource_track_duration) | | +| [resource_track_level](load-management.md#resource_track_level) | | +| [resource_track_log](developer-options.md#resource_track_log) | | +| [restart_after_crash](fault-tolerance.md#restart_after_crash) | | +| [retry_ecode_list](fault-tolerance.md#retry_ecode_list) | | +| [rewrite_rule](query-planning/other-optimizer-options.md#rewrite_rule) | | +| [safe_data_path](./data-import-export.md#safe_data_path) | | +| [schedule_splits_threshold](reserved-parameters.md) | | +| [search_path](default-settings-of-client-connection/statement-behavior.md#search_path) | | +| [segment_buffers](resource-consumption/memory.md#segment_buffers) | | +| [segment_size](miscellaneous-parameters.md#segment_size) | | +| [seq_page_cost](query-planning/optimizer-cost-constants.md#seq_page_cost) | | +| [server_encoding](miscellaneous-parameters.md#server_encoding) | | +| [server_version](miscellaneous-parameters.md#server_version) | | +| [server_version_num](miscellaneous-parameters.md#server_version_num) | | +| [session_history_memory](load-management.md#session_history_memory) | | +| [session_replication_role](default-settings-of-client-connection/statement-behavior.md#session_replication_role) | | +| [session_respool](load-management.md#session_respool) | | +| [session_statistics_memory](load-management.md#session_statistics_memory) | | +| [session_timeout](connection-and-authentication/security-and-authentication.md#session_timeout) | | +| [shared_buffers](resource-consumption/memory.md#shared_buffers) | | +| [shared_preload_libraries](resource-consumption/kernel-resource-usage.md#shared_preload_libraries) | | +| [show_acce_estimate_detail](developer-options.md#show_acce_estimate_detail) | | +| [show_fdw_remote_plan](./query-planning/other-optimizer-options.md#show_fdw_remote_plan) | | +| [skew_option](query-planning/optimizer-method-configuration.md#skew_option) | | +| [smp_thread_cost](./query-planning/optimizer-cost-constants.md#smp_thread_cost) | | +| [sort_agg_total_cost_ratio](./query-planning/optimizer-cost-constants.md#sort_agg_total_cost_ratio) | | +| [sort_key_pruning_level](query-planning/other-optimizer-options.md#sort_key_pruning_level) | | +| [sql_beta_feature](query-planning/other-optimizer-options.md#sql_beta_feature) | | +| [sql_compatibility](version-and-platform-compatibility/platform-and-client-compatibility.md#sql_compatibility) | | +| [sql_ignore_strategy](miscellaneous-parameters.md#sql_ignore_strategy) | | +| [sql_inheritance](version-and-platform-compatibility/compatibility-with-earlier-versions.md#sql_inheritance) | | +| [sql_note](../../developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/guc-parameters.md#sql_note) | | +| [sql_use_spacelimit](resource-consumption/disk-space.md#sql_use_spacelimit) | | +| [ss_dss_conn_path](./resource-pooling-parameters.md#ss_dss_conn_path) | | +| [ss_dss_vg_name](./resource-pooling-parameters.md#ss_dss_vg_name) | | +| [ss_enable_aio](./resource-pooling-parameters.md#ss_enable_aio) | | +| [ss_enable_catalog_centralized](./resource-pooling-parameters.md#ss_enable_catalog_centralized) | | +| [ss_enable_dms](./resource-pooling-parameters.md#ss_enable_dms) | | +| [ss_enable_dss](./resource-pooling-parameters.md#ss_enable_dss) | | +| [ss_enable_scrlock](./resource-pooling-parameters.md#ss_enable_scrlock) | | +| 
[ss_enable_scrlock_sleep_mode](./resource-pooling-parameters.md#ss_enable_scrlock_sleep_mode) | | +| [ss_enable_ssl](./resource-pooling-parameters.md#ss_enable_ssl) | | +| [ss_instance_id](./resource-pooling-parameters.md#ss_instance_id) | | +| [ss_interconnect_channel_count](./resource-pooling-parameters.md#ss_interconnect_channel_count) | | +| [ss_interconnect_type](./resource-pooling-parameters.md#ss_interconnect_type) | | +| [ss_interconnect_url](./resource-pooling-parameters.md#ss_interconnect_url) | | +| [ss_log_backup_file_count](./resource-pooling-parameters.md#ss_log_backup_file_count) | | +| [ss_log_level](./resource-pooling-parameters.md#ss_log_level) | | +| [ss_log_max_file_size](./resource-pooling-parameters.md#ss_log_max_file_size) | | +| [ss_ock_log_path](./resource-pooling-parameters.md#ss_ock_log_path) | | +| [ss_rdma_work_config](./resource-pooling-parameters.md#ss_rdma_work_config) | | +| [ss_recv_msg_pool_size](./resource-pooling-parameters.md#ss_recv_msg_pool_size) | | +| [ss_scrlock_server_bind_core](./resource-pooling-parameters.md#ss_scrlock_server_bind_core) | | +| [ss_scrlock_server_port](./resource-pooling-parameters.md#ss_scrlock_server_port) | | +| [ss_scrlock_worker_bind_core](./resource-pooling-parameters.md#ss_scrlock_worker_bind_core) | | +| [ss_scrlock_worker_count](./resource-pooling-parameters.md#ss_scrlock_worker_count) | | +| [ss_work_thread_count](./resource-pooling-parameters.md#ss_work_thread_count) | | +| [ssl](connection-and-authentication/security-and-authentication.md#ssl) | | +| [ssl_ca_file](connection-and-authentication/security-and-authentication.md#ssl_ca_file) | | +| [ssl_cert_file](connection-and-authentication/security-and-authentication.md#ssl_cert_file) | | +| [ssl_cert_notify_time](./connection-and-authentication/security-and-authentication.md#ssl_cert_notify_time) | | +| [ssl_ciphers](connection-and-authentication/security-and-authentication.md#ssl_ciphers) | | +| [ssl_crl_file](connection-and-authentication/security-and-authentication.md#ssl_crl_file) | | +| [ssl_key_file](connection-and-authentication/security-and-authentication.md#ssl_key_file) | | +| [ssl_renegotiation_limit](./connection-and-authentication/security-and-authentication.md#ssl_renegotiation_limit) | | +| [standard_conforming_strings](version-and-platform-compatibility/compatibility-with-earlier-versions.md#standard_conforming_strings) | | +| [standby_shared_buffers_fraction](resource-consumption/memory.md#standby_shared_buffers_fraction) | | +| [statement_timeout](default-settings-of-client-connection/statement-behavior.md#statement_timeout) | | +| [stats_temp_directory](statistics-during-the-database-running/query-and-index-statistics-collector.md#stats_temp_directory) | | +| [string_hash_compatible](developer-options.md#string_hash_compatible) | | +| [support_batch_bind](developer-options.md#support_batch_bind) | | +| [support_extended_features](version-and-platform-compatibility/platform-and-client-compatibility.md#support_extended_features) | | +| [sync_config_strategy](ha-replication/primary-server.md#sync_config_strategy) | | +| [synchronize_seqscans](version-and-platform-compatibility/compatibility-with-earlier-versions.md#synchronize_seqscans) | | +| [synchronous_commit](write-ahead-log/settings.md#synchronous_commit) | | +| [synchronous_standby_names](ha-replication/primary-server.md#synchronous_standby_names) | | +| [sysadmin_reserved_connections](connection-and-authentication/connection-settings.md#sysadmin_reserved_connections) | | +| 
[syslog_facility](error-reporting-and-logging/logging-destination.md#syslog_facility) | | +| [syslog_ident](error-reporting-and-logging/logging-destination.md#syslog_ident) | | +| [table_skewness_warning_rows](alarm-detection.md#table_skewness_warning_rows) | | +| [table_skewness_warning_threshold](alarm-detection.md#table_skewness_warning_threshold) | | +| [tcp_keepalives_count](connection-and-authentication/communication-library-parameters.md#tcp_keepalives_count) | | +| [tcp_keepalives_idle](connection-and-authentication/communication-library-parameters.md#tcp_keepalives_idle) | | +| [tcp_keepalives_interval](connection-and-authentication/communication-library-parameters.md#tcp_keepalives_interval) | | +| [tcp_user_timeout](./connection-and-authentication/communication-library-parameters.md#tcp_user_timeout) | | +| [td_compatible_truncation](version-and-platform-compatibility/platform-and-client-compatibility.md#td_compatible_truncation) | | +| [tde_cmk_id](security-configuration.md#tde_cmk_id) | | +| [temp_buffers](resource-consumption/memory.md#temp_buffers) | | +| [temp_file_limit](resource-consumption/disk-space.md#temp_file_limit) | | +| [temp_tablespaces](default-settings-of-client-connection/statement-behavior.md#temp_tablespaces) | | +| [thread_pool_attr](thread-pool.md#thread_pool_attr) | | +| [thread_pool_committer_max_retry_count](./thread-pool.md#thread_pool_committer_max_retry_count) | | +| [thread_pool_committerctl_max_retry_count](./thread-pool.md#thread_pool_committerctl_max_retry_count) | | +| [thread_pool_stream_attr](./thread-pool.md#thread_pool_stream_attr) | | +| [thread_pool_worker_num_per_committer](./thread-pool.md#thread_pool_worker_num_per_committer) | | +| [time_to_target_rpo](./write-ahead-log/archiving.md#time_to_target_rpo) | | +| [TimeZone](default-settings-of-client-connection/zone-and-formatting.md#timezone) | | +| [timezone_abbreviations](default-settings-of-client-connection/zone-and-formatting.md#timezone_abbreviations) | | +| [topsql_retention_time](load-management.md#topsql_retention_time) | | +| [trace_notify](developer-options.md#trace_notify) | | +| [trace_recovery_messages](developer-options.md#trace_recovery_messages) | | +| [trace_sort](developer-options.md#trace_sort) | | +| [track_activities](statistics-during-the-database-running/query-and-index-statistics-collector.md#track_activities) | | +| [track_activity_query_size](statistics-during-the-database-running/query-and-index-statistics-collector.md#track_activity_query_size) | | +| [track_counts](statistics-during-the-database-running/query-and-index-statistics-collector.md#track_counts) | | +| [track_functions](statistics-during-the-database-running/query-and-index-statistics-collector.md#track_functions) | | +| [track_io_timing](statistics-during-the-database-running/query-and-index-statistics-collector.md#track_io_timing) | | +| [track_sql_count](statistics-during-the-database-running/query-and-index-statistics-collector.md#track_sql_count) | | +| [track_stmt_details_size](query.md#track_stmt_details_size) | | +| [track_stmt_parameter](query.md#track_stmt_parameter) | | +| [track_stmt_retention_time](query.md#track_stmt_retention_time) | | +| [track_stmt_session_slot](query.md#track_stmt_session_slot) | | +| [track_stmt_standby_chain_size](query.md#track_stmt_standby_chain_size) | | +| [track_stmt_stat_level](query.md#track_stmt_stat_level) | | +| [track_thread_wait_status_interval](statistics-during-the-database-running/query-and-index-statistics-collector.md#track_thread_wait_status_interval) | | +|
[transaction_deferrable](MogDB-transaction.md#transaction_deferrable) | | +| [transaction_isolation](MogDB-transaction.md#transaction_isolation) | | +| [transaction_pending_time](load-management.md#transaction_pending_time) | | +| [transaction_read_only](MogDB-transaction.md#transaction_read_only) | | +| [transform_null_equals](version-and-platform-compatibility/platform-and-client-compatibility.md#transform_null_equals) | | +| [transparent_encrypt_kms_region](miscellaneous-parameters.md#transparent_encrypt_kms_region) | | +| [transparent_encrypt_kms_url](miscellaneous-parameters.md#transparent_encrypt_kms_url) | | +| [transparent_encrypted_string](miscellaneous-parameters.md#transparent_encrypted_string) | | +| [try_vector_engine_strategy](query-planning/optimizer-method-configuration.md#try_vector_engine_strategy) | | +| [udf_memory_limit](guc-user-defined-functions.md#udf_memory_limit) | | +| [UDFWorkerMemHardLimit](guc-user-defined-functions.md#udfworkermemhardlimit) | | +| [uncontrolled_memory_context](resource-consumption/memory.md#uncontrolled_memory_context) | | +| [undo_limit_size_per_transaction](rollback-parameters.md#undo_limit_size_per_transaction) | | +| [undo_retention_time](flashback.md#undo_retention_time) | | +| [undo_space_limit_size](rollback-parameters.md#undo_space_limit_size) | | +| [undo_zone_count](reserved-parameters.md) | | +| [unique_sql_retention_time](./query.md#unique_sql_retention_time) | | +| [unix_socket_directory](connection-and-authentication/connection-settings.md#unix_socket_directory) | | +| [unix_socket_group](connection-and-authentication/connection-settings.md#unix_socket_group) | | +| [unix_socket_permissions](connection-and-authentication/connection-settings.md#unix_socket_permissions) | | +| [update_lockwait_timeout](lock-management.md#update_lockwait_timeout) | | +| [update_process_title](./statistics-during-the-database-running/query-and-index-statistics-collector.md#update_process_title) | | +| [upgrade_mode](upgrade-parameters.md#upgrade_mode) | | +| [uppercase_attribute_name](./version-and-platform-compatibility/platform-and-client-compatibility.md#uppercase_attribute_name) | | +| [use_elastic_search](security-configuration.md#use_elastic_search) | | +| [use_workload_manager](load-management.md#use_workload_manager) | | +| [user_metric_retention_time](load-management.md#user_metric_retention_time) | | +| [ustore_attr](miscellaneous-parameters.md#ustore_attr) | | +| [vacuum_cost_delay](resource-consumption/cost-based-vacuum-delay.md#vacuum_cost_delay) | | +| [vacuum_cost_limit](resource-consumption/cost-based-vacuum-delay.md#vacuum_cost_limit) | | +| [vacuum_cost_page_dirty](resource-consumption/cost-based-vacuum-delay.md#vacuum_cost_page_dirty) | | +| [vacuum_cost_page_hit](resource-consumption/cost-based-vacuum-delay.md#vacuum_cost_page_hit) | | +| [vacuum_cost_page_miss](resource-consumption/cost-based-vacuum-delay.md#vacuum_cost_page_miss) | | +| [vacuum_defer_cleanup_age](ha-replication/primary-server.md#vacuum_defer_cleanup_age) | | +| [vacuum_freeze_min_age](default-settings-of-client-connection/statement-behavior.md#vacuum_freeze_min_age) | | +| [vacuum_freeze_table_age](default-settings-of-client-connection/statement-behavior.md#vacuum_freeze_table_age) | | +| [vacuum_gtt_defer_check_age](global-temporary-table.md#vacuum_gtt_defer_check_age) | | +| [var_eq_const_selectivity](query-planning/other-optimizer-options.md#var_eq_const_selectivity) | | +| [version_retention_age](flashback.md#version_retention_age) | | +| 
[view_independent](miscellaneous-parameters.md#view_independent) | 5.0.1 | +| [wait_dummy_time](ha-replication/primary-server.md#wait_dummy_time) | | +| [wal_block_size](write-ahead-log/settings.md#wal_block_size) | | +| [wal_buffers](write-ahead-log/settings.md#wal_buffers) | | +| [wal_file_init_num](write-ahead-log/settings.md#wal_file_init_num) | | +| [wal_flush_delay](write-ahead-log/settings.md#wal_flush_delay) | | +| [wal_flush_timeout](write-ahead-log/settings.md#wal_flush_timeout) | | +| [wal_keep_segments](ha-replication/sending-server.md#wal_keep_segments) | | +| [wal_level](write-ahead-log/settings.md#wal_level) | | +| [wal_log_hints](write-ahead-log/settings.md#wal_log_hints) | | +| [wal_receiver_buffer_size](ha-replication/standby-server.md#wal_receiver_buffer_size) | | +| [wal_receiver_connect_retries](ha-replication/standby-server.md#wal_receiver_connect_retries) | | +| [wal_receiver_connect_timeout](ha-replication/standby-server.md#wal_receiver_connect_timeout) | | +| [wal_receiver_status_interval](ha-replication/standby-server.md#wal_receiver_status_interval) | | +| [wal_receiver_timeout](ha-replication/standby-server.md#wal_receiver_timeout) | | +| [wal_segment_size](write-ahead-log/settings.md#wal_segment_size) | | +| [wal_sender_timeout](ha-replication/sending-server.md#wal_sender_timeout) | | +| [wal_sync_method](write-ahead-log/settings.md#wal_sync_method) | | +| [wal_writer_delay](write-ahead-log/settings.md#wal_writer_delay) | | +| [walsender_max_send_size](ha-replication/primary-server.md#walsender_max_send_size) | | +| [walwriter_cpu_bind](write-ahead-log/settings.md#walwriter_cpu_bind) | | +| [walwriter_sleep_threshold](write-ahead-log/settings.md#walwriter_sleep_threshold) | | +| [wdr_snapshot_interval](system-performance-snapshot.md#wdr_snapshot_interval) | | +| [wdr_snapshot_query_timeout](system-performance-snapshot.md#wdr_snapshot_query_timeout) | | +| [wdr_snapshot_retention_days](system-performance-snapshot.md#wdr_snapshot_retention_days) | | +| [work_mem](resource-consumption/memory.md#work_mem) | | +| [xc_maintenance_mode](MogDB-transaction.md#xc_maintenance_mode) | | +| [xlog_file_path](./write-ahead-log/settings.md#xlog_file_path) | | +| [xlog_file_size](./write-ahead-log/settings.md#xlog_file_size) | | +| [xlog_lock_file_path](./write-ahead-log/settings.md#xlog_lock_file_path) | | +| [xloginsert_locks](lock-management.md#xloginsert_locks) | | +| [xmlbinary](default-settings-of-client-connection/statement-behavior.md#xmlbinary) | | +| [xmloption](default-settings-of-client-connection/statement-behavior.md#xmloption) | | +| [zero_damaged_pages](developer-options.md#zero_damaged_pages) | | diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/guc-parameter-usage.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/guc-parameter-usage.md index 2e3004b5..d2e8a791 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/guc-parameter-usage.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/guc-parameter-usage.md @@ -1,18 +1,18 @@ ---- -title: GUC Parameter Usage -summary: GUC Parameter Usage -author: Zhang Cuiping -date: 2021-04-20 ---- - -# GUC Parameter Usage - -A database provides many operation parameters. Configurations of these parameters affect the behavior of the database system. Before modifying these parameters, learn the impact of these parameters on the database. Otherwise, unexpected results may occur. 
- -**Precautions** - -- If the value range of a parameter is a string, the string should comply with the naming conventions of the path and file name in the OS running the target database. - -- If the maximum value of a parameter is *INT_MAX*, the maximum parameter value varies by OS. - -- If the maximum value of a parameter is *DBL_MAX*, the maximum parameter value varies by OS. +--- +title: GUC Parameter Usage +summary: GUC Parameter Usage +author: Zhang Cuiping +date: 2021-04-20 +--- + +# GUC Parameter Usage + +A database provides many operation parameters. Configurations of these parameters affect the behavior of the database system. Before modifying these parameters, learn the impact of these parameters on the database. Otherwise, unexpected results may occur. + +**Precautions** + +- If the value range of a parameter is a string, the string should comply with the naming conventions of the path and file name in the OS running the target database. + +- If the maximum value of a parameter is *INT_MAX*, the maximum parameter value varies by OS. + +- If the maximum value of a parameter is *DBL_MAX*, the maximum parameter value varies by OS. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/primary-server.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/primary-server.md index 74f01924..2095c266 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/primary-server.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/primary-server.md @@ -239,7 +239,7 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided **Parameter description:** If this parameter is enabled, the primary node flushes the location that has achieved majority consistency with the current synchronous standby node during each transaction to a disk. When the primary node is faulty and the original primary node functions as the standby node to initiate a build request, the system checks whether the same confirmed LSN exists on the source node (new primary node). If it does not exist, the build fails to prevent the data of the original primary node from being overwritten by the build. -This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).. +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). **Value range:** Boolean @@ -260,6 +260,49 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided > >- After this function is enabled, the synchronization waiting time is prolonged due to persistent data. As a result, the performance of the primary and standby clusters with synchronous standby nodes is affected. According to the test data, the performance decreases by about 20% compared with that when this function is disabled. +## hadr_recovery_time_target + +**Parameter description:** In streaming disaster recovery mode, specifies the time within which the standby database instance must complete log writing and replay. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+ +**Value range:** Integer, 0 to 3600 (seconds) + +The value 0 means that log flow control is disabled. A value from 1 to 3600 means that the standby database instance must complete log writing and replay within hadr_recovery_time_target seconds, which ensures that the standby can be promoted quickly when the primary and standby database instances are switched over. Setting hadr_recovery_time_target too small affects the performance of the primary; setting it too large makes the flow control ineffective. + +**Default value:** 0 + +## hadr_recovery_point_target + +**Parameter description:** In streaming disaster recovery mode, specifies the recovery point objective (RPO), that is, the time within which the standby database instance must complete log flushing. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range:** Integer, 0 to 3600 (seconds) + +The value 0 means that log flow control is disabled. A value from 1 to 3600 means that the standby database instance must flush logs to disk within hadr_recovery_point_target seconds, which ensures that the log gap between the primary and standby database instances during a switchover stays within hadr_recovery_point_target seconds and limits the volume of logs the standby must catch up on before being promoted to primary. Setting hadr_recovery_point_target too small affects the performance of the primary; setting it too large makes the flow control ineffective. + +**Default value:** 0
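+Below is a minimal sketch of enabling both flow-control targets. Both are SIGHUP parameters, so a reload is sufficient; the data directory /opt/mogdb/data and the 60-second targets are illustrative assumptions, not recommended values.
+
+```bash
+# Assumed data directory; replace with the actual one.
+DATADIR=/opt/mogdb/data
+
+# SIGHUP parameters take effect on reload, without a restart.
+gs_guc reload -D "$DATADIR" -c "hadr_recovery_time_target=60"
+gs_guc reload -D "$DATADIR" -c "hadr_recovery_point_target=60"
+
+# Verify the settings from a client session.
+gsql -d postgres -c "SHOW hadr_recovery_time_target;"
+gsql -d postgres -c "SHOW hadr_recovery_point_target;"
+```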
+ +## hadr_super_user_record_path + +**Parameter description:** This streaming remote disaster recovery parameter specifies the path where the encrypted file of the hadr_disaster user is stored in the standby database instance. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Modification Suggestion**: This parameter is set automatically by the streaming disaster recovery password delivery tool and does not need to be added manually. + +**Value range:** String + +**Default value:** NULL + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**: +> +> - In a database instance that contains a primary node, a standby node, and a cascaded standby node, the primary is the sender with respect to the standby, the standby is the receiver with respect to the primary, the standby is the sender with respect to the cascaded standby, and the cascaded standby is the receiver with respect to the standby. +> - Active synchronization of the configuration file by the sender and a synchronization request from the receiver are two independent events, and both synchronize the configuration file. If you do not want the configuration file to be synchronized, configure the receiving end as none_node; when the sending end is a standby, only none_node can be configured on it. When the sending end is the primary, configuring it as none_node stops synchronization with all standbys, while only_sync_node synchronizes only with synchronous standbys, not with asynchronous standbys. +> - When configuration parameters are synchronized, the configuration file sent by the sender directly overwrites the corresponding parameters in the receiver's configuration file. If a synchronization policy is configured, a parameter modified on the receiving end is immediately overwritten by the sending end, so the modification on the receiving end does not take effect. +> - Even if you set a policy that synchronizes the configuration file, some configuration parameters are still not synchronized. These include: “application_name”, “archive_command”, “audit_directory”, “available_zone”, “comm_control_port”, “comm_sctp_port”, “listen_addresses”, “log_directory”, “port”, “replconninfo1”, “replconninfo2”, “replconninfo3”, “replconninfo4”, “replconninfo5”, “replconninfo6”, “replconninfo7”, “replconninfo8”, “replconninfo9”, “replconninfo10”, “replconninfo11”, “replconninfo12”, “replconninfo13”, “replconninfo14”, “replconninfo15”, “replconninfo16”, “replconninfo17”, “replconninfo18”, “ssl”, “ssl_ca_file”, “ssl_cert_file”, “ssl_ciphers”, “ssl_crl_file”, “ssl_key_file”, “ssl_renegotiation_limit”, “ssl_cert_notify_time”, “synchronous_standby_names”, “local_bind_address”, “perf_directory”, “query_log_directory”, “asp_log_directory”, “streaming_router_port”, “enable_upsert_to_merge”, “archive_dest”, “recovery_min_apply_delay”, “sync_config_strategy”. + ## pgxc_node_name **Parameter description**: Specifies the name of a node. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/sending-server.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/sending-server.md index 02fecc41..6d714c97 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/sending-server.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/sending-server.md @@ -52,6 +52,16 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T **Default value**: **6s** +## logical_sender_timeout + +**Parameter description**: Specifies the maximum time that the local end waits for the logical log receiver to receive logs. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: an integer ranging from 0 to *INT_MAX*. The unit is ms. + +**Default value**: 30s + ## max_replication_slots **Parameter description**: Specifies the number of log replication slots in the primary server. @@ -85,6 +95,18 @@ This parameter is a USERSET parameter. Set it based on instructions provided in **Default value**: **off** +## logical_decode_options_default + +**Parameter description**: Specifies the global default values for decoding options that are not specified when logical decoding is initiated. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +The logical decoding options that can currently be specified are parallel-decode-num, parallel-queue-size, max-txn-in-memory, max-reorderbuffer-in-memory, and exclude-users. For the meaning of these options, see [Example Logic Replication Code](../../../developer-guide/dev/2-development-based-on-jdbc/14-example-logic-replication-code.md). + +**Value range**: A comma-separated key=value string, for example 'parallel-decode-num=4,parallel-queue-size=128,exclude-users=userA'. An empty string indicates that the program's built-in default values are used. + +**Default value**: ""
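+As a sketch, the global defaults could be set once so that individual decoding sessions can omit these options; the option values and the data directory below are illustrative assumptions.
+
+```bash
+# SIGHUP parameter: a reload is enough to apply the new defaults.
+gs_guc reload -D /opt/mogdb/data \
+  -c "logical_decode_options_default='parallel-decode-num=4,parallel-queue-size=128,exclude-users=userA'"
+```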
+ ## max_changes_in_memory **Parameter description**: Specifies the maximum number of DML statements cached in memory for a single transaction during logical decoding. @@ -120,6 +142,46 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T **Default value**: **false** +## repl_auth_mode + +**Parameter description**: This parameter sets the verification mode for replication between primary and standby nodes and for standby rebuild. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**: +> +> - If UUID authentication is enabled on the host and the repl_uuid authentication code is configured with a non-empty string, the primary and standby nodes must both enable UUID authentication and configure the same repl_uuid authentication code; otherwise, primary/standby log replication requests and standby rebuild requests are rejected by the host. +> - This parameter supports SIGHUP to dynamically load a new value. A modification does not affect primary/standby connections that have already been established; it takes effect for subsequent primary/standby replication requests and rebuild requests. +> - Verification of standby rebuild is supported under the Quorum and DCF protocols; verification of primary/standby replication is supported under the Quorum protocol but not under the DCF protocol. +> - The UUID verification function mainly prevents data crosstalk and pollution caused by misconnection between the primary and the standby; it is not a security feature. +> - This parameter does not support automatic synchronization between primary and standby. + +**Value range**: Enumeration type + +- off disables UUID authentication. +- default disables UUID authentication. +- uuid enables UUID authentication. + +**Default value**: default + +## repl_uuid + +**Parameter description**: This parameter sets the UUID code used for UUID authentication between primary and standby nodes. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +>![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**: +> +>- If UUID authentication is enabled on the host and the repl_uuid authentication code is configured with a non-empty string, the primary and standby nodes must both enable UUID authentication and configure the same repl_uuid authentication code; otherwise, primary/standby log replication requests and standby rebuild requests are rejected by the host. +>- This parameter supports SIGHUP to dynamically load a new value. A modification does not affect primary/standby connections that have already been established; it takes effect for subsequent primary/standby replication requests and rebuild requests. +>- Verification of standby rebuild is supported under the Quorum and DCF protocols; verification of primary/standby replication is supported under the Quorum protocol but not under the DCF protocol. +>- The UUID verification function mainly prevents data crosstalk and pollution caused by misconnection between the primary and the standby; it is not a security feature. +>- This parameter does not support automatic synchronization between primary and standby. + +**Value range**: String type, 0 to 63 characters consisting of letters and digits, case-insensitive, converted to lowercase for internal storage. An empty string indicates that UUID authentication is not enabled. + +**Default value**: Empty string
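+The following sketch enables UUID verification consistently on both ends; per the notice above, the same repl_uuid must be configured on the primary and every standby. The data directory and the authentication code are illustrative assumptions.
+
+```bash
+# Run the same commands on the primary and on each standby;
+# both parameters are SIGHUP parameters, so a reload is sufficient.
+gs_guc reload -D /opt/mogdb/data -c "repl_auth_mode=uuid"
+gs_guc reload -D /opt/mogdb/data -c "repl_uuid='mogdb2024'"
+```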
+ +## replconninfoN **Parameter description**: Specifies the information about the *N* node to be listened to and authenticated by the current server (*N* ranges from 1 to 18). @@ -152,6 +214,30 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided **Default value**: empty +## enable_availablezone + +**Parameter description**: Sets whether cascaded standby nodes on this end can connect to standbys across available_zones. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- on indicates that cascaded standby nodes can only connect to standbys in the same available_zone. + +- off indicates that the cascaded standby node can connect to standbys in different available_zones. + +**Default value**: off + +## max_keep_log_seg + +**Parameter description**: Flow control parameter. During logical replication, the database node parses physical logs into logical logs locally; when the number of physical log files that have not been parsed exceeds this value, flow control is triggered. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Integer, 0 ~ 2147483647. + +**Default value**: 0 + ## pgxc_node_name **Parameter description**: Specifies the name of a node. @@ -165,3 +251,17 @@ When a standby node requests to replicate logs on the primary node, if the **app **Value range**: a string **Default value**: current node name + +## enable_ddl_logical_record + +**Parameter description**: Controls whether DDL support is enabled for logical decoding. This parameter determines whether DDL-related WAL records are written by the new WAL writing function. (Introduced in MogDB 5.0.8) + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- on: when a DDL command is executed successfully, a WAL record of type xl_logical_ddl_message is written to the WAL log for logical decoding. + +- off: logical decoding does not support DDL operations. When set to off, plugins such as wal2json output an empty change for DDL regardless of how they are configured. + +**Default value**: off
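+A sketch of turning on DDL decoding and observing it through a logical replication slot follows. It assumes MogDB 5.0.8 or later, wal_level set to logical, and an available wal2json plugin (used only because it is the plugin mentioned above); the data directory, slot name, and table name are illustrative.
+
+```bash
+# SIGHUP parameter (MogDB 5.0.8+): reload, then decode DDL from a logical slot.
+gs_guc reload -D /opt/mogdb/data -c "enable_ddl_logical_record=on"
+
+gsql -d postgres -c "SELECT * FROM pg_create_logical_replication_slot('ddl_slot', 'wal2json');"
+gsql -d postgres -c "CREATE TABLE t_ddl_demo(id int);"
+gsql -d postgres -c "SELECT data FROM pg_logical_slot_peek_changes('ddl_slot', NULL, NULL);"
+```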
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/standby-server.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/standby-server.md index 207963b4..cbeb8792 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/standby-server.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/standby-server.md @@ -168,4 +168,23 @@ When a standby node requests to replicate logs on the primary node, if the **app **Value range**: a string -**Default value**: current node name \ No newline at end of file +**Default value**: current node name + +## protect_standby + +**Parameter description**: This parameter is used to control whether the standby node role is protected. + +This parameter is a SIGHUP parameter. Set it based on Table 1 [GUC parameters](../appendix.md). + +**Value range**: Boolean + +- on means that the standby node cannot be promoted by a failover or switchover command, nor demoted to a cascaded standby. +- off means that the standby node role is unprotected; the node can be promoted, or demoted to a cascaded standby. + +**Default value:** off + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)Note: +> +> - To configure a standby node to refuse promotion to primary, set protect_standby to on. To promote the node, first change protect_standby to off, execute a reload, and then promote it. +> - This parameter does not take effect on the primary node or on cascaded standbys, and it does not change with the primary node's setting. +> - By default, DCF is not supported; when DCF is used, the protect_standby check is skipped. \ No newline at end of file
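+A sketch of the promotion workflow described in the note above, assuming a standby whose data directory is /opt/mogdb/data:
+
+```bash
+# On the protected standby: allow promotion first (SIGHUP parameter, reload only),
+# then promote the node, for example via switchover.
+gs_guc reload -D /opt/mogdb/data -c "protect_standby=off"
+gs_ctl switchover -D /opt/mogdb/data
+```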
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/lock-management.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/lock-management.md
index a63cffb1..58c292b3 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/lock-management.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/lock-management.md
@@ -1,152 +1,152 @@
----
-title: Lock Management
-summary: Lock Management
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Lock Management
-
-In MogDB, a deadlock may occur when concurrently executed transactions compete for resources. This section describes parameters used for managing transaction locks.
-
-## deadlock_timeout
-
-**Parameter description**: Specifies the time, in milliseconds, to wait on a lock before checking whether there is a deadlock condition. When the applied lock exceeds the preset value, the system will check whether a deadlock occurs.
-
-- The check for deadlock is relatively expensive. Therefore, the server does not check it when waiting for a lock every time. Deadlocks do not frequently occur when the system is running. Therefore, the system just needs to wait on the lock for a while before checking for a deadlock. Increasing this value reduces the time wasted in needless deadlock checks, but slows down reporting of real deadlock errors. On a heavily loaded server, you may need to raise it. The value you have set needs to exceed the transaction time. By doing this, the possibility that a lock will be checked for deadlocks before it is released will be reduced.
-- When **log_lock_waits** is set to **on**, **deadlock_timeout** determines a waiting time to write the lock waiting time information during query execution to logs. To study the lock delay, you can set **deadlock_timeout** to a value smaller than the normal value.
-
-This parameter is a SUSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 1 to 2147483647. The unit is ms.
-
-**Default value**: **1s**
-
-## lockwait_timeout
-
-**Parameter description**: Specifies the timeout for attempts to acquire a lock. If the time spent in waiting for a lock exceeds the specified time, an error is reported.
-
-This parameter is a SUSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to *INT_MAX*. The unit is ms.
-
-**Default value**: **20min**
-
-## update_lockwait_timeout
-
-**Parameter description**: Specifies the maximum duration that a lock waits for concurrent updates on a row to complete when the concurrent update feature is enabled. If the time spent in waiting for a lock exceeds the specified time, an error is reported.
-
-This parameter is a SUSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md).
-
-**Value range**: an integer ranging from 0 to *INT_MAX*. The unit is ms.
-
-**Default value**: **2min**
-
-## max_locks_per_transaction
-
-**Parameter description**: Controls the average number of object locks allocated for each transaction.
-
-- The size of the shared lock table is calculated under the condition that a maximum of *N* independent objects need to be locked at any time.
*N* = **max_locks_per_transaction** x (**max_connections** + **max_prepared_transactions**). Objects whose amount does not exceed the preset number can be locked simultaneously at any time. You may need to increase this value if many different tables are modified in a single transaction. This parameter can only be set at database start. -- Increasing the value of this parameter may cause MogDB to request more System V-shared memory than the OS's default configuration allows. -- When running a standby server, you must set this parameter to a value that is no less than that on the primary server. Otherwise, queries will not be allowed on the standby server. - -This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range:** an integer ranging from 10 to *INT_MAX* - -**Default value**: **256** - -## max_pred_locks_per_transaction - -**Parameter description**: Controls the average number of predicate locks allocated for each transaction. - -- The size of the shared predicate lock table is calculated under the condition that a maximum of *N* independent objects need to be locked at any time. *N* = **max_pred_locks_per_transaction** x (**max_connections** + **max_prepared_transactions**). Objects whose amount does not exceed the preset number can be locked simultaneously at any time. You may need to increase this value if many different tables are modified in a single transaction. This parameter can only be set at server start. -- Increasing the value of this parameter may cause MogDB to request more System V-shared memory than the OS's default configuration allows. - -This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range:** an integer ranging from 10 to *INT_MAX* - -**Default value**: **64** - -## gs_clean_timeout - -**Parameter description**: Specifies the average interval for clearing temporary tables on the primary node. - -- When the database connection is terminated abnormally, temporary tables may exist. In this case, you need to call the **gs_clean** tool to clear the temporary tables in the database. -- If this parameter is set to a larger value, the time for clearing MogDB temporary tables may be prolonged. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range**: an integer ranging from 0 to 2147483. The unit is s. - -**Default value**: **1min** - -## partition_lock_upgrade_timeout - -**Parameter description**: Specifies the timeout for attempts to upgrade an exclusive lock (read allowed) to an access exclusive lock (read/write blocked) on a partitioned table during the execution of some query statements. If there are concurrent read transactions running, the lock upgrade will need to wait. This parameter sets the waiting timeout for lock upgrade attempts. - -- When you do MERGE PARTITION and CLUSTER PARTITION on a partitioned table, temporary tables are used for data rearrangement and file exchange. To concurrently perform as many operations as possible on the partitions, exclusive locks are acquired for the partitions during data rearrangement and access exclusive locks are acquired during file exchange. 
- -- Generally, a partition waits until it acquires a lock, or a timeout occurs if the partition waits for a period longer than the value specified by the **[lockwait_timeout](#lockwait_timeout)** parameter. - -- When doing MERGE PARTITION or CLUSTER PARTITION on a partitioned table, an access exclusive lock needs to be acquired during file exchange. If the lock fails to be acquired, the acquisition is retried at an interval of 50 ms until timeout occurs. The **partition_lock_upgrade_timeout** parameter specifies the time to wait before the lock acquisition attempt times out. - -- If this parameter is set to **-1**, the lock upgrade never times out. The lock upgrade is continuously retried until it succeeds. - - This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range**: an integer ranging from -1 to 3000. The unit is s. - -**Default value**: **1800** - -## fault_mon_timeout - -**Parameter description**: Specifies the period for detecting lightweight deadlocks. This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range**: an integer ranging from 0 to 1440. The unit is minute. - -**Default value**: **5min** - -## enable_online_ddl_waitlock - -**Parameter description**: Specifies whether to block DDL operations to wait for the release of MogDB locks, such as **pg_advisory_lock** and **pgxc_lock_for_backup**. This parameter is mainly used in online OM operations and you are not advised to modify the settings. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range**: Boolean - -- **on** indicates that DDL operations will be blocked to wait for the lock release. -- **off** indicates that DDL operations will not be blocked. - -**Default value**: **off** - -## xloginsert_locks - -**Parameter description**: Specifies the number of locks on concurrent write-ahead logging. This parameter is used to improve the efficiency of writing write-ahead logs. - -This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range:** an integer ranging from 1 to 1000 - -**Default value**: **8** - -## num_internal_lock_partitions - -**Parameter description**: Specifies the number of internal lightweight lock partitions. It is mainly used for performance optimization in various scenarios. The content is organized in the KV format of keywords and numbers. Different types of locks are separated by commas (,). The sequence does not affect the setting result. For example, **CLOG_PART=256,CSNLOG_PART=512** is equivalent to **CSNLOG_PART=512,CLOG_PART=256**. If you set the same keyword multiple times, only the latest setting takes effect. For example, if you set **CLOG_PART** to **256**and**CLOG_PART** to **2**, the value of **CLOG_PART** is **2**. If no keyword is set, the default value is used. The usage description, maximum value, minimum value, and default value of each lock type are as follows: - -- **CLOG_PART**: number of Clog file controllers. Increasing the value of this parameter improves the Clog writing efficiency and transaction submission performance, but increases the memory usage. Decreasing the value of this parameter reduces the memory usage, but may increase the conflict of writing Clogs and affect the performance. 
The value ranges from 1 to 256. -- **CSNLOG_PART**: number of CSNLOG file controllers. Increasing the value of this parameter improves the CSNLOG log writing efficiency and transaction submission performance, but increases the memory usage. Decreasing the value of this parameter reduces the memory usage, but may increase the conflict of writing CSNLOG logs and affect the performance. The value ranges from 1 to 512. -- **LOG2_LOCKTABLE_PART**: two logarithms of the number of common table lock partitions. Increasing the value can improve the concurrency of obtaining locks in the normal process, but may increase the time required for transferring and clearing locks. When waiting events occur in **LockMgrLock**, you can increase the value to improve the performance. The minimum value is 4, that is, the number of lock partitions is 16. The maximum value is 16, that is, the number of lock partitions is 65536. -- **TWOPHASE_PART**: number of partitions of the two-phase transaction lock. Increasing the value can increase the number of concurrent two-phase transaction commits. The value ranges from 1 to 64. - -This parameter is a **POSTMASTER** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). - -**Value range**: a string - -**Default value**: - -- **CLOG_PART**: **256** -- **CSNLOG_PART**: **512** -- **LOG2_LOCKTABLE_PART**: **4** -- **TWOPHASE_PART**: **1** +--- +title: Lock Management +summary: Lock Management +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Lock Management + +In MogDB, a deadlock may occur when concurrently executed transactions compete for resources. This section describes parameters used for managing transaction locks. + +## deadlock_timeout + +**Parameter description**: Specifies the time, in milliseconds, to wait on a lock before checking whether there is a deadlock condition. When the applied lock exceeds the preset value, the system will check whether a deadlock occurs. + +- The check for deadlock is relatively expensive. Therefore, the server does not check it when waiting for a lock every time. Deadlocks do not frequently occur when the system is running. Therefore, the system just needs to wait on the lock for a while before checking for a deadlock. Increasing this value reduces the time wasted in needless deadlock checks, but slows down reporting of real deadlock errors. On a heavily loaded server, you may need to raise it. The value you have set needs to exceed the transaction time. By doing this, the possibility that a lock will be checked for deadlocks before it is released will be reduced. +- When **log_lock_waits** is set to **on**, **deadlock_timeout** determines a waiting time to write the lock waiting time information during query execution to logs. To study the lock delay, you can set **deadlock_timeout** to a value smaller than the normal value. + +This parameter is a SUSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range**: an integer ranging from 1 to 2147483647. The unit is ms. + +**Default value**: **1s** + +## lockwait_timeout + +**Parameter description**: Specifies the timeout for attempts to acquire a lock. If the time spent in waiting for a lock exceeds the specified time, an error is reported. + +This parameter is a SUSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range**: an integer ranging from 0 to *INT_MAX*. The unit is ms. 
+ +**Default value**: **20min** + +## update_lockwait_timeout + +**Parameter description**: Specifies the maximum duration that a lock waits for concurrent updates on a row to complete when the concurrent update feature is enabled. If the time spent in waiting for a lock exceeds the specified time, an error is reported. + +This parameter is a SUSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range**: an integer ranging from 0 to *INT_MAX*. The unit is ms. + +**Default value**: **2min** + +## max_locks_per_transaction + +**Parameter description**: Controls the average number of object locks allocated for each transaction. + +- The size of the shared lock table is calculated under the condition that a maximum of *N* independent objects need to be locked at any time. *N* = **max_locks_per_transaction** x (**max_connections** + **max_prepared_transactions**). Objects whose amount does not exceed the preset number can be locked simultaneously at any time. You may need to increase this value if many different tables are modified in a single transaction. This parameter can only be set at database start. +- Increasing the value of this parameter may cause MogDB to request more System V-shared memory than the OS's default configuration allows. +- When running a standby server, you must set this parameter to a value that is no less than that on the primary server. Otherwise, queries will not be allowed on the standby server. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** an integer ranging from 10 to *INT_MAX* + +**Default value**: **256** + +## max_pred_locks_per_transaction + +**Parameter description**: Controls the average number of predicate locks allocated for each transaction. + +- The size of the shared predicate lock table is calculated under the condition that a maximum of *N* independent objects need to be locked at any time. *N* = **max_pred_locks_per_transaction** x (**max_connections** + **max_prepared_transactions**). Objects whose amount does not exceed the preset number can be locked simultaneously at any time. You may need to increase this value if many different tables are modified in a single transaction. This parameter can only be set at server start. +- Increasing the value of this parameter may cause MogDB to request more System V-shared memory than the OS's default configuration allows. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** an integer ranging from 10 to *INT_MAX* + +**Default value**: **64** + +## gs_clean_timeout + +**Parameter description**: Specifies the average interval for clearing temporary tables on the primary node. + +- When the database connection is terminated abnormally, temporary tables may exist. In this case, you need to call the **gs_clean** tool to clear the temporary tables in the database. +- If this parameter is set to a larger value, the time for clearing MogDB temporary tables may be prolonged. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range**: an integer ranging from 0 to 2147483. The unit is s. 
+ +**Default value**: **1min** + +## partition_lock_upgrade_timeout + +**Parameter description**: Specifies the timeout for attempts to upgrade an exclusive lock (read allowed) to an access exclusive lock (read/write blocked) on a partitioned table during the execution of some query statements. If there are concurrent read transactions running, the lock upgrade will need to wait. This parameter sets the waiting timeout for lock upgrade attempts. + +- When you do MERGE PARTITION and CLUSTER PARTITION on a partitioned table, temporary tables are used for data rearrangement and file exchange. To concurrently perform as many operations as possible on the partitions, exclusive locks are acquired for the partitions during data rearrangement and access exclusive locks are acquired during file exchange. + +- Generally, a partition waits until it acquires a lock, or a timeout occurs if the partition waits for a period longer than the value specified by the **[lockwait_timeout](#lockwait_timeout)** parameter. + +- When doing MERGE PARTITION or CLUSTER PARTITION on a partitioned table, an access exclusive lock needs to be acquired during file exchange. If the lock fails to be acquired, the acquisition is retried at an interval of 50 ms until timeout occurs. The **partition_lock_upgrade_timeout** parameter specifies the time to wait before the lock acquisition attempt times out. + +- If this parameter is set to **-1**, the lock upgrade never times out. The lock upgrade is continuously retried until it succeeds. + + This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range**: an integer ranging from -1 to 3000. The unit is s. + +**Default value**: **1800** + +## fault_mon_timeout + +**Parameter description**: Specifies the period for detecting lightweight deadlocks. This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range**: an integer ranging from 0 to 1440. The unit is minute. + +**Default value**: **5min** + +## enable_online_ddl_waitlock + +**Parameter description**: Specifies whether to block DDL operations to wait for the release of MogDB locks, such as **pg_advisory_lock** and **pgxc_lock_for_backup**. This parameter is mainly used in online OM operations and you are not advised to modify the settings. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range**: Boolean + +- **on** indicates that DDL operations will be blocked to wait for the lock release. +- **off** indicates that DDL operations will not be blocked. + +**Default value**: **off** + +## xloginsert_locks + +**Parameter description**: Specifies the number of locks on concurrent write-ahead logging. This parameter is used to improve the efficiency of writing write-ahead logs. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md). + +**Value range:** an integer ranging from 1 to 1000 + +**Default value**: **8** + +## num_internal_lock_partitions + +**Parameter description**: Specifies the number of internal lightweight lock partitions. It is mainly used for performance optimization in various scenarios. The content is organized in the KV format of keywords and numbers. Different types of locks are separated by commas (,). 
The sequence does not affect the setting result. For example, **CLOG_PART=256,CSNLOG_PART=512** is equivalent to **CSNLOG_PART=512,CLOG_PART=256**. If you set the same keyword multiple times, only the latest setting takes effect. For example, if you set **CLOG_PART** to **256** and **CLOG_PART** to **2**, the value of **CLOG_PART** is **2**. If no keyword is set, the default value is used. The usage description, maximum value, minimum value, and default value of each lock type are as follows:
+
+- **CLOG_PART**: number of Clog file controllers. Increasing the value of this parameter improves the Clog writing efficiency and transaction submission performance, but increases the memory usage. Decreasing the value of this parameter reduces the memory usage, but may increase the conflict of writing Clogs and affect the performance. The value ranges from 1 to 256.
+- **CSNLOG_PART**: number of CSNLOG file controllers. Increasing the value of this parameter improves the CSNLOG log writing efficiency and transaction submission performance, but increases the memory usage. Decreasing the value of this parameter reduces the memory usage, but may increase the conflict of writing CSNLOG logs and affect the performance. The value ranges from 1 to 512.
+- **LOG2_LOCKTABLE_PART**: base-2 logarithm of the number of common table lock partitions. Increasing the value can improve the concurrency of obtaining locks in the normal process, but may increase the time required for transferring and clearing locks. When waiting events occur in **LockMgrLock**, you can increase the value to improve the performance. The minimum value is 4, that is, the number of lock partitions is 16. The maximum value is 16, that is, the number of lock partitions is 65536.
+- **TWOPHASE_PART**: number of partitions of the two-phase transaction lock. Increasing the value can increase the number of concurrent two-phase transaction commits. The value ranges from 1 to 64.
+
+This parameter is a **POSTMASTER** parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](appendix.md).
+
+**Value range**: a string
+
+**Default value**:
+
+- **CLOG_PART**: **256**
+- **CSNLOG_PART**: **512**
+- **LOG2_LOCKTABLE_PART**: **4**
+- **TWOPHASE_PART**: **1**
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/miscellaneous-parameters.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/miscellaneous-parameters.md
index cc8177de..1453ba4f 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/miscellaneous-parameters.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/miscellaneous-parameters.md
@@ -27,6 +27,16 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided

**Default value**: **off**

+## enable_segment
+
+**Parameter description**: Specifies whether segment-page storage is used by default. When this parameter is set to on, new tables use segment-page storage by default. Note that it is automatically set to on during installation in resource pooling mode to improve ease of use.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range:** Boolean, on/off
+
+**Default value:** off
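+
+A minimal sketch of turning the default on, assuming ALTER SYSTEM SET is used for the reload-level change; the table name is illustrative, and the WITH (segment = on) clause is assumed to be the explicit form of what the default would otherwise supply:
+
+```sql
+-- SIGHUP parameter: persist and reload, no restart needed.
+ALTER SYSTEM SET enable_segment = on;
+
+-- New tables now default to segment-page storage, equivalent to:
+CREATE TABLE seg_demo (id int) WITH (segment = on);
+```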

## reserve_space_for_nullable_atts

**Parameter description**: Specifies whether to reserve space for the nullable attribute of an Ustore table. If this parameter is set to **on**, space is reserved for the nullable attribute of the Ustore table by default.
@@ -215,17 +225,20 @@ This parameter is an POSTMASTER parameter. Set it based on instructions provided

**Default value:** **10**

-## group_concat_max_len
+## enable_seqscan_fusion

-**Parameter description**: In B-compatible mode, this parameter limits the length of the return value of GROUP_CONCAT. If the length exceeds the limit, the return value is truncated.
+**Parameter description**: Controls whether SeqScan optimization is enabled.

-Note: Currently, the maximum length that can be returned is 1073741823. If the length exceeds 1073741823, the out of memory error is reported.
+This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](./appendix.md).

-This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](./appendix.md).
+**Value range:** Boolean

-**Value range**: 0 to 9223372036854775807
+- on indicates that SeqScan optimization is enabled.
+- off indicates that SeqScan optimization is disabled.

-**Default value**: **1024**
+**Default value:** off
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Description**: This parameter only optimizes the execution time of the seqscan operator for the EXPLAIN ANALYZE statement.

## sql_ignore_strategy

@@ -266,4 +279,91 @@ This parameter is a USERSET parameter. Set it based on instructions provided in

- on: indicates that errors are reported for illegal characters
- off: indicates no error reporting

-**Default value**: off
\ No newline at end of file
+**Default value**: off
+
+## acceleration_with_compute_pool
+
+**Parameter description**: When a query contains OBS, this parameter determines whether the query is accelerated through the compute resource pool. (Due to specification changes, this feature is no longer supported in the current version; do not use it.)
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range:** Boolean
+
+- on: indicates that a query containing OBS is accelerated through the compute resource pool when the pool is available; whether a given query is accelerated is decided by a cost evaluation.
+- off: indicates that no query is accelerated through the compute resource pool.
+
+**Default value:** off
+
+## dfs_partition_directory_length
+
+**Parameter description**: Specifies the upper limit on the length of a directory name when a partition directory is constructed for an HDFS VALUE partitioned table on an HDFS file system.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range:** 92-7999
+
+**Default value:** 512
+
+## max_resource_package
+
+**Parameter description**: Specifies the upper limit on the number of threads that can concurrently run acceleration tasks on each database node.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range:** 0~2147483647
+
+**Default value:** 0
+
+## cluster_run_mode
+
+**Parameter description**: Sets the type of shared storage cluster.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range:** enumeration type
+
+- cluster_primary
+
+- cluster_standby
+
+**Default value:** cluster_primary
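+
+A hedged sketch of switching a shared storage cluster into standby-cluster mode; POSTMASTER parameters take effect only after a restart, and the restart step itself is outside SQL:
+
+```sql
+-- Persist the new run mode (takes effect after the instance restarts).
+ALTER SYSTEM SET cluster_run_mode = 'cluster_standby';
+
+-- After the restart, confirm the active mode.
+SHOW cluster_run_mode;
+```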
+
+## enable_event_trigger_a_mode
+
+**Parameter description**: Controls whether event triggers are enabled in A-compatible mode.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range:** Boolean
+
+**Default value:** off
+
+## enable_tidrangescan
+
+**Parameter description**: Controls whether the planner uses the tidrangescan plan.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range:** Boolean
+
+**Default value:** on
+
+## force_tidrangescan
+
+**Parameter description**: Controls whether the execution plan is forced to use tidrangescan during parallel export with the gs_dump tool.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range:** Boolean
+
+**Default value:** off
+
+## plan_cache_type_validation
+
+**Parameter description**: Controls whether result type validation is performed for cached plans in PBE (parse-bind-execute) mode. Disabling the check allows the result types of cached plans to change in real time.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range:** Boolean
+
+**Default value:** on
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/multi-level-cache-management-parameters.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/multi-level-cache-management-parameters.md
new file mode 100644
index 00000000..c06d38b5
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/multi-level-cache-management-parameters.md
@@ -0,0 +1,69 @@
+---
+title: Multi-Level Cache Management Parameters
+summary: Multi-Level Cache Management Parameters
+author: Guo Huan
+date: 2024-04-11
+---
+
+# Multi-Level Cache Management Parameters
+
+## enable_nvm
+
+**Parameter Description**: Specifies whether the multi-level cache management feature is enabled. Once enabled, the setting cannot be modified.
+
+This parameter is of the POSTMASTER type. Please refer to the corresponding setting method in the [GUC parameter configuration](appendix.md).
+
+**Value Range**: Boolean, on/off. on indicates that the multi-level cache management feature is enabled in the current installation; off indicates that it is not enabled.
+
+**Default Value**: off
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
+>
+> - When enabled, the original cache pool is divided into a Dram Buffer Pool and an Nvm Buffer Pool. The migration of pages between cache levels is controlled by access frequency, allowing hot data to reside in memory, warm data in NVM, and cold data on disk.
+> - The parameters `nvm_buffers`, `nvm_file_path`, `bypass_nvm`, and `bypass_dram` only take effect when `enable_nvm` is set to on.
+
+## nvm_buffers
+
+**Parameter Description**: The size of the Nvm Buffer Pool.
+
+This parameter is of the POSTMASTER type. Please refer to the corresponding setting method in the [GUC parameter configuration](appendix.md).
+
+**Value Range**: Integer, 0~2147483647
+
+**Default Value**: 0
+
+## nvm_file_path
+
+**Parameter Description**: The path for the NVM file.
+
+This parameter is of the POSTMASTER type. Please refer to the corresponding setting method in the [GUC parameter configuration](appendix.md).
+ +**Value Range**: String, NVM file path + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: +> +> The NVM medium (e.g., SCM) is exposed to applications in App Direct mode through a file system interface. MogDB achieves byte-addressing of the NVM medium by mmap-ing the NVM file, effectively using it as a cache for MogDB. + +## bypass_nvm + +**Parameter Description**: The probability of bypassing the NVM cache pool and directly loading pages into the DRAM cache pool when there is a cache miss. + +This parameter is of the POSTMASTER type. Please refer to the corresponding setting method in the [GUC parameter configuration](appendix.md). + +**Value Range**: Float, 0.0~1.0 + +**Default Value**: 0.5 + +## bypass_dram + +**Parameter Description**: The probability of migrating pages from the NVM cache pool to the DRAM cache pool when there is a cache hit. + +This parameter is of the POSTMASTER type. Please refer to the corresponding setting method in the [GUC parameter configuration](appendix.md). + +**Value Range**: Float, 0.0~1.0 + +**Default Value**: 0.01 + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: +> +> - For example, with the default value of 0.01, when a page hits in the NVM cache pool, there is a 1% probability that it will be migrated to the DRAM cache pool. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md index e310c717..1ffb6930 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md @@ -1,106 +1,106 @@ ---- -title: Genetic Query Optimizer -summary: Genetic Query Optimizer -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Genetic Query Optimizer - -This section describes parameters related to genetic query optimizer. The genetic query optimizer (GEQO) is an algorithm that plans queries by using heuristic searching. This algorithm reduces planning time for complex queries and the costs of producing plans are sometimes inferior to those found by the normal exhaustive-search algorithm. - -## geqo - -**Parameter description**: Specifies whether to enable the genetic query optimization. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> It is best not to turn it off in execution. **geqo_threshold** provides more subtle control of GEQO. - -**Value range**: Boolean - -- **on** indicates that the genetic query optimization is enabled. -- **off** indicates that the genetic query optimization is disabled. - -**Default value**: **on** - -## geqo_threshold - -**Parameter description**: Specifies the number of **FROM** items. Genetic query optimization is used to plan queries when the number of statements executed is greater than this value. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). 
- -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - For simpler queries it is best to use the regular, exhaustive-search planner, but for queries with many tables it is better to use GEQO to manage the queries. -> - A **FULL OUTER JOIN** construct counts as only one **FROM** item. - -**Value range**: an integer ranging from 2 to *INT_MAX* - -**Default value**: **12** - -## geqo_effort - -**Parameter description**: Controls the trade-off between planning time and query plan quality in GEQO. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> **geqo_effort** does not do anything directly. This parameter is only used to compute the default values for the other variables that influence GEQO behavior. If you prefer, you can manually set the other parameters instead. - -**Value range**: an integer ranging from 1 to 10 - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> Larger values increase the time spent in query planning, but also increase the probability that an efficient query plan is chosen. - -**Default value**: **5** - -## geqo_pool_size - -**Parameter description**: Controls the pool size used by GEQO, that is, the number of individuals in the genetic population. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: an integer ranging from 0 to *INT_MAX* - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> The value of this parameter must be at least **2**, and useful values are typically from **100** to **1000**. If this parameter is set to **0**, MogDB selects a proper value based on **geqo_effort** and the number of tables. - -**Default value**: **0** - -## geqo_generations - -**Parameter description**: Specifies the number of iterations of the GEQO. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: an integer ranging from 0 to *INT_MAX* - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> The value of this parameter must be at least **1**, and useful values are typically from **100** to **1000**. If it is set to **0**, a suitable value is chosen based on **geqo_pool_size**. - -**Default value**: **0** - -## geqo_selection_bias - -**Parameter description**: Specifies the selection bias used by GEQO. The selection bias is the selective pressure within the population. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** a floating point number ranging from 1.5 to 2.0 - -**Default value**: **2** - -## geqo_seed - -**Parameter description**: Specifies the initial value of the random number generator used by GEQO to select random paths through the join order search space. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). 
- -**Value range:** a floating point number ranging from 0.0 to 1.0 - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> Varying the value changes the set of join paths explored, and may result in a better or worse best path being found. - -**Default value**: **0** +--- +title: Genetic Query Optimizer +summary: Genetic Query Optimizer +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Genetic Query Optimizer + +This section describes parameters related to genetic query optimizer. The genetic query optimizer (GEQO) is an algorithm that plans queries by using heuristic searching. This algorithm reduces planning time for complex queries and the costs of producing plans are sometimes inferior to those found by the normal exhaustive-search algorithm. + +## geqo + +**Parameter description**: Specifies whether to enable the genetic query optimization. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> It is best not to turn it off in execution. **geqo_threshold** provides more subtle control of GEQO. + +**Value range**: Boolean + +- **on** indicates that the genetic query optimization is enabled. +- **off** indicates that the genetic query optimization is disabled. + +**Default value**: **on** + +## geqo_threshold + +**Parameter description**: Specifies the number of **FROM** items. Genetic query optimization is used to plan queries when the number of statements executed is greater than this value. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> +> - For simpler queries it is best to use the regular, exhaustive-search planner, but for queries with many tables it is better to use GEQO to manage the queries. +> - A **FULL OUTER JOIN** construct counts as only one **FROM** item. + +**Value range**: an integer ranging from 2 to *INT_MAX* + +**Default value**: **12** + +## geqo_effort + +**Parameter description**: Controls the trade-off between planning time and query plan quality in GEQO. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> **geqo_effort** does not do anything directly. This parameter is only used to compute the default values for the other variables that influence GEQO behavior. If you prefer, you can manually set the other parameters instead. + +**Value range**: an integer ranging from 1 to 10 + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> Larger values increase the time spent in query planning, but also increase the probability that an efficient query plan is chosen. + +**Default value**: **5** + +## geqo_pool_size + +**Parameter description**: Controls the pool size used by GEQO, that is, the number of individuals in the genetic population. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). 
+
+**Value range**: an integer ranging from 0 to *INT_MAX*
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+> The value of this parameter must be at least **2**, and useful values are typically from **100** to **1000**. If this parameter is set to **0**, MogDB selects a proper value based on **geqo_effort** and the number of tables.
+
+**Default value**: **0**
+
+## geqo_generations
+
+**Parameter description**: Specifies the number of iterations of the GEQO.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: an integer ranging from 0 to *INT_MAX*
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+> The value of this parameter must be at least **1**, and useful values are typically from **100** to **1000**. If it is set to **0**, a suitable value is chosen based on **geqo_pool_size**.
+
+**Default value**: **0**
+
+## geqo_selection_bias
+
+**Parameter description**: Specifies the selection bias used by GEQO. The selection bias is the selective pressure within the population.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range:** a floating point number ranging from 1.5 to 2.0
+
+**Default value**: **2**
+
+## geqo_seed
+
+**Parameter description**: Specifies the initial value of the random number generator used by GEQO to select random paths through the join order search space.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range:** a floating point number ranging from 0.0 to 1.0
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+> Varying the value changes the set of join paths explored, and may result in a better or worse best path being found.
+
+**Default value**: **0**
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md
index 62b39b8e..00e058cd 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md
@@ -92,3 +92,63 @@ This parameter is a USERSET parameter. Set it based on instructions provided in

**Value range**: a floating point number ranging from 0 to *DBL_MAX*

**Default value**: **0**
+
+## smp_thread_cost
+
+**Parameter description**: Specifies the cost of synchronizing communication between a single thread executing an operator in parallel and the remaining threads. Lowering this value encourages the optimizer to prefer parallel plans.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Integer, 0~1000
+
+**Default value**: 1000
+
+## hash_agg_total_cost_ratio
+
+**Parameter description**: Sets the optimizer's estimate of the total overhead of processing hash aggregation operators.
+
+This parameter is a USERSET parameter.
Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Numeric, 0 ~ max_int + +**Default value**: 1 + +## hash_join_total_cost_ratio + +**Parameter description**: Sets the optimizer's estimate of the total overhead of processing hash join operators. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Numeric, 0 ~ max_int + +**Default value**: 1 + +## merge_join_total_cost_ratio + +**Parameter description**: Sets the optimizer's estimate of the total overhead of processing the merge join operator. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Numeric, 0 ~ max_int + +**Default value**: 1 + +## nestloop_total_cost_ratio + +**Parameter description**: Sets the optimizer's estimate of the total overhead of processing nested loop operators. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Numeric, 0 ~ max_int + +**Default value**: 1 + +## sort_agg_total_cost_ratio + +**Parameter description**: Sets the optimizer's estimate of the total overhead of processing the sorted aggregation operator. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Numeric, 0 ~ max_int + +**Default value**: 1 diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md index 1f9d57f7..947a3f45 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md @@ -1,341 +1,407 @@ ---- -title: Optimizer Method Configuration -summary: Optimizer Method Configuration -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Optimizer Method Configuration - -These configuration parameters provide a crude method of influencing the query plans chosen by the query optimizer. If the default plan chosen by the optimizer for a particular query is not optimal, a temporary solution is to use one of these configuration parameters to force the optimizer to choose a different plan. Better ways include adjusting the optimizer cost constants, manually running **ANALYZE**, increasing the value of the **default_statistics_target** configuration parameter, and increasing the amount of the statistics collected in specific columns using **ALTER TABLE SET STATISTICS**. - -## enable_bitmapscan - -**Parameter description**: Controls the query optimizer's use of bitmap-scan plan types. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of bitmap-scan plan types is enabled. -- **off** indicates that the query optimizer's use of bitmap-scan plan types is disabled. 
- -**Default value**: **on** - -## force_bitmapand - -**Parameter description**: Controls the query optimizer's use of BitmapAnd plan types. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of BitmapAnd plan types is enabled. -- **off** indicates that the query optimizer's use of BitmapAnd plan types is disabled. - -**Default value**: **off** - -## enable_hashagg - -**Parameter description**: Controls the query optimizer's use of Hash aggregation plan types. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of Hash aggregation plan types is enabled. -- **off** indicates that the query optimizer's use of Hash aggregation plan types is disabled. - -**Default value**: **on** - -## enable_hashjoin - -**Parameter description**: Controls the query optimizer's use of Hash-join plan types. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of Hash-join plan types is enabled. -- **off** indicates that the query optimizer's use of Hash-join plan types is disabled. - -**Default value**: **on** - -## enable_indexscan - -**Parameter description**: Controls the query optimizer's use of index-scan plan types. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of index-scan plan types is enabled. -- **off** indicates that the query optimizer's use of index-scan plan types is disabled. - -**Default value**: **on** - -## enable_indexonlyscan - -**Parameter description**: Controls the query optimizer's use of index-only-scan plan types. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of index-only-scan plan types is enabled. -- **off** indicates that the query optimizer's use of index-only-scan plan types is disabled. - -**Default value**: **on** - -## enable_material - -**Parameter description**: Controls the query optimizer's use of materialization. It is impossible to suppress materialization entirely, but setting this variable to **off** prevents the optimizer from inserting materialized nodes. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of materialization is enabled. -- **off** indicates that the query optimizer's use of materialization is disabled. - -**Default value**: **on** - -## enable_mergejoin - -**Parameter description**: Controls the query optimizer's use of merge-join plan types. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). 
- -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of merge-join plan types is enabled. -- **off** indicates that the query optimizer's use of merge-join plan types is disabled. - -**Default value**: **on** - -## enable_nestloop - -**Parameter description**: Controls the query optimizer's use of nested-loop join plan types to fully scan internal tables. It is impossible to suppress nested-loop joins entirely, but setting this variable to **off** encourages the optimizer to choose other methods if available. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of nested-loop join plan types is enabled. -- **off** indicates that the query optimizer's use of nested-loop join plan types is disabled. - -**Default value**: **on** - -## enable_index_nestloop - -**Parameter description**: Controls the query optimizer's use of the nested-loop join plan types to scan the parameterized indexes of internal tables. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of nested-loop join plan types is enabled. -- **off** indicates that the query optimizer's use of nested-loop join plan types is disabled. - -**Default value**: **on** - -## enable_seqscan - -**Parameter description**: Controls the query optimizer's use of sequential scan plan types. It is impossible to suppress sequential scans entirely, but setting this variable to **off** encourages the optimizer to choose other methods if available. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of sequential scan plan types is enabled. -- **off** indicates that the query optimizer's use of sequential scan plan types is disabled. - -**Default value**: **on** - -## enable_sort - -**Parameter description**: Controls the query optimizer's use of sort methods. It is impossible to suppress explicit sorts entirely, but setting this variable to **off** encourages the optimizer to choose other methods if available. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of sort methods is enabled. -- **off** indicates that the query optimizer's use of sort methods is disabled. - -**Default value**: **on** - -## enable_tidscan - -**Parameter description**: Controls the query optimizer's use of Tuple ID (TID) scan plan types. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the query optimizer's use of TID scan plan types is enabled. -- **off** indicates that the query optimizer's use of TID scan plan types is disabled. - -**Default value**: **on** - -## enable_kill_query - -**Parameter description**: In CASCADE mode, when a user is deleted, all the objects belonging to the user are deleted. 
This parameter specifies whether the queries of the objects belonging to the user can be unlocked when the user is deleted. - -This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the unlocking is allowed. -- **off** indicates that the unlocking is not allowed. - -**Default value**: **off** - -## enforce_a_behavior - -**Parameter description**: Controls the rule matching modes of regular expressions. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the O matching rule is used. -- **off** indicates that the POSIX matching rule is used. - -**Default value**: **on** - -## max_recursive_times - -**Parameter description**: Specifies the maximum number of **WITH RECURSIVE** iterations. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: an integer ranging from 0 to *INT_MAX* - -**Default value**: **200** - -## enable_vector_engine - -**Parameter description**: Controls the query optimizer's use of vectorized executor. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates the query optimizer's use of vectorized executor is enabled. -- **off** indicates the query optimizer's use of vectorized executor is disabled. - -**Default value**: **on** - -## enable_change_hjcost - -**Parameter description**: Specifies whether the optimizer excludes internal table running costs when selecting the Hash Join cost path. If it is set to **on**, tables with a few records and high running costs are more possible to be selected. - -This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the internal table running costs will be excluded. -- **off** indicates that the internal table running costs will not be excluded. - -**Default value**: **off** - -## enable_absolute_tablespace - -**Parameter description**: Controls whether the tablespace can use an absolute path. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that an absolute path can be used. -- **off** indicates that an absolute path cannot be used. - -**Default value**: **on** - -## enable_valuepartition_pruning - -**Parameter description**: Specifies whether the DFS partitioned table is dynamically or statically optimized. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the DFS partitioned table is dynamically or statically optimized. -- **off** indicates that the DFS partitioned table is not dynamically or statically optimized. 
- -**Default value**: **on** - -## qrw_inlist2join_optmode - -**Parameter description**: Specifies whether to enable inlist-to-join (inlist2join) query rewriting. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: a string - -- **disable** indicates that the inlist2join query rewriting is disabled. -- **cost_base** indicates that the cost-based inlist2join query rewriting is enabled. -- **rule_base** indicates that the forcible rule-based inlist2join query rewriting is enabled. -- A positive integer indicates the threshold of inlist2join query rewriting. If the number of elements in the list is greater than the threshold, the rewriting is performed. - -**Default value**: **cost_base** - -## skew_option - -**Parameter description**: Specifies whether an optimization policy is used. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: a string - -- **off** indicates that the policy is disabled. -- **normal** indicates that a radical policy is used. All possible skews are optimized. -- **lazy** indicates that a conservative policy is used. Uncertain skews are ignored. - -**Default value**: **normal** - -## default_limit_rows - -**Parameter description**: Specifies the default estimated number of limit rows for generating genericplan. If this parameter is set to a positive value, the positive value is used as the estimated number of limit rows. If this parameter is set to a negative value, the negative value is converted to a percentage and used as default estimated value, that is, -5 indicates 5%. - -**Value range**: a floating point number ranging from -100 to DBL_MAX - -**Default value**: **-10** - -## check_implicit_conversions - -**Parameter description**: Specifies whether to check candidate index paths generated for index columns that have implicit type conversions in a query. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that a check will be performed for candidate index paths generated for index columns that have implicit type conversion in a query. -- **off** indicates that a check will not be performed. - -**Default value**: **off** - -## cost_weight_index - -**Parameter description**: Specifies the cost weight of index_scan. - -This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: a floating point number ranging from 1e-10 to 1e+10. - -**Default value**: **1** - -## try_vector_engine_strategy - -**Parameter description**: Specifies the policy for processing row-store tables using the vectorized executor. By setting this parameter, queries containing row-store tables can be converted to vectorized execution plans for calculation, improving the execution performance of complex queries in AP-like scenarios. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Valid value**: enumerated values - -- **off** is the default value, which indicates that this function is disabled. 
That is, row-store tables will not be converted into vectorized execution plans for execution.
-- **force**: Queries are forcibly converted to vectorized execution plans for execution no matter whether the base table to be queried is a row-store table, column-store table, or hybrid row-column store table, unless the query type or expression is not supporte by the vectorized executor. In this case, the performance may deteriorate in different query scenarios.
-- **optimal**: On the basis of **force**, the optimizer determines whether to convert a query statement into a vectorized execution plan based on the query complexity to avoid performance deterioration after the conversion.
-
-**Default value**: **off**
\ No newline at end of file
+---
+title: Optimizer Method Configuration
+summary: Optimizer Method Configuration
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Optimizer Method Configuration
+
+These configuration parameters provide a crude method of influencing the query plans chosen by the query optimizer. If the default plan chosen by the optimizer for a particular query is not optimal, a temporary solution is to use one of these configuration parameters to force the optimizer to choose a different plan. Better ways include adjusting the optimizer cost constants, manually running **ANALYZE**, increasing the value of the **default_statistics_target** configuration parameter, and increasing the amount of statistics collected in specific columns using **ALTER TABLE SET STATISTICS**.
+
+## enable_adaptive_hashagg
+
+**Parameter description**: Specifies whether adaptive two-stage hash aggregation is enabled.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- on means enabled.
+- off means disabled.
+
+**Default value**: off
+
+## enable_incremental_sort
+
+**Parameter description**: Controls the sort steps used by the optimizer. When enabled, the optimizer preferentially generates incremental sort operators on partially ordered plans to improve sorting performance.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- on means enabled.
+
+- off means disabled.
+
+**Default value**: on
+
+## enable_inner_unique_opt
+
+**Parameter description**: Controls the optimizer's use of Inner Unique optimization.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- on means enabled.
+- off means disabled.
+
+**Default value**: off
+
+## enable_broadcast
+
+**Parameter description**: Controls the optimizer's use of the broadcast distribution method for stream cost estimation.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- on means enabled.
+- off means disabled.
+
+**Default value**: on
+
+## enable_bitmapscan
+
+**Parameter description**: Controls the query optimizer's use of bitmap-scan plan types.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+ +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of bitmap-scan plan types is enabled. +- **off** indicates that the query optimizer's use of bitmap-scan plan types is disabled. + +**Default value**: **on** + +## force_bitmapand + +**Parameter description**: Controls the query optimizer's use of BitmapAnd plan types. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of BitmapAnd plan types is enabled. +- **off** indicates that the query optimizer's use of BitmapAnd plan types is disabled. + +**Default value**: **off** + +## enable_hashagg + +**Parameter description**: Controls the query optimizer's use of Hash aggregation plan types. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of Hash aggregation plan types is enabled. +- **off** indicates that the query optimizer's use of Hash aggregation plan types is disabled. + +**Default value**: **on** + +## enable_hashjoin + +**Parameter description**: Controls the query optimizer's use of Hash-join plan types. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of Hash-join plan types is enabled. +- **off** indicates that the query optimizer's use of Hash-join plan types is disabled. + +**Default value**: **on** + +## enable_indexscan + +**Parameter description**: Controls the query optimizer's use of index-scan plan types. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of index-scan plan types is enabled. +- **off** indicates that the query optimizer's use of index-scan plan types is disabled. + +**Default value**: **on** + +## enable_indexonlyscan + +**Parameter description**: Controls the query optimizer's use of index-only-scan plan types. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of index-only-scan plan types is enabled. +- **off** indicates that the query optimizer's use of index-only-scan plan types is disabled. + +**Default value**: **on** + +## enable_material + +**Parameter description**: Controls the query optimizer's use of materialization. It is impossible to suppress materialization entirely, but setting this variable to **off** prevents the optimizer from inserting materialized nodes. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of materialization is enabled. +- **off** indicates that the query optimizer's use of materialization is disabled. 
+ +**Default value**: **on** + +## enable_mergejoin + +**Parameter description**: Controls the query optimizer's use of merge-join plan types. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of merge-join plan types is enabled. +- **off** indicates that the query optimizer's use of merge-join plan types is disabled. + +**Default value**: **on** + +## enable_nestloop + +**Parameter description**: Controls the query optimizer's use of nested-loop join plan types to fully scan internal tables. It is impossible to suppress nested-loop joins entirely, but setting this variable to **off** encourages the optimizer to choose other methods if available. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of nested-loop join plan types is enabled. +- **off** indicates that the query optimizer's use of nested-loop join plan types is disabled. + +**Default value**: **on** + +## enable_index_nestloop + +**Parameter description**: Controls the query optimizer's use of the nested-loop join plan types to scan the parameterized indexes of internal tables. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of nested-loop join plan types is enabled. +- **off** indicates that the query optimizer's use of nested-loop join plan types is disabled. + +**Default value**: **on** + +## enable_seqscan + +**Parameter description**: Controls the query optimizer's use of sequential scan plan types. It is impossible to suppress sequential scans entirely, but setting this variable to **off** encourages the optimizer to choose other methods if available. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of sequential scan plan types is enabled. +- **off** indicates that the query optimizer's use of sequential scan plan types is disabled. + +**Default value**: **on** + +## enable_sort + +**Parameter description**: Controls the query optimizer's use of sort methods. It is impossible to suppress explicit sorts entirely, but setting this variable to **off** encourages the optimizer to choose other methods if available. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the query optimizer's use of sort methods is enabled. +- **off** indicates that the query optimizer's use of sort methods is disabled. + +**Default value**: **on** + +## enable_tidscan + +**Parameter description**: Controls the query optimizer's use of Tuple ID (TID) scan plan types. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). 
+
+**Value range**: Boolean
+
+- **on** indicates that the query optimizer's use of TID scan plan types is enabled.
+- **off** indicates that the query optimizer's use of TID scan plan types is disabled.
+
+**Default value**: **on**
+
+## enable_kill_query
+
+**Parameter description**: In CASCADE mode, when a user is deleted, all the objects belonging to the user are deleted. This parameter specifies whether the queries of the objects belonging to the user can be unlocked when the user is deleted.
+
+This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that the unlocking is allowed.
+- **off** indicates that the unlocking is not allowed.
+
+**Default value**: **off**
+
+## enforce_a_behavior
+
+**Parameter description**: Controls the rule matching modes of regular expressions.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that the O matching rule is used.
+- **off** indicates that the POSIX matching rule is used.
+
+**Default value**: **on**
+
+## max_recursive_times
+
+**Parameter description**: Specifies the maximum number of **WITH RECURSIVE** iterations.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: an integer ranging from 0 to *INT_MAX*
+
+**Default value**: **200**
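+
+A minimal, hedged illustration (the recursive query below is hypothetical and not from the product documentation): a **WITH RECURSIVE** query that needs more iterations than **max_recursive_times** allows is expected to fail, so deep recursions may require raising the limit for the session.
+
+```sql
+-- Counting to 500 requires 500 iterations, which exceeds the default limit of 200.
+WITH RECURSIVE t(n) AS (
+    SELECT 1
+    UNION ALL
+    SELECT n + 1 FROM t WHERE n < 500
+)
+SELECT max(n) FROM t;           -- expected to fail with the default setting
+
+SET max_recursive_times = 1000; -- USERSET: can be changed per session
+-- Re-running the same query should now succeed.
+```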
+
+## enable_vector_engine
+
+**Parameter description**: Controls the query optimizer's use of the vectorized executor.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that the query optimizer's use of the vectorized executor is enabled.
+- **off** indicates that the query optimizer's use of the vectorized executor is disabled.
+
+**Default value**: **on**
+
+## enable_change_hjcost
+
+**Parameter description**: Specifies whether the optimizer excludes internal table running costs when selecting the Hash Join cost path. If it is set to **on**, tables with a few records and high running costs are more likely to be selected.
+
+This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that the internal table running costs will be excluded.
+- **off** indicates that the internal table running costs will not be excluded.
+
+**Default value**: **off**
+
+## enable_absolute_tablespace
+
+**Parameter description**: Controls whether the tablespace can use an absolute path.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that an absolute path can be used.
+- **off** indicates that an absolute path cannot be used.
+
+**Default value**: **on**
+
+## enable_valuepartition_pruning
+
+**Parameter description**: Specifies whether the DFS partitioned table is dynamically or statically optimized.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that the DFS partitioned table is dynamically or statically optimized.
+- **off** indicates that the DFS partitioned table is not dynamically or statically optimized.
+
+**Default value**: **on**
+
+## qrw_inlist2join_optmode
+
+**Parameter description**: Specifies whether to enable inlist-to-join (inlist2join) query rewriting.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: a string
+
+- **disable** indicates that the inlist2join query rewriting is disabled.
+- **cost_base** indicates that the cost-based inlist2join query rewriting is enabled.
+- **rule_base** indicates that the forcible rule-based inlist2join query rewriting is enabled.
+- A positive integer indicates the threshold of inlist2join query rewriting. If the number of elements in the list is greater than the threshold, the rewriting is performed.
+
+**Default value**: **cost_base**
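+
+A hedged sketch (table **t1** and its columns are hypothetical): the rewriting can be forced per session and observed in the plan.
+
+```sql
+SET qrw_inlist2join_optmode = 'rule_base';  -- force rule-based inlist2join rewriting
+-- With the rewriting applied, the plan is expected to join t1 against the
+-- IN-list values instead of evaluating a plain filter condition.
+EXPLAIN SELECT * FROM t1 WHERE id IN (10, 20, 30);
+SET qrw_inlist2join_optmode = 'cost_base'; -- restore the default
+```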
+
+## skew_option
+
+**Parameter description**: Specifies the skew optimization policy that is used.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: a string
+
+- **off** indicates that the policy is disabled.
+- **normal** indicates that a radical policy is used. All possible skews are optimized.
+- **lazy** indicates that a conservative policy is used. Uncertain skews are ignored.
+
+**Default value**: **normal**
+
+## default_limit_rows
+
+**Parameter description**: Specifies the default estimated number of limit rows for generating a generic plan. If this parameter is set to a positive value, the positive value is used as the estimated number of limit rows. If this parameter is set to a negative value, the negative value is converted to a percentage and used as the default estimated value, that is, -5 indicates 5%.
+
+**Value range**: a floating point number ranging from -100 to DBL_MAX
+
+**Default value**: **-10**
+
+## check_implicit_conversions
+
+**Parameter description**: Specifies whether to check candidate index paths generated for index columns that have implicit type conversions in a query.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that a check will be performed for candidate index paths generated for index columns that have implicit type conversion in a query.
+- **off** indicates that a check will not be performed.
+
+**Default value**: **off**
+
+## cost_weight_index
+
+**Parameter description**: Specifies the cost weight of index_scan.
+
+This parameter is a **USERSET** parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: a floating point number ranging from 1e-10 to 1e+10.
+
+**Default value**: **1**
+
+## try_vector_engine_strategy
+
+**Parameter description**: Specifies the policy for processing row-store tables using the vectorized executor. By setting this parameter, queries containing row-store tables can be converted to vectorized execution plans for calculation, improving the execution performance of complex queries in AP-like scenarios.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: enumerated values
+
+- **off** is the default value, which indicates that this function is disabled. That is, row-store tables will not be converted into vectorized execution plans for execution.
+- **force**: Queries are forcibly converted to vectorized execution plans for execution no matter whether the base table to be queried is a row-store table, column-store table, or hybrid row-column store table, unless the query type or expression is not supported by the vectorized executor. In this case, the performance may deteriorate in different query scenarios.
+- **optimal**: On the basis of **force**, the optimizer determines whether to convert a query statement into a vectorized execution plan based on the query complexity to avoid performance deterioration after the conversion.
+
+**Default value**: **off**
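+
+A brief, non-authoritative example (table **sales** and its columns are hypothetical): the conversion can be tried per session and verified with **EXPLAIN**.
+
+```sql
+SET try_vector_engine_strategy = 'optimal';
+-- For an analytical query on a row-store table, the plan may now contain
+-- vectorized operators instead of the plain row-store ones.
+EXPLAIN SELECT region, count(*) FROM sales GROUP BY region;
+SET try_vector_engine_strategy = 'off';     -- restore the default
+```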
+
+## enable_expr_fusion
+
+**Parameter description**: Controls the SRF, expression flattening, centralized Seq Scan projection elimination, shared aggregate function transition state, and step optimization features.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- off: the default value, which indicates that this function is disabled.
+- on: indicates that the SRF, expression flattening, centralized Seq Scan projection elimination, shared aggregate function transition state, and step optimization features are enabled at the same time.
+
+**Default value**: off
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/other-optimizer-options.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/other-optimizer-options.md
index e2044558..f265fc46 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/other-optimizer-options.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/other-optimizer-options.md
@@ -7,6 +7,36 @@ date: 2021-04-20
 
 # Other Optimizer Options
 
+## adaptive_hashagg_reduce_ratio_threshold
+
+**Parameter description**: Determines whether a two-stage aggregation query is appropriate. If the ratio of the number of rows after aggregation to the number of rows before aggregation is greater than the value of this parameter, the query is considered unsuitable for two-stage aggregation.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Float, 0~1
+
+**Default value**: 0.8
+
+## adaptive_hashagg_min_rows
+
+**Parameter description**: Specifies the minimum number of rows required for a two-stage aggregation query.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Integer, 1~max_int
+
+**Default value**: 1024
+
+## adaptive_hashagg_allow_spill
+
+**Parameter description**: Specifies whether the pre-aggregation phase is allowed to spill to disk when memory is insufficient.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean, true or false
+
+**Default value**: false
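+
+A hedged sketch of how these three parameters can be combined with **enable_adaptive_hashagg** (see Optimizer Method Configuration) in one session; the table **orders** and the chosen values are illustrative only, not recommendations.
+
+```sql
+SET enable_adaptive_hashagg = on;                  -- enable the adaptive two-stage strategy
+SET adaptive_hashagg_min_rows = 10000;             -- consider it only for inputs of at least 10000 rows
+SET adaptive_hashagg_reduce_ratio_threshold = 0.5; -- require aggregation to at least halve the row count
+SET adaptive_hashagg_allow_spill = true;           -- allow pre-aggregation to spill to disk under memory pressure
+EXPLAIN ANALYZE SELECT customer_id, sum(amount) FROM orders GROUP BY customer_id;
+```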
 
 ## explain_dna_file
 
 **Parameter description:** Sets explain_perf_mode to **run** to export object files in CSV format.
@@ -88,19 +118,6 @@ This parameter is a USERSET parameter. Set it based on instructions provided in
 
 **Default value:** **off**
 
-## partition_iterator_elimination
-
-**Parameter description**: Determines whether to eliminate the partition iteration operator to improve execution efficiency when the partition pruning result of a partitioned table is a partition.
-
-This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../appendix.md).
-
-**Value range**: Boolean
-
-- **on** indicates that the partition iteration operator is eliminated.
-- **off** indicates that the partition iteration operator is not eliminated.
-
-**Default value:** **off**
-
 ## enable_functional_dependency
 
 **Parameter description**: Determines whether the statistics about multiple columns generated by ANALYZE contain functional dependency statistics and whether the functional dependency statistics are used to calculate the selection rate.
@@ -157,6 +174,19 @@ This parameter is a USERSET parameter. Set it based on instructions provided in
 
 **Default value**: **magicset, reduce_orderby**
 
+## enable_indexscan_optimization
+
+**Parameter description**: Controls whether btree index scans (IndexScan and IndexOnlyScan) are optimized under the astore engine.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean.
+
+- on: indicates enabled.
+- off: indicates disabled.
+
+**Default value**: off
+
 ## enable_pbe_optimization
 
 **Parameter description**: Specifies whether the optimizer optimizes the query plan for statements executed in Parse Bind Execute (PBE) mode.
@@ -185,6 +215,16 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided
 
 **Default value**: **off**
 
+## gpc_clean_timeout
+
+**Parameter description**: When enable_global_plancache is enabled, plans in the shared plan list are cleaned up if they have not been used for longer than gpc_clean_timeout. This parameter controls the retention time of unused shared plans.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Integer, 300~86400, in seconds
+
+**Default value**: 1800 (that is, 30 minutes)
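+
+A hedged example of changing this SIGHUP parameter without a restart, assuming **ALTER SYSTEM SET** is available in your build (otherwise the same change can be made with the gs_guc tool in reload mode):
+
+```sql
+-- Takes effect only when enable_global_plancache (POSTMASTER) is on.
+ALTER SYSTEM SET gpc_clean_timeout = 3600;  -- keep unused shared plans for 1 hour
+SHOW gpc_clean_timeout;                     -- verify the value currently in effect
+```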
+
 ## enable_global_stats
 
 **Parameter description**: Specifies the current statistics collection mode, which can be global statistics collection or single-node statistics collection. By default, the global statistics collection mode is used. If this parameter is set to **off**, the statistics of the first node are collected by default. In this case, the quality of the generated query plan may be affected. However, the information collection performance is optimal. Therefore, exercise caution when disabling this parameter. This parameter has been discarded in the current version. Do not set it.
@@ -751,4 +791,63 @@ This parameter is a USERSET parameter. Set it based on instructions provided in
 
 **Value range**: Boolean. The value **true** indicates that the function is enabled, and the value **false** indicates that the function is disabled.
 
-**Default value**: **false**
\ No newline at end of file
+**Default value**: **false**
+
+## show_fdw_remote_plan
+
+**Parameter description**: Controls whether the method that an FDW uses to fetch remote data is printed in the EXPLAIN output. When a query contains foreign tables, the database uses the ForeignScan operator to fetch the actual data from the remote server. If this parameter is enabled, the ForeignScan operators used are numbered in the EXPLAIN output, and the remote data access method of each ForeignScan operator is appended to the EXPLAIN result in order. The printed content is produced by the remote-plan printing interface of the FDW in use, which organizes and fills in the information itself. If the FDW does not support this interface, the output indicates that no relevant plan information is available.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean.
+
+- true means enabled.
+- false means disabled.
+
+**Default value**: false
+
+## sort_key_pruning_level
+
+**Parameter description**: Controls the optimizer's pruning rules for useless sort keys in subqueries.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: off, balanced, aggressive. Different values correspond to different degrees of optimization.
+
+- off: Disables useless sort key pruning in subqueries.
+
+- balanced: Conservative pruning strategy. The optimizer only tries to prune completely useless sort keys and does not prune subqueries in aggregate operations.
+
+- aggressive: Relatively aggressive pruning strategy. The optimizer tries to prune all sort keys that may not be used in an aggregation operation.
+
+**Default value**: balanced
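+
+A hedged illustration (table **t1** and column **col2** are hypothetical): the effect of the pruning level can be inspected with **EXPLAIN**, since a pruned sort key means the subquery's Sort node disappears from the plan.
+
+```sql
+SET sort_key_pruning_level = 'aggressive';
+-- The ORDER BY in the subquery cannot affect the aggregate result, so the
+-- optimizer is expected to drop the Sort node from the plan.
+EXPLAIN SELECT count(*) FROM (SELECT * FROM t1 ORDER BY col2) AS sub;
+SET sort_key_pruning_level = 'balanced';   -- restore the default
+```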
+
+## best_agg_plan
+
+**Parameter description**: Specifies a path for aggregation in a streaming environment.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Integer, 0-5
+
+**Default value**: 0
+
+## enable_accept_empty_str
+
+**Parameter description**: Controls whether the empty string is accepted as null.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean.
+
+**Default value**: off
+
+## enable_sse42
+
+**Parameter description**: Controls whether SSE 4.2 instructions are enabled.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean.
+
+**Default value**: off
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/query-planning.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/query-planning.md
index a584821c..5314d11d 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/query-planning.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/query-planning.md
@@ -1,21 +1,21 @@
----
-title: Query Planning
-summary: Query Planning
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Query Planning
-
-This section describes the method configuration, cost constants, planning algorithm, and some configuration parameters for the optimizer.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-> Two parameters are involved in the optimizer:
->
-> - *INT_MAX* indicates the maximum value of the INT data type. The value is **2147483647**.
-> - *DBL_MAX* indicates the maximum value of the FLOAT data type.
-
-- **[Optimizer Method Configuration](optimizer-method-configuration.md)**
-- **[Optimizer Cost Constants](optimizer-cost-constants.md)**
-- **[Genetic Query Optimizer](genetic-query-optimizer.md)**
+---
+title: Query Planning
+summary: Query Planning
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Query Planning
+
+This section describes the method configuration, cost constants, planning algorithm, and some configuration parameters for the optimizer.
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+> Two parameters are involved in the optimizer:
+>
+> - *INT_MAX* indicates the maximum value of the INT data type. The value is **2147483647**.
+> - *DBL_MAX* indicates the maximum value of the FLOAT data type.
+
+- **[Optimizer Method Configuration](optimizer-method-configuration.md)**
+- **[Optimizer Cost Constants](optimizer-cost-constants.md)**
+- **[Genetic Query Optimizer](genetic-query-optimizer.md)**
 - **[Other Optimizer Options](other-optimizer-options.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query.md
index 5d12b385..d899160e 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/query.md
@@ -103,6 +103,19 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T
 
 **Default value**: **on**
 
+## track_stmt_parameter
+
+**Parameter description**: After track_stmt_parameter is enabled, the executed statements recorded in statement_history are no longer normalized, and the complete SQL statement information is displayed to help DBAs locate problems. For a simple query, the complete statement information is displayed. For a PBE statement, the complete statement information is displayed with the value of each variable appended, in the format "query string; parameters: $1=value1, $2=value2, ...".
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+- on: Enables display of the complete SQL statement information.
+- off: Disables display of the complete SQL statement information.
+
+**Default value**: off
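+
+A hedged illustration of the recorded format (the prepared statement and table **t1** are hypothetical; slow/full SQL records are assumed to be readable through **dbe_perf.statement_history**):
+
+```sql
+ALTER SYSTEM SET track_stmt_parameter = on;  -- SIGHUP: applied on configuration reload
+PREPARE q(int) AS SELECT * FROM t1 WHERE id = $1;
+EXECUTE q(42);
+-- The recorded query text is then expected to look like:
+--   SELECT * FROM t1 WHERE id = $1; parameters: $1=42
+SELECT query FROM dbe_perf.statement_history ORDER BY start_time DESC LIMIT 1;
+```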
+
 ## track_stmt_session_slot
 
 **Parameter description**: Specifies the maximum number of full/slow SQL statements that can be cached in a session. If the number of full/slow SQL statements exceeds this value, new statements will not be traced until the flush thread flushes the cached statements to the disk to reserve idle space.
@@ -169,6 +182,33 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided
 
 > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** Some snapshot information comes from unique SQL statements. Therefore, when automatic elimination is enabled, if the selected start snapshot and end snapshot exceed the elimination time, the WDR report cannot be generated.
 
+## enable_slow_query_log
+
+**Parameter description**: Specifies whether slow query information is written to a log file. This parameter is deprecated in this release.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+- on: Indicates that slow query information is written to the log file.
+- off: Indicates that slow query information is not written to the log file.
+
+**Default value**: on
+
+## query_log_file
+
+**Parameter description**: When the GUC parameter enable_slow_query_log is set to on, slow query records are written to a log file, and query_log_file determines the name of the server's slow query log file, which is accessible only to the sysadmin user. The log file name is usually generated according to a strftime pattern, so the file name can be defined in terms of the system time by using % escape characters. This parameter is deprecated in this version.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**:
+>
+> It is recommended that you use the % escape characters to define log file names; otherwise, it is difficult to manage log files effectively.
+
+**Value range**: string
+
+**Default value**: `slow_query_log-%Y-%m-%d_%H%M%S.log`
+
 ## track_stmt_standby_chain_size
 
 **Parameter description**: Specifies the maximum memory and disk space occupied by fast/slow SQL statement records on the standby node. This parameter is a combination of parameters. This parameter is read every 60 seconds and records exceeding the retention period are deleted. Only the sysadmin user can access this parameter.
@@ -188,4 +228,67 @@ Note that the memory values corresponding to the fast and slow SQL statements ca
 
 Data is cleared at a granularity of 16 MB. Therefore, a maximum of 16 MB data delay error may occur.
 
-**Default value: 32, 1024, 16, 512**
\ No newline at end of file
+**Default value: 32, 1024, 16, 512**
+
+## asp_log_directory
+
+**Parameter description**: When asp_flush_mode is set to all or file, asp_log_directory determines the directory where the server ASP log files are stored. It can be an absolute path, or a relative path (a path relative to the data directory), and is accessible only to the sysadmin user.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+>![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**:
+>
+>When the value of asp_log_directory in the configuration file is an illegal path, the database instance cannot be restarted.
+
+>![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
+>
+>- Legal path: the user has read and write access to this path.
+>
+>- Illegal path: the user does not have read/write access to this path.
+
+**Value range**: string
+
+**Default value**: Specified during installation.
+
+## query_log_directory
+
+**Parameter description**: When enable_slow_query_log is set to on, query_log_directory determines the directory that holds the server's slow query log files, which is accessible only to the sysadmin user. It can be an absolute path, or a relative path (a path relative to the data directory). This parameter is deprecated in this release.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+>![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**:
+>
+>When the value of query_log_directory in the configuration file is an illegal path, the database instance fails to restart.
+
+>![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
+>
+>- Legal path: the user has read and write access to this path.
+>- Illegal path: the user does not have read/write access to this path.
+
+**Value range**: string
+
+**Default value**: Specified during installation.
+
+## unique_sql_retention_time
+
+**Parameter description**: Specifies the interval at which the unique SQL hash table is cleared.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Integer, 1 to 3650, in minutes.
+
+**Default value**: 30min
+
+## perf_directory
+
+**Parameter description**: perf_directory determines the directory where the output files of performance view sampling tasks are stored, and is accessible only to the sysadmin user. It can be an absolute path, or a relative path (a path relative to the data directory).
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+- Legal path: the user has read and write access to this path.
+- Illegal path: the user does not have read/write access to this path.
+
+**Value range**: string
+
+**Default value**: Specified during installation.
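+
+A hedged way to inspect these directory parameters (the returned values are installation-specific; changing a POSTMASTER parameter such as perf_directory requires an instance restart):
+
+```sql
+-- Only the sysadmin user can read these parameters.
+SHOW perf_directory;
+SHOW asp_log_directory;
+```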
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/reference-guide-guc-parameters.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/reference-guide-guc-parameters.md index 562fffe1..0b8e00c5 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/reference-guide-guc-parameters.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/reference-guide-guc-parameters.md @@ -42,11 +42,15 @@ date: 2023-04-07 - **[Scheduled Task](scheduled-task.md)** - **[Thread Pool](thread-pool.md)** - **[Backup and Restoration](backup-and-restoration-parameter.md)** -- **[Backup and Restoration](DCF-parameters-settings.md)** +- **[DCF Parameters Settings](DCF-parameters-settings.md)** - **[Flashback](flashback.md)** - **[Rollback Parameters](rollback-parameters.md)** - **[Reserved Parameters](reserved-parameters.md)** - **[AI Features](AI-features.md)** - **[Global SysCache Parameters](global-syscache-parameters.md)** +- **[Multi-Level Cache Management Parameters](multi-level-cache-management-parameters.md)** +- **[Resource Pooling Parameters](resource-pooling-parameters.md)** - **[Parameters Related to Efficient Data Compression Algorithms](parameters-related-to-efficient-data-compression-algorithms.md)** -- **[Writer Statement Parameters Supported by Standby Servers](writer-statement-parameters-supported-by-standby-server.md)** \ No newline at end of file +- **[Writer Statement Parameters Supported by Standby Servers](writer-statement-parameters-supported-by-standby-server.md)** +- **[Data Import and Export](data-import-export.md)** +- **[Delimiter](./delimiter.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/replication-parameters-of-two-database-instances.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/replication-parameters-of-two-database-instances.md index ccfb7c16..2d1b0114 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/replication-parameters-of-two-database-instances.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/replication-parameters-of-two-database-instances.md @@ -1,18 +1,18 @@ ---- -title: Replication Parameters of Two Database Instances -summary: Replication Parameters of Two Database Instances -author: zhang cuiping -date: 2022-10-24 ---- - -# Replication Parameters of Two Database Instances - -## RepOriginId - -**Parameter description**: This parameter is a session-level GUC parameter. In bidirectional logical replication, set it to a non-zero value to avoid infinite data replication. - -This parameter is a USERSET parameter. Set it based on **Method 3** provided in Table 1 [GUC parameters](./appendix.md). - -**Value range**: an integer ranging from 0 to 2147483647 - +--- +title: Replication Parameters of Two Database Instances +summary: Replication Parameters of Two Database Instances +author: zhang cuiping +date: 2022-10-24 +--- + +# Replication Parameters of Two Database Instances + +## RepOriginId + +**Parameter description**: This parameter is a session-level GUC parameter. In bidirectional logical replication, set it to a non-zero value to avoid infinite data replication. + +This parameter is a USERSET parameter. Set it based on **Method 3** provided in Table 1 [GUC parameters](./appendix.md). 
+
+**Value range**: an integer ranging from 0 to 2147483647
+
 **Default value**: **0**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/reserved-parameters.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/reserved-parameters.md
index be10f98f..5735ad10 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/reserved-parameters.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/reserved-parameters.md
@@ -1,30 +1,30 @@
----
-title: Reserved Parameters
-summary: Reserved Parameters
-author: Zhang Cuiping
-date: 2021-11-08
----
-
-# Reserved Parameters
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The following parameters are reserved and do not take effect in this version.
-
-acce_min_datasize_per_thread
-
-cstore_insert_mode
-
-enable_constraint_optimization
-
-enable_hadoop_env
-
-enable_hdfs_predicate_pushdown
-
-enable_orc_cache
-
-schedule_splits_threshold
-
-backend_version
-
-undo_zone_count
-
+---
+title: Reserved Parameters
+summary: Reserved Parameters
+author: Zhang Cuiping
+date: 2021-11-08
+---
+
+# Reserved Parameters
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The following parameters are reserved and do not take effect in this version.
+
+acce_min_datasize_per_thread
+
+cstore_insert_mode
+
+enable_constraint_optimization
+
+enable_hadoop_env
+
+enable_hdfs_predicate_pushdown
+
+enable_orc_cache
+
+schedule_splits_threshold
+
+backend_version
+
+undo_zone_count
+
 version_retention_age
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md
index d6d79a48..1de08a61 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md
@@ -119,3 +119,33 @@ This parameter is a USERSET parameter. Set it based on instructions provided in
 
 **Value range**: an integer ranging from 0 to 1000
 
 **Default value**: **1**
+
+## checkpoint_flush_after
+
+**Parameter description**: Specifies the threshold at which the checkpointer thread tells the operating system to start asynchronously flushing pages from the operating system cache to disk. When the number of pages written by the checkpointer exceeds this threshold, asynchronous flushing begins. The disk page size in MogDB is 8 kB.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Integer, 0~256 (0 means asynchronous disk flushing is disabled). For example, a value of 32 means that the checkpointer thread will perform asynchronous disk flushing after writing 32 consecutive disk pages, that is, 32 x 8 = 256 kB of disk space.
+
+**Default value**: 32
+
+## bgwriter_flush_after
+
+**Parameter description**: Specifies the threshold at which the background writer thread tells the operating system to start asynchronously flushing pages from the operating system cache to disk. When the number of pages written by the background writer exceeds this threshold, asynchronous flushing begins. The disk page size in MogDB is 8 kB.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Integer, 0~256 (0 means asynchronous disk flushing is disabled), in pages (8 kB each). For example, a value of 64 means that the background writer thread will asynchronously flush the disk after writing 64 consecutive disk pages, that is, 64 x 8 = 512 kB of disk space.
+
+**Default value**: 64 (that is, 512 kB)
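+
+A hedged tuning sketch for the flush-after family (the values are illustrative, not recommendations; ALTER SYSTEM SET is assumed to be available, and gs_guc reload achieves the same for SIGHUP parameters):
+
+```sql
+-- SIGHUP parameters: applied on configuration reload, no restart required.
+ALTER SYSTEM SET checkpoint_flush_after = 64; -- 64 pages = 512 kB per asynchronous flush request
+ALTER SYSTEM SET bgwriter_flush_after = 32;   -- 32 pages = 256 kB
+-- backend_flush_after (described next) is USERSET and can be changed per session:
+SET backend_flush_after = 16;                 -- 16 pages = 128 kB
+```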
+
+## backend_flush_after
+
+**Parameter description**: Specifies the threshold at which a backend thread tells the operating system to start asynchronously flushing pages from the operating system cache to disk. When the number of pages written by the backend thread exceeds this threshold, asynchronous flushing begins. The disk page size in MogDB is 8 kB.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Integer, 0~256 (0 means asynchronous disk flushing is disabled), in pages (8 kB each). For example, a value of 64 means that the backend thread will perform asynchronous flushing after writing 64 consecutive disk pages, that is, 64 x 8 = 512 kB of disk space.
+
+**Default value**: 0
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/background-writer.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/background-writer.md
index e2f433a8..a80cb4c0 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/background-writer.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/background-writer.md
@@ -11,19 +11,6 @@ This section describes background writer parameters. The background writer proce
 
 It also mitigates performance deterioration caused by checkpoints because only a few of dirty pages need to be flushed to the disk when the checkpoints arrive. This mechanism, however, increases the overall net I/O load because while a repeatedly-dirtied page may otherwise be written only once per checkpoint interval, the background writer may write it several times as it is dirtied in the same interval. In most cases, continuous light loads are preferred, instead of periodical load peaks. The parameters discussed in this section can be set based on actual requirements.
 
-## bgwriter_thread_num
-
-**Parameter description**: Specifies the number of bgwriter threads for flushing pages after the incremental checkpoint is enabled. Dirty pages to be evicted are flushed to disks, and non-dirty pages are placed in the candidate buffer chain. This parameter helps accelerate buffer eviction and improve performance.
-
-This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
-
-**Value range**: an integer ranging from 0 to 8
-
-- To test the effect of disabling this feature during development, you can set this parameter to **0**. However, if this parameter is set to **0**, the value will be changed to **1** in the code and the feature cannot be disabled.
-- If this parameter is set to a value ranging from 1 to 8, the corresponding number of background threads are started to maintain the candidate buffer chain. Dirty pages that meet the conditions are flushed to disks, and non-dirty pages are added to the candidate list.
-
-**Default value**: **2**
-
 ## bgwriter_delay
 
 **Parameter description**: Specifies the interval at which the background writer writes dirty shared buffers. The background writer initiates write operations for some dirty shared buffers (the volume of data to be written is specified by the **bgwriter_lru_maxpages** parameter), sleep for the milliseconds specified by **bgwriter_delay**, and then restarts.
@@ -38,20 +25,6 @@ In many systems, the effective resolution of sleep delays is 10 milliseconds.
 Therefore, setting this parameter to a value that is not a multiple of 10 has the same effect as setting it to the next higher multiple of 10.
 
 This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
 
 **Value range**: an integer ranging from 10 to 10000. The unit is millisecond.
 
 **Default value**: **2s**
 
 **Setting suggestion:** Reduce this value in slow data writing scenarios to reduce the checkpoint load.
 
-bgwriter_delay
-
-**Parameter description**: Specifies the interval at which the background writer writes dirty shared buffers. The background writer initiates write operations for some dirty shared buffers (the volume of data to be written is specified by the **bgwriter_lru_maxpages** parameter), sleep for the milliseconds specified by **bgwriter_delay**, and then restarts.
-
-In many systems, the effective resolution of sleep delays is 10 milliseconds. Therefore, setting this parameter to a value that is not a multiple of 10 has the same effect as setting it to the next higher multiple of 10.
-
-This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
-
-**Value range**: an integer ranging from 10 to 10000. The unit is millisecond.
-
-**Default value**: **2s**
-
-**Setting suggestion:** Reduce this value in slow data writing scenarios to reduce the checkpoint load.
-
 ## candidate_buf_percent_target
 
 **Parameter description**: Specifies the expected percentage of available buffers in the shared_buffer memory buffer in the candidate buffer chain when the incremental checkpoint is enabled. If the number of available buffers in the current candidate chain is less than the target value, the bgwriter thread starts flushing dirty pages that meet the requirements.
@@ -161,4 +134,26 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided
 
 **Value range**: an integer, in the range [32,256]
 
-**Default value**: **256**
\ No newline at end of file
+**Default value**: **256**
+
+## extreme_flush_dirty_page
+
+**Parameter description**: Specifies whether to enable the extreme dirty-page flushing mode. Enabling it makes dirty-page flushing faster but increases write amplification. (Introduced in MogDB 5.0.8)
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+**Default value**: off
+
+**Note**: Before enabling this parameter, confirm that the bottleneck causing slow dirty-page flushing in the current system is not the system I/O capacity. You can confirm that disk I/O is not a bottleneck through monitoring tools such as iostat and Node-exporter. For shared storage services, you should also confirm the I/O capacity limit of the shared storage service.
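+
+A hedged example of enabling the mode (POSTMASTER parameters require an instance restart; verify I/O headroom first, as the note above advises):
+
+```sql
+ALTER SYSTEM SET extreme_flush_dirty_page = on; -- takes effect after the instance is restarted
+SHOW extreme_flush_dirty_page;                  -- returns the value currently in effect
+```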
+
+## checkpoint_target_time
+
+**Parameter description**: Specifies the maximum time for executing a checkpoint. The smaller the value, the faster dirty pages are flushed and the less time a checkpoint actually takes, but write amplification increases; when I/O becomes a bottleneck, a very low value may affect services. The operations this applies to are: stop, switchover (between primary and standby nodes), and manually executing the CHECKPOINT statement. (Introduced in MogDB 5.0.8)
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+ +**Value range**: 5~60s + +**Default value**: 30s \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/cost-based-vacuum-delay.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/cost-based-vacuum-delay.md index cfe31fda..4f7028a1 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/cost-based-vacuum-delay.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/cost-based-vacuum-delay.md @@ -1,76 +1,76 @@ ---- -title: Cost-based Vacuum Delay -summary: Cost-based Vacuum Delay -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Cost-based Vacuum Delay - -This feature allows administrators to reduce the I/O impact of the **VACUUM** and **ANALYZE** statements on concurrent database activities. It is often more important to prevent maintenance statements, such as **VACUUM** and **ANALYZE**, from affecting other database operations than to run them quickly. Cost-based vacuum delay provides a way for administrators to achieve this purpose. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> Certain vacuum operations hold critical locks and should be complete as quickly as possible. In MogDB, cost-based vacuum delays do not take effect during such operations. To avoid uselessly long delays in such cases, the actual delay is the larger of the two calculated values: -> -> - **vacuum_cost_delay** x **accumulated_balance**/**vacuum_cost_limit** -> - **vacuum_cost_delay** x 4 - -**Background** - -During the execution of the [ANALYZE | ANALYSE](../../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md) and [VACUUM](../../../reference-guide/sql-syntax/VACUUM.md) statements, the system maintains an internal counter that keeps track of the estimated cost of the various I/O operations that are performed. When the accumulated cost reaches a limit (specified by **vacuum_cost_limit**), the process performing the operation will sleep for a short period of time (specified by **vacuum_cost_delay**). Then, the counter resets and the operation continues. - -By default, this feature is disabled. To enable this feature, set **vacuum_cost_delay** to a positive value. - -## vacuum_cost_delay - -**Parameter description**: Specifies the length of time that a process will sleep when **vacuum_cost_limit** has been exceeded. - -In many systems, the effective resolution of the sleep length is 10 milliseconds. Therefore, setting this parameter to a value that is not a multiple of 10 has the same effect as setting it to the next higher multiple of 10. - -This parameter is usually set to a small value, such as 10 or 20 milliseconds. Adjusting vacuum's resource consumption is best done by changing other vacuum cost parameters. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: an integer ranging from 0 to 100. A positive number enables cost-based vacuum delay and **0** disables cost-based vacuum delay. - -**Default value**: **0** - -## vacuum_cost_page_hit - -**Parameter description**: Specifies the estimated cost for vacuuming a buffer found in the shared buffer. It represents the cost to lock the buffer pool, look up the shared hash table, and scan the content of the page. - -This parameter is a USERSET parameter. 
Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: an integer ranging from 0 to 10000 - -**Default value**: **1** - -## vacuum_cost_page_miss - -**Parameter description**: Specifies the estimated cost for vacuuming a buffer read from the disk. It represents the cost to lock the buffer pool, look up the shared hash table, read the desired block from the disk, and scan the block. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: an integer ranging from 0 to 10000 - -**Default value:** **10** - -## vacuum_cost_page_dirty - -**Parameter description**: Specifies the estimated cost charged when vacuum modifies a block that was previously clean. It represents the extra I/O required to flush the dirty block out to disk again. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: an integer ranging from 0 to 1000 - -**Default value:** **20** - -## vacuum_cost_limit - -**Parameter description**: Specifies the cost limit. The vacuuming process will sleep if this limit is exceeded. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: an integer ranging from 1 to 10000 - -**Default value**: **200** +--- +title: Cost-based Vacuum Delay +summary: Cost-based Vacuum Delay +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Cost-based Vacuum Delay + +This feature allows administrators to reduce the I/O impact of the **VACUUM** and **ANALYZE** statements on concurrent database activities. It is often more important to prevent maintenance statements, such as **VACUUM** and **ANALYZE**, from affecting other database operations than to run them quickly. Cost-based vacuum delay provides a way for administrators to achieve this purpose. + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> Certain vacuum operations hold critical locks and should be complete as quickly as possible. In MogDB, cost-based vacuum delays do not take effect during such operations. To avoid uselessly long delays in such cases, the actual delay is the larger of the two calculated values: +> +> - **vacuum_cost_delay** x **accumulated_balance**/**vacuum_cost_limit** +> - **vacuum_cost_delay** x 4 + +**Background** + +During the execution of the [ANALYZE | ANALYSE](../../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md) and [VACUUM](../../../reference-guide/sql-syntax/VACUUM.md) statements, the system maintains an internal counter that keeps track of the estimated cost of the various I/O operations that are performed. When the accumulated cost reaches a limit (specified by **vacuum_cost_limit**), the process performing the operation will sleep for a short period of time (specified by **vacuum_cost_delay**). Then, the counter resets and the operation continues. + +By default, this feature is disabled. To enable this feature, set **vacuum_cost_delay** to a positive value. + +## vacuum_cost_delay + +**Parameter description**: Specifies the length of time that a process will sleep when **vacuum_cost_limit** has been exceeded. + +In many systems, the effective resolution of the sleep length is 10 milliseconds. 
Therefore, setting this parameter to a value that is not a multiple of 10 has the same effect as setting it to the next higher multiple of 10. + +This parameter is usually set to a small value, such as 10 or 20 milliseconds. Adjusting vacuum's resource consumption is best done by changing other vacuum cost parameters. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: an integer ranging from 0 to 100. A positive number enables cost-based vacuum delay and **0** disables cost-based vacuum delay. + +**Default value**: **0** + +## vacuum_cost_page_hit + +**Parameter description**: Specifies the estimated cost for vacuuming a buffer found in the shared buffer. It represents the cost to lock the buffer pool, look up the shared hash table, and scan the content of the page. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: an integer ranging from 0 to 10000 + +**Default value**: **1** + +## vacuum_cost_page_miss + +**Parameter description**: Specifies the estimated cost for vacuuming a buffer read from the disk. It represents the cost to lock the buffer pool, look up the shared hash table, read the desired block from the disk, and scan the block. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: an integer ranging from 0 to 10000 + +**Default value:** **10** + +## vacuum_cost_page_dirty + +**Parameter description**: Specifies the estimated cost charged when vacuum modifies a block that was previously clean. It represents the extra I/O required to flush the dirty block out to disk again. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: an integer ranging from 0 to 1000 + +**Default value:** **20** + +## vacuum_cost_limit + +**Parameter description**: Specifies the cost limit. The vacuuming process will sleep if this limit is exceeded. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: an integer ranging from 1 to 10000 + +**Default value**: **200** diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/kernel-resource-usage.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/kernel-resource-usage.md index 4249e382..892d8f97 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/kernel-resource-usage.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/kernel-resource-usage.md @@ -1,41 +1,41 @@ ---- -title: Kernel Resource Usage -summary: Kernel Resource Usage -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Kernel Resource Usage - -This section describes kernel resource parameters. Whether these parameters take effect depends on OS settings. - -## max_files_per_process - -**Parameter description**: Specifies the maximum number of simultaneously open files allowed by each server process. If the kernel is enforcing a proper limit, setting this parameter is not required. 
-
-However, on some platforms, such as most Berkeley Software Distribution (BSD) systems, the kernel allows individual processes to open much more files than the system can support. If the message "Too many open files" is displayed, set this parameter to a smaller value. Generally, the system must meet this requirement: Number of file descriptors >= Maximum number of concurrent statements x Number of database nodes x **max_files_per_process** x 3
-
-This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
-
-**Value range**: an integer ranging from 25 to 2147483647
-
-**Default value**: **1000**
-
-## shared_preload_libraries
-
-**Parameter description:** Specifies one or more shared libraries to be preloaded at server start. If multiple libraries are to be loaded, separate their names using commas (,). For example, **$libdir/mylib** will cause **mylib.so** (or on some platforms, **mylib.sl**) to be preloaded before the loading of the standard library directory.
-
-You can preinstall the MogDB's stored procedure library using the **$libdir/pl** *XXX* syntax as described in the preceding text. *XXX* can only be **pgsql**, **perl**, **tcl**, or **python**.
-
-By preloading a shared library and initializing it as required, the library startup time is avoided when the library is first used. However, the time to start each new server process may increase, even if that process never uses the library. Therefore, set this parameter only for libraries that will be used in most sessions.
-
-This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
->
-> - If a specified library is not found, the MogDB service will fail to start.
-> - Each MogDB-supported library has a special mark that is checked to guarantee compatibility. Therefore, libraries that do not support MogDB cannot be loaded in this way.
-
-**Value range**: a string
-
-**Default value**: empty
+---
+title: Kernel Resource Usage
+summary: Kernel Resource Usage
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Kernel Resource Usage
+
+This section describes kernel resource parameters. Whether these parameters take effect depends on OS settings.
+
+## max_files_per_process
+
+**Parameter description**: Specifies the maximum number of simultaneously open files allowed by each server process. If the kernel is enforcing a proper limit, setting this parameter is not required.
+
+However, on some platforms, such as most Berkeley Software Distribution (BSD) systems, the kernel allows individual processes to open many more files than the system can support. If the message "Too many open files" is displayed, set this parameter to a smaller value. Generally, the system must meet this requirement: Number of file descriptors >= Maximum number of concurrent statements x Number of database nodes x **max_files_per_process** x 3
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: an integer ranging from 25 to 2147483647
+
+**Default value**: **1000**
+
+## shared_preload_libraries
+
+**Parameter description:** Specifies one or more shared libraries to be preloaded at server start.
If multiple libraries are to be loaded, separate their names using commas (,). For example, **$libdir/mylib** will cause **mylib.so** (or on some platforms, **mylib.sl**) to be preloaded before the loading of the standard library directory.
+
+You can preinstall MogDB's stored procedure library using the **$libdir/pl** *XXX* syntax as described in the preceding text. *XXX* can only be **pgsql**, **perl**, **tcl**, or **python**.
+
+By preloading a shared library and initializing it as required, the library startup time is avoided when the library is first used. However, the time to start each new server process may increase, even if that process never uses the library. Therefore, set this parameter only for libraries that will be used in most sessions.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+>
+> - If a specified library is not found, the MogDB service will fail to start.
+> - Each MogDB-supported library has a special mark that is checked to guarantee compatibility. Therefore, libraries that do not support MogDB cannot be loaded in this way.
+
+**Value range**: a string
+
+**Default value**: empty
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/memory.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/memory.md
index 200cf71c..5633b85e 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/memory.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/memory.md
@@ -340,4 +340,55 @@ If **enable_global_syscache** and **enable_thread_pool** are enabled, this param
**Value range**: an integer ranging from 1 x 1024 to 512 x 1024. The unit is kB.
-**Default value:** **256 MB**
\ No newline at end of file
+**Default value:** **256 MB**
+
+## memory_trace_level
+
+**Parameter description**: Specifies the level at which memory request information is logged once dynamic memory usage exceeds 90% of the maximum dynamic memory. This parameter takes effect only when use_workload_manager and enable_memory_limit are enabled. This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: enumerated values
+
+- none: Memory request information is not recorded.
+- level1: The following information is logged once dynamic memory usage exceeds 90% of the maximum dynamic memory, and the logged memory information is saved in the $GAUSSLOG/mem_log directory.
+    - Global memory overview.
+    - Memory usage of the top 20 memory contexts among all memory contexts of the instance, session, and thread types.
+    - The totalsize and freesize fields of each memory context.
+- level2: The following information is logged once dynamic memory usage exceeds 90% of the maximum dynamic memory, and the logged memory information is saved in the $GAUSSLOG/mem_log directory.
+    - Global memory overview.
+    - Memory usage of the top 20 memory contexts among all memory contexts of the instance, session, and thread types.
+    - The totalsize and freesize fields of each memory context.
+    - Detailed information about every memory request in each memory context, including the file in which the memory is requested, the line number, and the size.
+
+**Default value:** level1
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**:
+>
+> - If this parameter is set to level2, the memory request details (file, line, and size fields) of each memory context are recorded, which has a large performance impact, so set it carefully.
+> - The recorded memory snapshot information can be queried with the system function gs_get_history_memory_detail(cstring); see "SQL Reference > Functions and Operators > Statistical Information Functions" for details.
+> - The recorded memory context is obtained by summarizing all the renamed memory contexts of the same type.
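+
+For example, a minimal sketch of raising the trace level without a restart (the data directory is illustrative; `memory_trace_level` is a SIGHUP parameter, so a reload is sufficient):
+
+```bash
+# Record memory-context snapshots at the lighter level1 detail.
+gs_guc reload -D /gaussdata/data/dn1 -c "memory_trace_level = level1"
+
+# Once dynamic memory usage has exceeded 90% of the maximum dynamic memory,
+# the snapshot files appear under the mem_log directory.
+ls "$GAUSSLOG/mem_log"
+```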
+
+## resilience_memory_reject_percent
+
+**Parameter description**: Specifies the dynamic memory usage percentages that control memory-overload escape. This parameter takes effect only when the GUC parameters use_workload_manager and enable_memory_limit are enabled. This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: String, length greater than 0
+
+This parameter consists of two parts, recover_memory_percent and overload_memory_percent, whose meanings are as follows:
+
+- recover_memory_percent: the percentage of the maximum dynamic memory below which memory returns from the overload state to the normal state. When dynamic memory usage drops below the maximum dynamic memory multiplied by this percentage, overload escape stops and new connections are accepted again. The value ranges from 0 to 100 and is interpreted as a percentage.
+- overload_memory_percent: the percentage of the maximum dynamic memory above which memory is considered overloaded. When dynamic memory usage exceeds the maximum dynamic memory multiplied by this percentage, the current memory is overloaded; overload escape is triggered, sessions are killed, and new connections are rejected. The value ranges from 0 to 100 and is interpreted as a percentage.
+
+**Default value:** '0,0', which disables the memory-overload escape function.
+
+**Example**:
+
+```
+resilience_memory_reject_percent = '70,90'
+```
+
+Indicates that once memory usage exceeds 90% of the maximum memory, new connections are rejected and accumulated sessions are killed; once memory usage falls back below 70% of the maximum memory while sessions are being killed, the killing stops and new connections are accepted again.
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**:
+>
+> - The maximum dynamic memory and the used dynamic memory can be obtained by querying the gs_total_memory_detail view: the maximum dynamic memory is max_dynamic_memory and the used dynamic memory is dynamic_used_memory.
+> - If the percentages in this parameter are set too low, the memory-overload escape process is triggered frequently, which forces running sessions to exit and briefly prevents new connections from being accepted; set it carefully based on actual memory usage.
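+
+As a quick way to see the values these thresholds are compared against, the view mentioned above can be queried directly (connection options are illustrative, and the memorytype/memorymbytes column names are assumed from the gs_total_memory_detail view):
+
+```bash
+gsql -d postgres -p 5432 -c "
+SELECT memorytype, memorymbytes
+FROM   gs_total_memory_detail
+WHERE  memorytype IN ('max_dynamic_memory', 'dynamic_used_memory');"
+```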
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/resource-consumption.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/resource-consumption.md
index ca3353dd..65718b86 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/resource-consumption.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-consumption/resource-consumption.md
@@ -1,15 +1,15 @@
----
-title: Resource Consumption
-summary: Resource Consumption
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Resource Consumption
-
-- **[Memory](memory.md)**
-- **[Disk Space](disk-space.md)**
-- **[Kernel Resource Usage](kernel-resource-usage.md)**
-- **[Cost-based Vacuum Delay](cost-based-vacuum-delay.md)**
-- **[Background Writer](background-writer.md)**
-- **[Asynchronous I/O Operations](asynchronous-io-operations.md)**
\ No newline at end of file
+---
+title: Resource Consumption
+summary: Resource Consumption
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Resource Consumption
+
+- **[Memory](memory.md)**
+- **[Disk Space](disk-space.md)**
+- **[Kernel Resource Usage](kernel-resource-usage.md)**
+- **[Cost-based Vacuum Delay](cost-based-vacuum-delay.md)**
+- **[Background Writer](background-writer.md)**
+- **[Asynchronous I/O Operations](asynchronous-io-operations.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-pooling-parameters.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-pooling-parameters.md
new file mode 100644
index 00000000..80200058
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/resource-pooling-parameters.md
@@ -0,0 +1,284 @@
+---
+title: Resource Pooling Parameters
+summary: Resource Pooling Parameters
+author: Guo Huan
+date: 2023-10-29
+---
+
+# Resource Pooling Parameters
+
+## ss_enable_dss
+
+**Parameter Description**: Whether to enable the DSS mode, specified by the user during installation and deployment.
+
+This parameter is a POSTMASTER parameter. Modifying it after installation will prevent the database from starting normally; do not change it.
+
+**Value Range**: Boolean, on, off. on indicates that the current installation and deployment enable DSS mode, off indicates that it is not enabled.
+
+**Default Value**: off
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
+>
+> - The `ss_enable_dss` parameter defaults to off on a single node.
+> - To enable resource pooling mode, this parameter needs to be set to on.
+> - After OM installs MogDB in resource pooling mode, this parameter defaults to on.
+
+## ss_enable_dms
+
+**Parameter Description**: Whether to enable the DMS feature, specified by the user during installation and deployment.
+
+This parameter is a POSTMASTER parameter. Modifying it after installation will prevent the database from starting normally; do not change it.
+
+**Value Range**: Boolean, on, off. on indicates that the current installation and deployment enable DMS functionality, off indicates that it is not enabled.
+
+**Default Value**: off
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
+>
+> - The `ss_enable_dms` parameter defaults to off on a single node.
+> - To enable resource pooling mode, this parameter needs to be set to on.
+> - After OM installs MogDB in resource pooling mode, this parameter defaults to on.
+
+## ss_enable_ssl
+
+**Parameter Description**: Whether to enable SSL.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Boolean, on, off. on indicates that SSL is enabled, off indicates that it is not enabled.
+
+**Default Value**: on
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
+>
+> - The prerequisite for this parameter to take effect is that MogDB's SSL parameter is set to on (master switch).
+> - Manual modification of this parameter requires changes and restarts on all nodes to take effect; modifying and restarting a single node will result in a timeout failure.
+> - If this parameter is turned off, the primary and standby machines will not use SSL connections, which poses a risk of impersonation attacks and information leakage.
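+
+A minimal sketch of enabling SSL for resource pooling (the data directory is illustrative; remember that the change must be made on every node and all nodes restarted together):
+
+```bash
+# The cluster-wide ssl master switch must be on for ss_enable_ssl to take effect.
+gs_guc set -D /gaussdata/data/dn1 -c "ssl = on"
+gs_guc set -D /gaussdata/data/dn1 -c "ss_enable_ssl = on"
+
+# Repeat the two commands on every node, then restart them together,
+# for example with gs_om if it manages the cluster.
+gs_om -t restart
+```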
+
+## ss_enable_catalog_centralized
+
+**Parameter Description**: Resource directory storage mode. This parameter cannot be modified.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Boolean, on, off. on indicates that DRC resources are only stored on the primary node, off indicates that resources are distributed, meaning each node needs to store a portion of DRC resources according to certain rules.
+
+**Default Value**: on
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: The `ss_enable_catalog_centralized` parameter currently cannot be modified; only the default value is supported.
+
+## ss_instance_id
+
+**Parameter Description**: Resource pooling instance ID, generated based on the DNs specified by the user during installation. This parameter cannot be modified.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Integer, 0~63
+
+**Default Value**: the instance ID generated during installation
+
+## ss_dss_vg_name
+
+**Parameter Description**: The volume group name specified by the user during installation. This parameter cannot be modified.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: String
+
+**Default Value**: The user-defined data volume group name during installation.
+
+## ss_dss_conn_path
+
+**Parameter Description**: The socket file path used by the DSS instance process. This parameter cannot be modified.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: String
+
+**Default Value**: 'UDS:${DSS_HOME}/.dss_unix_d_socket', where DSS_HOME is user-defined during installation.
+
+## ss_interconnect_channel_count
+
+**Parameter Description**: The number of MES communication channel connections.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Integer, 1~32
+
+**Default Value**: 16
+
+## ss_work_thread_count
+
+**Parameter Description**: The number of MES working threads.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Integer, 16~128
+
+**Default Value**: 32
+
+## ss_recv_msg_pool_size
+
+**Parameter Description**: The size of the MES message reception pool.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Integer, 1~1024, unit: MB
+
+**Default Value**: 16MB
+
+## ss_interconnect_type
+
+**Parameter Description**: The type of MES communication protocol.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: String type, TCP, RDMA
+
+**Default Value**: TCP
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: RDMA communication relies on CX5 network cards and depends on the OCK RDMA dynamic library. Ensure it is correctly configured before enabling.
+
+## ss_interconnect_url
+
+**Parameter Description**: The URL for MES communication between nodes.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: String, format: "node id:ip:port,node id:ip:port,..."
+
+**Default Value**: User-defined during installation.
+
+## ss_rdma_work_config
+
+**Parameter Description**: RDMA user-space poll start and end CPU.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: String, "start cpu end cpu" (space-separated), e.g., "10 15"
+
+**Default Value**: ""
+
+## ss_ock_log_path
+
+**Parameter Description**: The path of the OCK RDMA log file.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: String
+
+**Default Value**: ""
+
+## ss_enable_scrlock
+
+**Parameter Description**: Whether to use scrlock.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Boolean, on, off. on indicates that scrlock is enabled, off indicates that it is not enabled.
+
+**Default Value**: off
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: Scrlock requires the use of CX5 network cards and depends on the OCK RDMA dynamic library. Ensure it is correctly configured before enabling.
+
+## ss_enable_scrlock_sleep_mode
+
+**Parameter Description**: Whether to enable scrlock's sleep mode.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Boolean, on, off. on indicates that scrlock's sleep mode is enabled, off indicates that it is not enabled.
+
+**Default Value**: on
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: Scrlock sleep mode reduces scrlock's CPU usage but increases its latency.
+
+## ss_scrlock_server_port
+
+**Parameter Description**: The listening port number of the scrlock server.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Integer, 0~65535
+
+**Default Value**: 8000
+
+## ss_scrlock_worker_count
+
+**Parameter Description**: The number of scrlock client workers.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: Integer, 2~16
+
+**Default Value**: 2
+
+## ss_scrlock_worker_bind_core
+
+**Parameter Description**: Scrlock worker start and end CPU.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value Range**: String, "start cpu end cpu" (space-separated), e.g., "10 15"
+
+**Default Value**: ""
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
+>
+> - This parameter can be left blank, in which case scrlock workers are not bound to a CPU.
+> - The CPU range specified by this parameter must contain at least two CPUs.
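+
+A minimal sketch of pinning the scrlock workers to dedicated CPUs (CPU numbers and the data directory are illustrative; the range must contain at least two CPUs):
+
+```bash
+gs_guc set -D /gaussdata/data/dn1 -c "ss_scrlock_worker_count = 2"
+gs_guc set -D /gaussdata/data/dn1 -c "ss_scrlock_worker_bind_core = '10 15'"
+
+# POSTMASTER parameters require a restart to take effect.
+gs_ctl restart -D /gaussdata/data/dn1
+```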
+ +**Value Range**: String, "start cpu end cpu" (space-separated), e.g., "10 15" + +**Default Value**: "" + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: +> +> - This parameter can be left blank, in which case scrlock workers are not bound to a CPU. +> - The number of start and end CPUs indicated by this parameter must be no less than 2. + +## ss_scrlock_server_bind_core + +**Parameter Description**: Scrlock server start and end CPU. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value Range**: String, "start cpu end cpu" (space-separated), e.g., "10 15" + +**Default Value**: "" + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: When this parameter is empty, it is equivalent to "0 0", meaning the scrlock server occupies CPU 0. + +## ss_log_level + +**Parameter Description**: The log level for resource pooling. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +The value 7 represents run logs, and 255 represents both debug logs and run logs being enabled. + +**Value Range**: Integer, 0~887. + +**Default Value**: 7 + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: In high-concurrency scenarios, it is not recommended to configure debug logs. If debug logs are configured, and other parameters use default values, message threads may frequently print logs. In such scenarios, there might be message thread busy handling CM query node status requests, which could lead to a timeout and thus display the standby node status as not normal. +> + +## ss_log_backup_file_count + +**Parameter Description**: The number of backup log files. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value Range**: Integer, 0~1024. + +**Default Value**: 10 + +## ss_log_max_file_size + +**Parameter Description**: The maximum size of the log files. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value Range**: Integer, 1024~4194304, units in kB. + +**Default Value**: 10MB (i.e., 10240 kB) + +## ss_enable_aio + +**Parameter Description**: Whether to enable the DSS asynchronous IO function to improve the speed at which dirty pages are flushed to disk under resource pooling. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value Range**: Boolean, on, off. on to enable, off to disable. + +**Default Value**: on \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/scheduled-task.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/scheduled-task.md index e627563e..480a3d94 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/scheduled-task.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/scheduled-task.md @@ -1,42 +1,42 @@ ---- -title: Scheduled Task -summary: Scheduled Task -author: Zhang Cuiping -date: 2021-06-15 ---- - -# Scheduled Task - -## job_queue_processes - -**Parameter description:** Specifies the number of jobs that can be concurrently executed. This parameter is a POSTMASTER parameter. You can set it using **gs_guc**, and you need to restart MogDB to make the setting take effect. - -This parameter is a POSTMASTER parameter. 
Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). - -**Value range**: 0 to 1000 - -Function: - -- Setting **job_queue_processes** to **0** indicates that the scheduled job function is disabled and that no job will be executed. (Enabling scheduled jobs may affect the system performance. At sites where this function is not required, you are advised to disable it.) -- Setting **job_queue_processes** to a value that is greater than **0** indicates that the scheduled job function is enabled and this value is the maximum number of jobs that can be concurrently processed. - -After the scheduled job function is enabled, the job_scheduler thread polls the **pg_job** system catalog at a scheduled interval. The scheduled job check is performed every second by default. - -Too many concurrent jobs consume many system resources, so you need to set the number of concurrent jobs to be processed. If the current number of concurrent jobs reaches the value of **job_queue_processes** and some of them expire, these jobs will be postponed to the next polling period. Therefore, you are advised to set the polling interval (the **Interval** parameter of the **submit** interface) based on the execution duration of each job to avoid the problem that jobs in the next polling period cannot be properly processed because of overlong job execution time. - -Note: If the number of concurrent jobs is large and the value is too small, these jobs will wait in queues. However, a large parameter value leads to large resource consumption. You are advised to set this parameter to **100** and change it based on the system resource condition. - -**Default value**: **10** - -## enable_prevent_job_task_startup - -**Parameter description**: Specifies whether to start the job thread. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). - -**Value range**: Boolean - -- **on** indicates that the job thread is not started. -- **off** indicates that the job thread is started. - -**Default value**: **off** +--- +title: Scheduled Task +summary: Scheduled Task +author: Zhang Cuiping +date: 2021-06-15 +--- + +# Scheduled Task + +## job_queue_processes + +**Parameter description:** Specifies the number of jobs that can be concurrently executed. This parameter is a POSTMASTER parameter. You can set it using **gs_guc**, and you need to restart MogDB to make the setting take effect. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: 0 to 1000 + +Function: + +- Setting **job_queue_processes** to **0** indicates that the scheduled job function is disabled and that no job will be executed. (Enabling scheduled jobs may affect the system performance. At sites where this function is not required, you are advised to disable it.) +- Setting **job_queue_processes** to a value that is greater than **0** indicates that the scheduled job function is enabled and this value is the maximum number of jobs that can be concurrently processed. + +After the scheduled job function is enabled, the job_scheduler thread polls the **pg_job** system catalog at a scheduled interval. The scheduled job check is performed every second by default. + +Too many concurrent jobs consume many system resources, so you need to set the number of concurrent jobs to be processed. 
If the current number of concurrent jobs reaches the value of **job_queue_processes** and some of them expire, these jobs will be postponed to the next polling period. Therefore, you are advised to set the polling interval (the **Interval** parameter of the **submit** interface) based on the execution duration of each job to avoid the problem that jobs in the next polling period cannot be properly processed because of overlong job execution time.
+
+Note: If the number of concurrent jobs is large and the value is too small, these jobs will wait in queues. However, a large parameter value leads to large resource consumption. You are advised to set this parameter to **100** and change it based on the system resource condition.
+
+**Default value**: **10**
+
+## enable_prevent_job_task_startup
+
+**Parameter description**: Specifies whether to start the job thread.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that the job thread is not started.
+- **off** indicates that the job thread is started.
+
+**Default value**: **off**
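+
+A minimal sketch following the suggestion above (the data directory is illustrative, and the job_id/job_status column names are assumed from the pg_job system catalog):
+
+```bash
+# job_queue_processes is a POSTMASTER parameter: set it, then restart.
+gs_guc set -D /gaussdata/data/dn1 -c "job_queue_processes = 100"
+gs_ctl restart -D /gaussdata/data/dn1
+
+# Scheduled jobs and their states can then be inspected in pg_job.
+gsql -d postgres -p 5432 -c "SELECT job_id, job_status FROM pg_job;"
+```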
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/security-configuration.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/security-configuration.md
index 69b5f602..165b006a 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/security-configuration.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/security-configuration.md
@@ -1,94 +1,80 @@
----
-title: Security Configuration
-summary: Security Configuration
-author: Zhang Cuiping
-date: 2021-11-08
----
-
-# Security Configuration
-
-## elastic_search_ip_addr
-
-**Parameter description**: Specifies the IP address of the Elasticsearch system.
-
-This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: a string
-
-**Default value**: **'https:127.0.0.1'**
-
-## enable_security_policy
-
-**Parameter description**: Specifies whether the unified audit and dynamic data masking policies take effect.
-
-This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: Boolean
-
-**on**: The security policy is enabled.
-
-**off**: The security policy is disabled.
-
-**Default value**: **off**
-
-## use_elastic_search
-
-**Parameter description**: Specifies whether to send unified audit logs to Elasticsearch. If **enable_security_policy** and this parameter are enabled, unified audit logs are sent to Elasticsearch through HTTP or HTTPS (used by default). After this parameter is enabled, ensure that the Elasticsearch service corresponding to **elastic_search_ip_addr**can be properly connected. Otherwise, the process fails to be started.
-
-This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: Boolean
-
-**on**: Unified audit logs are sent to Elasticsearch.
-
-**off**: Unified audit logs are not sent to Elasticsearch.
-
-**Default value**: **off**
-
-## is_sysadmin
-
-**Parameter description**: Specifies whether the current user is an initial user.
-
-This parameter is a fixed INTERNAL parameter and cannot be modified.
-
-**Value range**: Boolean
-
-**on** indicates that the user is an initial user.
-
-**off** indicates that the user is not an initial user.
-
-**Default value**: **off**
-
-## enable_tde
-
-**Parameter description**: Specifies whether to enable the TDE function. Set this parameter to **on** before creating an encrypted table. If this parameter is set to **off**, new encrypted tables cannot be created. The created encrypted table is decrypted only when data is read and is not encrypted when the data is written.
-
-This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
-
-**Value range**: Boolean
-
-**on**: The TDE function is enabled.
-
-**off**: The TDE function is disabled.
-
-**Default value**: **off**
-
-## tde_cmk_id
-
-**Parameter description**: Specifies the CMK ID of the database instance used by the TDE function. The ID is generated by KMS. The CMK of the database instance is used to encrypt the DEK. When the DEK needs to be decrypted, a request packet needs to be sent to KMS. The DEK ciphertext and the ID of the corresponding CMK are sent to KMS.
-
-This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](./appendix.md).
-
-**Value range**: a string
-
-**Default value**: **""**
-
-## block_encryption_mode
-
-**Parameter description:** Specifies the block encryption mode used by the aes_encrypt and aes_decrypt functions for encryption and decryption.
-
-This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](./appendix.md).
-
-**Value range:** enumerated values. Valid values are **aes-128-cbc**, **aes-192-cbc**, **aes-256-cbc**, **aes-128-cfb1**, **aes-192-cfb1**, **aes-256-cfb1**, **aes-128-cfb8**, **aes-192-cfb8**, **aes-256-cfb8**, **aes-128-cfb128**, **aes-192-cfb128**, **aes-256-cfb128**, **aes-128-ofb**, **aes-192-ofb**, and **aes-256-ofb**. **aes** indicates the encryption or decryption algorithm. **128**, **192**, and **256** indicate the key lengths (unit: bit). **cbc**, **cfb1**, **cfb8**, **cfb128**, **ofb** indicate the block encryption or decryption mode.
-
+---
+title: Security Configuration
+summary: Security Configuration
+author: Zhang Cuiping
+date: 2021-11-08
+---
+
+# Security Configuration
+
+## elastic_search_ip_addr
+
+**Parameter description**: Specifies the IP address of the Elasticsearch system.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: a string
+
+**Default value**: **'https:127.0.0.1'**
+
+## enable_security_policy
+
+**Parameter description**: Specifies whether the unified audit and dynamic data masking policies take effect.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+**on**: The security policy is enabled.
+
+**off**: The security policy is disabled.
+
+**Default value**: **off**
+
+## use_elastic_search
+
+**Parameter description**: Specifies whether to send unified audit logs to Elasticsearch. If **enable_security_policy** and this parameter are enabled, unified audit logs are sent to Elasticsearch through HTTP or HTTPS (used by default). After this parameter is enabled, ensure that the Elasticsearch service corresponding to **elastic_search_ip_addr** can be properly connected. Otherwise, the process fails to be started.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+**on**: Unified audit logs are sent to Elasticsearch.
+
+**off**: Unified audit logs are not sent to Elasticsearch.
+
+**Default value**: **off**
+
+## enable_tde
+
+**Parameter description**: Specifies whether to enable the TDE function. Set this parameter to **on** before creating an encrypted table. If this parameter is set to **off**, new encrypted tables cannot be created. The created encrypted table is decrypted only when data is read and is not encrypted when the data is written.
+
+This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md).
+
+**Value range**: Boolean
+
+**on**: The TDE function is enabled.
+
+**off**: The TDE function is disabled.
+
+**Default value**: **off**
+
+## tde_cmk_id
+
+**Parameter description**: Specifies the CMK ID of the database instance used by the TDE function. The ID is generated by KMS. The CMK of the database instance is used to encrypt the DEK. When the DEK needs to be decrypted, a request packet needs to be sent to KMS. The DEK ciphertext and the ID of the corresponding CMK are sent to KMS.
+
+This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](./appendix.md).
+
+**Value range**: a string
+
+**Default value**: **""**
+
+## block_encryption_mode
+
+**Parameter description:** Specifies the block encryption mode used by the aes_encrypt and aes_decrypt functions for encryption and decryption.
+
+This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](./appendix.md).
+
+**Value range:** enumerated values. Valid values are **aes-128-cbc**, **aes-192-cbc**, **aes-256-cbc**, **aes-128-cfb1**, **aes-192-cfb1**, **aes-256-cfb1**, **aes-128-cfb8**, **aes-192-cfb8**, **aes-256-cfb8**, **aes-128-cfb128**, **aes-192-cfb128**, **aes-256-cfb128**, **aes-128-ofb**, **aes-192-ofb**, and **aes-256-ofb**. **aes** indicates the encryption or decryption algorithm. **128**, **192**, and **256** indicate the key lengths (unit: bit). **cbc**, **cfb1**, **cfb8**, **cfb128**, **ofb** indicate the block encryption or decryption mode.
+
+**Default value**: **aes-128-cbc**
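+
+For example, a minimal sketch of a round trip through the two functions (connection options, key, and initialization vector are illustrative, and the three-argument aes_encrypt/aes_decrypt signature is assumed to be available in the current compatibility mode):
+
+```bash
+gsql -d postgres -p 5432 -c "
+SET block_encryption_mode = 'aes-256-cbc';
+SELECT aes_decrypt(aes_encrypt('secret data', 'my_key_str', '1234567890123456'),
+                   'my_key_str', '1234567890123456');"
+```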
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/performance-statistics.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/performance-statistics.md
index 7040baee..06883cf8 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/performance-statistics.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/performance-statistics.md
@@ -1,35 +1,35 @@
----
-title: Performance Statistics
-summary: Performance Statistics
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Performance Statistics
-
-During the running of the database, the lock access, disk I/O operation, and invalid message processing are involved. All these operations are the bottleneck of the database performance. The performance statistics provided by MogDB can facilitate the performance fault location.
-
-## Generating Performance Statistics Logs
-
-**Parameter description**: For each query, the following four parameters record the performance statistics of corresponding modules in the server log:
-
-- The **og_parser_stats** parameter records the performance statistics of a parser in the server log.
-- The **log_planner_stats** parameter records the performance statistics of a query optimizer in the server log.
-- The **log_executor_stats** parameter records the performance statistics of an executor in the server log.
-- The **log_statement_stats** parameter records the performance statistics of the whole statement in the server log.
-
-All these parameters can only provide assistant analysis for administrators, which are similar to the getrusage() of the Linux OS.
-
-These parameters are SUSET parameters. Set them based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
->
-> - The **log_statement_stats** records the total statement statistics whereas other parameters record statistics only about their corresponding modules.
-> - The **log_statement_stats** parameter cannot be enabled together with any parameter recording statistics about a module.
-
-**Value range**: Boolean
-
-- **on** indicates that performance statistics are recorded.
-- **off** indicates that performance statistics are not recorded.
-
-**Default value**: **off**
+---
+title: Performance Statistics
+summary: Performance Statistics
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Performance Statistics
+
+During database running, operations such as lock access, disk I/O, and invalid-message processing are involved. All of these operations can become database performance bottlenecks. The performance statistics provided by MogDB can facilitate performance fault location.
+
+## Generating Performance Statistics Logs
+
+**Parameter description**: For each query, the following four parameters record the performance statistics of corresponding modules in the server log:
+
+- The **log_parser_stats** parameter records the performance statistics of a parser in the server log.
+- The **log_planner_stats** parameter records the performance statistics of a query optimizer in the server log.
+- The **log_executor_stats** parameter records the performance statistics of an executor in the server log.
+- The **log_statement_stats** parameter records the performance statistics of the whole statement in the server log.
+
+All these parameters only provide auxiliary analysis for administrators, similar to getrusage() on the Linux OS.
+
+These parameters are SUSET parameters. Set them based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+>
+> - The **log_statement_stats** parameter records the total statement statistics whereas other parameters record statistics only about their corresponding modules.
+> - The **log_statement_stats** parameter cannot be enabled together with any parameter recording statistics about a module.
+
+**Value range**: Boolean
+
+- **on** indicates that performance statistics are recorded.
+- **off** indicates that performance statistics are not recorded.
+
+**Default value**: **off**
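+
+For example, a minimal sketch of profiling a single statement (connection options are illustrative; run it as an administrator, and note that the output goes to the server log rather than to the client):
+
+```bash
+gsql -d postgres -p 5432 -c "
+SET log_statement_stats = on;
+SELECT count(*) FROM pg_class;"
+# The getrusage()-style statistics for the SELECT are then written to the server log.
+```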
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md
index 937c2530..e2342067 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md
@@ -1,141 +1,155 @@
----
-title: Query and Index Statistics Collector
-summary: Query and Index Statistics Collector
-author: Zhang Cuiping
-date: 2021-04-20
----
-
-# Query and Index Statistics Collector
-
-The query and index statistics collector is used to collect statistics during database running. The statistics include the times of inserting and updating a table and index, the number of disk blocks and tuples, and the time required for the last cleanup and analysis on each table. The statistics can be viewed by querying system view families pg_stats and pg_statistic. The following parameters are used to set the statistics collection feature in the server scope.
-
-## track_activities
-
-**Parameter description:** Collects statistics about the commands that are being executed in session.
-
-This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
-
-**Value range**: Boolean
-
-- **on** indicates that the statistics collection function is enabled.
-- **off** indicates that the statistics collection function is disabled.
-
-**Default value**: **on**
-
-## track_counts
-
-**Parameter description:** Collects statistics about database activities.
-
-This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
-
-**Value range**: Boolean
-
-- **on** indicates that the statistics collection function is enabled.
-- **off** indicates that the statistics collection function is disabled.
-
-**Default value**: **on**
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**NOTE:**
->
-> When the database to be cleaned up is selected from the AutoVacuum automatic cleanup process, the database statistics are required. In this case, the default value is set to **on**.
-
-## track_io_timing
-
-**Parameter description:** Collects statistics about I/O timing in the database. The I/O timing statistics can be queried by using the **pg_stat_database** parameter.
-
-This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
-
-**Value range**: Boolean
-
-- If this parameter is set to **on**, the collection function is enabled. In this case, the collector repeatedly queries the operating system at the current time. As a result, large number of costs may occur on some platforms. Therefore, the default value is set to **off**.
-- **off** indicates that the statistics collection function is disabled.
-
-**Default value**: **off**
-
-## track_functions
-
-**Parameter description:** Collects statistics of the number and duration of function invocations.
-
-This parameter is a SUSET parameter.
Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> When the SQL functions are set to inline functions queried by the invoking, these SQL functions cannot be traced no matter these functions are set or not. - -**Value range**: enumerated values - -- **pl** indicates that only procedural language functions are traced. -- **all** indicates that SQL language functions area traced. -- **none** indicates that the function tracing function is disabled. - -**Default value**: **none** - -## track_activity_query_size - -**Parameter description**: Specifies byte counts of the current running commands used to trace each active session. - -This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: an integer ranging from 100 to 102400 - -**Default value:** **1024** - -## stats_temp_directory - -**Parameter description**: Specifies the directory for saving temporary statistics. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> If a RAM-based file system directory is used, the actual I/O cost can be lowered and the performance can be improved. - -**Value range**: a string - -**Default value**: **pg_stat_tmp** - -## track_thread_wait_status_interval - -**Parameter description**: Specifies the interval of collecting the thread status information. - -This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: 0 to 1 day. The unit is min. - -**Default value**: **30min** - -## enable_save_datachanged_timestamp - -**Parameter description**: Specifies whether to record the time when **INSERT**, **UPDATE**, **DELETE**, or **EXCHANGE**/**TRUNCATE**/**DROP** **PARTITION** is performed on table data. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the time when an operation is performed on table data will be recorded. -- **off** indicates that the time when an operation is performed on table data will not be recorded. - -**Default value**: **on** - -## track_sql_count - -**Parameter description**: Collects statistics about the statements (**SELECT**, **INSERT**, **UPDATE**, **MERGE INTO**, and **DELETE**) that are being executed in a session. - -In the x86-based centralized deployment scenario, the hardware configuration specifications are 32-core CPU and 256 GB memory. When the Benchmark SQL 5.0 tool is used to test performance, the performance fluctuates by about 0.8% by enabling or disabling this parameter. - -This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the statistics collection function is enabled. -- **off** indicates that the auditing function is disabled. 
-
-**Default value: on**
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - The **track_sql_count** parameter is restricted by the **track_activities** parameter when the **gs_sql_count** or **pgxc_sql_count** view is queried.
-> - If **track_activities** is set to **on** and **track_sql_count** is set to **off**, a warning message indicating that **track_sql_count** is disabled will be displayed in logs when the **gs_sql_count**view is queried.
-> - If both **track_activities** and **track_sql_count** are set to **off**, two warning messages indicating that **track_activities** is disabled and **track_sql_count** is disabled will be displayed in logs when the views are queried.
-> - If **track_activities** is set to **off** and **track_sql_count** is set to **on**, a warning message indicating that **track_activities** is disabled will be displayed in logs when the views are queried.
-> - If **track_sql_count** is set to **off**, querying the **gs_sql_count** or **pgxc_sql_count** view returns **0**.
+---
+title: Query and Index Statistics Collector
+summary: Query and Index Statistics Collector
+author: Zhang Cuiping
+date: 2021-04-20
+---
+
+# Query and Index Statistics Collector
+
+The query and index statistics collector is used to collect statistics during database running. The statistics include the numbers of inserts and updates on tables and indexes, the number of disk blocks and tuples, and the time required for the last cleanup and analysis of each table. The statistics can be viewed by querying system view families pg_stats and pg_statistic. The following parameters are used to set the statistics collection feature in the server scope.
+
+## track_activities
+
+**Parameter description:** Collects statistics about the commands that are being executed in each session.
+
+This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that the statistics collection function is enabled.
+- **off** indicates that the statistics collection function is disabled.
+
+**Default value**: **on**
+
+## track_counts
+
+**Parameter description:** Collects statistics about database activities.
+
+This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- **on** indicates that the statistics collection function is enabled.
+- **off** indicates that the statistics collection function is disabled.
+
+**Default value**: **on**
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**NOTE:**
>
+> The AutoVacuum automatic cleanup process relies on database statistics when selecting the database to clean up, so the default value is set to **on**.
+
+## track_io_timing
+
+**Parameter description:** Collects statistics about I/O timing in the database. The I/O timing statistics can be queried by using the **pg_stat_database** view.
+
+This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md).
+
+**Value range**: Boolean
+
+- If this parameter is set to **on**, the collection function is enabled. In this case, the collector repeatedly queries the operating system for the current time. As a result, significant overhead may occur on some platforms.
+## track_io_timing + +**Parameter description:** Collects statistics about I/O timing in the database. The I/O timing statistics can be queried by using the **pg_stat_database** view. + +This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- If this parameter is set to **on**, the collection function is enabled. In this case, the collector repeatedly queries the operating system for the current time. As a result, significant overhead may occur on some platforms. Therefore, the default value is set to **off**. +- **off** indicates that the statistics collection function is disabled. + +**Default value**: **off** + +## track_functions + +**Parameter description:** Collects statistics of the number and duration of function invocations. + +This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> +> SQL functions that are simple enough to be inlined into the calling query cannot be traced, regardless of this setting. + +**Value range**: enumerated values + +- **pl** indicates that only procedural language functions are traced. +- **all** indicates that SQL language functions are also traced. +- **none** indicates that function tracing is disabled. + +**Default value**: **none** + +## track_activity_query_size + +**Parameter description**: Specifies the number of bytes reserved to track the currently executing command of each active session. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: an integer ranging from 100 to 102400 + +**Default value:** **1024** + +## stats_temp_directory + +**Parameter description**: Specifies the directory for saving temporary statistics. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> +> If a RAM-based file system directory is used, the physical I/O cost is reduced and performance can be improved. + +**Value range**: a string + +**Default value**: **pg_stat_tmp** + +## track_thread_wait_status_interval + +**Parameter description**: Specifies the interval at which thread status information is collected. + +This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: 0 to 1 day. The unit is min. + +**Default value**: **30min** + +## enable_save_datachanged_timestamp + +**Parameter description**: Specifies whether to record the time when **INSERT**, **UPDATE**, **DELETE**, or **EXCHANGE**/**TRUNCATE**/**DROP** **PARTITION** is performed on table data. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the time when an operation is performed on table data will be recorded. +- **off** indicates that the time when an operation is performed on table data will not be recorded. + +**Default value**: **on** +
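As a hedged illustration of reading back the recorded change time: openGauss-lineage kernels expose it through a statistics function; treat the function name **pg_stat_get_last_data_changed_time** and the table name **t1** as assumptions for your build:

```sql
-- t1 is a hypothetical table; run a DML statement, then read the recorded time.
UPDATE t1 SET c1 = c1;
SELECT pg_stat_get_last_data_changed_time(oid)
FROM pg_class WHERE relname = 't1';
```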
+## track_sql_count + +**Parameter description**: Collects statistics about the statements (**SELECT**, **INSERT**, **UPDATE**, **MERGE INTO**, and **DELETE**) that are being executed in a session. + +In the x86-based centralized deployment scenario, the hardware configuration specifications are 32-core CPU and 256 GB memory. When the Benchmark SQL 5.0 tool is used to test performance, enabling or disabling this parameter causes a performance fluctuation of about 0.8%. + +This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the statistics collection function is enabled. +- **off** indicates that the statistics collection function is disabled. + +**Default value**: **on** + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** +> +> - The **track_sql_count** parameter is restricted by the **track_activities** parameter when the **gs_sql_count** or **pgxc_sql_count** view is queried. +> - If **track_activities** is set to **on** and **track_sql_count** is set to **off**, a warning message indicating that **track_sql_count** is disabled will be displayed in logs when the **gs_sql_count** view is queried. +> - If both **track_activities** and **track_sql_count** are set to **off**, two warning messages indicating that **track_activities** is disabled and **track_sql_count** is disabled will be displayed in logs when the views are queried. +> - If **track_activities** is set to **off** and **track_sql_count** is set to **on**, a warning message indicating that **track_activities** is disabled will be displayed in logs when the views are queried. +> - If **track_sql_count** is set to **off**, querying the **gs_sql_count** or **pgxc_sql_count** view returns **0**. +
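A hedged usage sketch for **track_sql_count** (the counter column names are assumptions based on the statements the view is documented to count):

```sql
-- Both switches must be on for gs_sql_count to report non-zero counters.
SET track_activities = on;
SET track_sql_count = on;
SELECT select_count, insert_count, update_count, delete_count
FROM gs_sql_count;
```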
+## update_process_title + +**Parameter description**: Controls whether the process name is updated each time the server receives a new SQL statement. + +This parameter is an INTERNAL parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the update function is enabled. + +- **off** indicates that the update function is disabled. + +**Default value**: off diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/statistics-during-the-database-running.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/statistics-during-the-database-running.md index 96d41a3d..b26f2e3a 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/statistics-during-the-database-running.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/statistics-during-the-database-running/statistics-during-the-database-running.md @@ -1,11 +1,11 @@ ---- -title: Statistics During the Database Running -summary: Statistics During the Database Running -author: zhang cuiping -date: 2023-04-07 ---- - -# Statistics During the Database Running - -- **[Query and Index Statistics Collector](query-and-index-statistics-collector.md)** +--- +title: Statistics During the Database Running +summary: Statistics During the Database Running +author: zhang cuiping +date: 2023-04-07 +--- + +# Statistics During the Database Running + +- **[Query and Index Statistics Collector](query-and-index-statistics-collector.md)** - **[Performance Statistics](performance-statistics.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/thread-pool.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/thread-pool.md index 0edf98f0..ab69fb0a 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/thread-pool.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/thread-pool.md @@ -1,40 +1,238 @@ ---- -title: Thread Pool -summary: Thread Pool -author: Zhang Cuiping -date: 2021-06-15 ---- - -# Thread Pool - -## enable_thread_pool - -**Parameter description**: Specifies whether to enable the thread pool function. This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). - -**Value range**: Boolean - -- **on** indicates that the thread pool function is enabled. -- **off** indicates that the thread pool function is disabled. - -**Note**:The HA port is required for logical replication to connect to the database when thread pool is enabled. - -**Default value**: off - -## thread_pool_attr - -**Parameter description**: Specifies the detailed attributes of the thread pool function. This parameter is valid only when **enable_thread_pool** is set to **on**. This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). - -**Value range**: a string, consisting of one or more characters. - -This parameter consists of three parts: thread_num, group_num, and cpubind_info. The meanings of the three parts are as follows: - -- **thread_num** indicates the total number of threads in the thread pool. The value ranges from 0 to 4096. The value **0** indicates that the database automatically configures the number of threads in the thread pool based on the number of CPU cores. If the value is greater than **0**, the number of threads in the thread pool is the same as the value of **thread_num**. -- **group_num** indicates the number of thread groups in the thread pool. The value ranges from 0 to 64. The value **0** indicates that the database automatically configures the number of thread groups in the thread pool based on the number of NUMA groups.
If the value is greater than **0**, the number of thread groups in the thread pool is the same as the value of **group_num**. -- **cpubind_info** indicates whether the thread pool is bound to a core. The available configuration modes are as follows: - 1. **'(nobind)'**: The thread is not bound to a core. - 2. **'(allbind)'**: Use all CPU cores that can be queried in the current system to bind threads. - 3. **'(nodebind: 1, 2)'**: Use the CPU cores in NUMA groups 1 and 2 to bind threads. - 4. **'(cpubind: 0-30)'**: Use the CPU cores 0 to 30 to bind threads. This parameter is case-insensitive. - 5. **'(numabind: 0-30)'**: Use CPU cores 0 to 30 in the NUMA group to bind threads. This parameter is case-insensitive. - -**Default value**: **'16, 2, (nobind)'** +--- +title: Thread Pool +summary: Thread Pool +author: Zhang Cuiping +date: 2021-06-15 +--- + +# Thread Pool + +## enable_thread_pool + +**Parameter description**: Specifies whether to enable the thread pool function. This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Boolean + +- **on** indicates that the thread pool function is enabled. +- **off** indicates that the thread pool function is disabled. + +**Default value**: off + +## thread_pool_attr + +**Parameter description**: Specifies the detailed attributes of the thread pool function. This parameter is valid only when **enable_thread_pool** is set to **on**. This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: a string, consisting of one or more characters. + +This parameter consists of three parts: thread_num, group_num, and cpubind_info. The meanings of the three parts are as follows: + +- **thread_num** indicates the total number of threads in the thread pool. The value ranges from 0 to 4096. The value **0** indicates that the database automatically configures the number of threads in the thread pool based on the number of CPU cores. If the value is greater than **0**, the number of threads in the thread pool is the same as the value of **thread_num**. +- **group_num** indicates the number of thread groups in the thread pool. The value ranges from 0 to 64. The value **0** indicates that the database automatically configures the number of thread groups in the thread pool based on the number of NUMA groups. If the value is greater than **0**, the number of thread groups in the thread pool is the same as the value of **group_num**. +- **cpubind_info** indicates whether the thread pool is bound to a core. The available configuration modes are as follows: + 1. **'(nobind)'**: The thread is not bound to a core. + 2. **'(allbind)'**: Use all CPU cores that can be queried in the current system to bind threads. + 3. **'(nodebind: 1, 2)'**: Use the CPU cores in NUMA groups 1 and 2 to bind threads. + 4. **'(cpubind: 0-30)'**: Use the CPU cores 0 to 30 to bind threads. This parameter is case-insensitive. + 5. **'(numabind: 0-30)'**: Use CPU cores 0 to 30 in the NUMA group to bind threads. This parameter is case-insensitive. + +**Default value**: **'16, 2, (nobind)'** +
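As an illustrative sketch only (the values are examples, not recommendations; both parameters are POSTMASTER-level, so they take effect after a restart, and **gs_guc set** may be used instead of **ALTER SYSTEM SET**):

```sql
-- Enable the thread pool with 32 threads in 2 groups, bound to NUMA nodes 0 and 1.
ALTER SYSTEM SET enable_thread_pool = on;
ALTER SYSTEM SET thread_pool_attr = '32, 2, (nodebind: 0, 1)';
```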
+## thread_pool_stream_attr + +**Parameter description**: Specifies the detailed attributes of the stream thread pool function. This parameter is valid only when **enable_thread_pool** is set to **on**. Only the **sysadmin** user can access this parameter. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: String, length greater than 0 + +This parameter consists of four parts: 'stream_thread_num, stream_proc_ratio, group_num, cpubind_info'. The meanings of the four parts are as follows: + +- **stream_thread_num** indicates the total number of threads in the stream thread pool. The value ranges from 0 to 4096. The value **0** indicates that the database automatically configures the number of threads in the thread pool based on the number of CPU cores. If the value is greater than **0**, the number of threads in the thread pool is the same as the value of **stream_thread_num**. You are advised to set the thread pool size based on the hardware configuration. The formula is as follows: Value of **stream_thread_num** = Number of CPU cores x 3–5. The maximum value of **stream_thread_num** is **4096**. +- **stream_proc_ratio** indicates the ratio of proc resources reserved for stream threads. The value is a floating point number. The default value is **0.2**. The reserved proc resources are calculated as follows: **stream_proc_ratio** x **stream_thread_num**. +- **group_num** indicates the number of thread groups in the thread pool. The value ranges from 0 to 64. The value **0** indicates that the database automatically configures the number of thread groups in the thread pool based on the number of NUMA groups in the system. If the value is greater than **0**, the number of thread groups in the thread pool is the same as the value of **group_num**. The **group_num** of **thread_pool_stream_attr** must be consistent with the **group_num** of **thread_pool_attr**; if they are set to different values, the **group_num** of **thread_pool_attr** prevails. +- **cpubind_info** indicates whether the thread pool is bound to a core. The available configuration modes are as follows: 1. **'(nobind)'**: The thread is not bound to a core. 2. **'(allbind)'**: Use all CPU cores that can be queried in the current system to bind threads. 3. **'(nodebind: 1, 2)'**: Use the CPU cores in NUMA groups 1 and 2 to bind threads. 4. **'(cpubind: 0-30)'**: Use the CPU cores 0 to 30 to bind threads. 5. **'(numabind: 0-30)'**: Use CPU cores 0 to 30 in the NUMA group to bind threads. This parameter is case-insensitive. The **cpubind_info** of **thread_pool_stream_attr** must be consistent with the **cpubind_info** of **thread_pool_attr**; if they are set to different values, the **cpubind_info** of **thread_pool_attr** prevails. + +**Default value**: + +stream_thread_num: 16 + +stream_proc_ratio: 0.2 + +group_num and cpubind_info: the same as the corresponding parts of **thread_pool_attr** +
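A hedged example following the 'stream_thread_num, stream_proc_ratio, group_num, cpubind_info' format described above (values are illustrative; group_num and cpubind_info should match **thread_pool_attr**):

```sql
-- 64 stream threads, 0.2 proc ratio, 2 groups, no core binding; restart required.
ALTER SYSTEM SET thread_pool_stream_attr = '64, 0.2, 2, (nobind)';
```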
+## resilience_threadpool_reject_cond + +**Parameter description**: Controls the stacked-session percentages that drive thread pool overload escape. This parameter takes effect only when the GUC parameters use_workload_manager and enable_thread_pool are enabled. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: String, length greater than 0 + +This parameter consists of two parts: recover_threadpool_percent and overload_threadpool_percent. The meanings of the two parts are as follows: + +- recover_threadpool_percent: the percentage of the initial thread pool size at which the thread pool returns to the normal state. When the number of stacked sessions falls below the initial number of thread pool threads multiplied by this percentage, overload escape stops and new connections are admitted again. The value ranges from 0 to INT_MAX and is interpreted as a percentage. +- overload_threadpool_percent: the percentage of the initial thread pool size at which the thread pool is considered overloaded. When the number of stacked sessions exceeds the initial number of thread pool threads multiplied by this percentage, overload escape is triggered: stacked sessions are killed and new connections are rejected. The value ranges from 0 to INT_MAX and is interpreted as a percentage. + +**Default value**: '0,0', which disables thread pool overload escape. + +**Example**: + +``` +resilience_threadpool_reject_cond = '100,200' +``` + +When the number of stacked sessions exceeds 200% of the initial number of threads set for the thread pool, new connections are rejected and the stacked sessions are killed; once the number of sessions falls back below 100% of the initial number of threads, killing stops and new connections are admitted again. + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**: +> +> - The number of stacked sessions can be obtained by counting the rows in the pg_stat_activity view (a small number of background threads must be filtered out); the initial number of threads set for the thread pool can be obtained by querying the thread_pool_attr parameter. +> - If the percentages of this parameter are set too small, the thread pool overload escape process is triggered frequently, which forces executing sessions to exit and briefly prevents new connections from being admitted, so set them carefully based on the actual thread pool usage. + +## enable_ios + +**Parameter description**: Controls whether the IOS service is enabled. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Boolean + +- on means enabled. +- off means disabled. + +**Default value**: off + +## enable_heap_async_prefetch + +**Parameter description**: Controls whether prefetching is enabled for Astore full table scan scenarios. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Boolean + +- on means enabled. +- off means disabled. + +**Default value**: off (This parameter is invalid when enable_ios = off.) +
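A minimal sketch of turning the prefetch path on (illustrative; **enable_ios** is a POSTMASTER parameter and must take effect through a restart before the session-level switch does anything):

```sql
ALTER SYSTEM SET enable_ios = on;      -- takes effect after restart
SET enable_heap_async_prefetch = on;   -- per-session Astore prefetch
```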
+## enable_uheap_async_prefetch + +**Parameter description**: Controls whether prefetching is enabled for Ustore full table scan scenarios. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Boolean + +- on means enabled. +- off means disabled. + +**Default value**: off (This parameter is invalid when enable_ios = off.) + +## ios_worker_num + +**Parameter description**: Specifies the number of ios_worker threads in the IOS thread pool. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Integer. The minimum value is 1 and the maximum value is 100. + +**Default value**: 4 + +## parallel_scan_gap + +**Parameter description**: Specifies the number of pages each worker thread processes in a single pass when parallel scan is enabled (query_dop > 1). + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Integer. The minimum value is 64 and the maximum value is 4096. + +**Default value**: 128 + +## ios_batch_read_size + +**Parameter description**: Specifies the number of prefetch pages that an ios_worker issues to disk per batch. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Integer. The minimum value is 64 and the maximum value is 1024. + +**Default value**: 64 + +## max_requests_per_worker + +**Parameter description**: Specifies the maximum queue depth of each ios_worker. When it is exceeded, the ios_worker thread cannot accept new requests until a queued request has been processed and removed from the task queue. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Integer. The minimum value is 1 and the maximum value is 10. + +**Default value**: 2 + +## min_table_block_num_enable_ios + +**Parameter description**: Specifies the Astore table size threshold for triggering prefetch. Prefetch may be triggered only if the total number of data pages in the table is greater than or equal to this threshold. The current data page size is 8 KB. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Integer. The minimum value is 65536 (i.e. 512 MB) and the maximum value is 6553600 (i.e. 50 GB). + +**Default value**: 131072 (1 GB) + +## min_uheap_table_block_num_enable_ios + +**Parameter description**: Specifies the Ustore table size threshold for triggering prefetch. Prefetch may be triggered only if the total number of data pages in the table is greater than or equal to this threshold. The current data page size is 8 KB. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Integer. The minimum value is 65536 (i.e. 512 MB) and the maximum value is 6553600 (i.e. 50 GB). + +**Default value**: 131072 (1 GB) + +## prefetch_protect_time + +**Parameter description**: Specifies the maximum protection time of prefetched buffers. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Integer. The unit is milliseconds. The minimum value is 100 and the maximum value is 10000. + +**Default value**: 500 +
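The size thresholds above are expressed in 8 KB pages. A hedged way to check whether a table is large enough to qualify for prefetch (**relpages** is the page count maintained by VACUUM/ANALYZE; **big_tbl** is a hypothetical table name):

```sql
SHOW min_table_block_num_enable_ios;
SELECT relname, relpages FROM pg_class WHERE relname = 'big_tbl';
```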
+## ios_status_update_gap + +**Parameter description**: Specifies the interval at which the IOS performance status is updated. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Integer. The unit is seconds. The minimum value is 1 and the maximum value is 100. + +**Default value**: 1 + +## thread_pool_committer_max_retry_count + +**Parameter description**: Sets the maximum number of retries before the thread pool committer sleeps. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Integer, 1-2147483647 + +**Default value**: 10 + +## thread_pool_committerctl_max_retry_count + +**Parameter description**: Sets the maximum number of retries before the thread pool committerctl sleeps. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Integer, 1-2147483647 + +**Default value**: 10 + +## thread_pool_worker_num_per_committer + +**Parameter description**: Sets the ratio of the number of worker threads to the number of committer threads in the thread pool. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Integer, 1-2147483647 + +**Default value**: 8 diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/upgrade-parameters.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/upgrade-parameters.md index f455408b..c4d1cef0 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/upgrade-parameters.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/upgrade-parameters.md @@ -1,47 +1,24 @@ ---- -title: Upgrade Parameters -summary: Upgrade Parameters -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Upgrade Parameters - -## IsInplaceUpgrade - -**Parameter description**: Specifies whether an upgrade is ongoing. This parameter cannot be modified by users. - -This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). - -**Value range**: Boolean - -- **on** indicates an upgrade is ongoing. -- **off** indicates no upgrade is ongoing. - -**Default value**: **off** - -## inplace_upgrade_next_system_object_oids - -**Parameter description**: Indicates the OID of a new system object during the in-place upgrade. This parameter cannot be modified by users. - -This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). - -**Value range**: a string - -**Default value**: empty - -## upgrade_mode - -**Parameter description**: Specifies the upgrade mode. - -This parameter is a fixed INTERNAL parameter and cannot be modified. - -**Value range**: an integer ranging from 0 to *INT_MAX* - -- **0** indicates that no upgrade is ongoing. -- **1** indicates that a local upgrade is ongoing. -- **2** indicates that a grayscale upgrade is ongoing. - -**Default value**: **0** - +--- +title: Upgrade Parameters +summary: Upgrade Parameters +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Upgrade Parameters + +## upgrade_mode + +**Parameter description**: Specifies the upgrade mode. + +This parameter is a fixed INTERNAL parameter and cannot be modified. + +**Value range**: an integer ranging from 0 to *INT_MAX* + +- **0** indicates that no upgrade is ongoing. +- **1** indicates that a local upgrade is ongoing. +- **2** indicates that a grayscale upgrade is ongoing.
+ +**Default value**: **0** + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Special case: During a grayscale upgrade, if the major version upgrade policy is selected (that is, the upgrade script must be executed and the binary package must be replaced), the value of **upgrade_mode** is set to **2**; if the minor version upgrade policy is selected (that is, only the binary package needs to be replaced), the value of **upgrade_mode** is not set to **2**. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md index c57d178b..31e5b5a0 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md @@ -1,148 +1,148 @@ ---- -title: Compatibility with Earlier Versions -summary: Compatibility with Earlier Versions -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Compatibility with Earlier Versions - -This section describes the parameter control of the downward compatibility and external compatibility features of the MogDB database. A backward compatible database supports applications of earlier versions. This section describes parameters used for controlling backward compatibility of a database. - -## array_nulls - -**Parameter description**: controls whether the array input parser recognizes unquoted NULL as a null array element. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that null values can be entered in arrays. -- **off** indicates backward compatibility with the old behavior. Arrays containing **NULL** values can still be created when this parameter is set to **off**. - -**Default value**: **on** - -## backslash_quote - -**Parameter description**: controls whether a single quotation mark can be represented by \' in a string text. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> When the string text meets the SQL standards, \ has no other meanings. This parameter only affects the handling of non-standard-conforming string texts, including escape string syntax (E'…'). - -**Value range**: enumerated values - -- **on** indicates that the use of \' is always allowed. -- **off** indicates that the use of \' is rejected. -- **safe_encoding** indicates that the use of \' is allowed only when client encoding does not allow ASCII \ within a multibyte character. - -**Default value**: **safe_encoding** - -## escape_string_warning - -**Parameter description**: specifies a warning on directly using a backslash () as an escape in an ordinary string. - -- Applications that wish to use a backslash () as an escape need to be modified to use escape string syntax (E'…').
This is because the default behavior of ordinary strings is now to treat the backslash as an ordinary character in each SQL standard. -- This variable can be enabled to help locate codes that need to be changed. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -**Default value**: **on** - -## lo_compat_privileges - -**Parameter description**: Specifies whether to enable backward compatibility for the privilege check of large objects. - -This parameter is a SUSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -**on** indicates that the privilege check is disabled when users read or modify large objects. This setting is compatible with versions earlier than PostgreSQL 9.0. - -**Default value**: **off** - -## quote_all_identifiers - -**Parameter description:** When the database generates SQL, this parameter forcibly quotes all identifiers even if they are not keywords. This will affect the output of EXPLAIN as well as the results of functions, such as pg_get_viewdef. For details, see the **--quote-all-identifiers** parameter of **gs_dump**. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates the forcible quotation function is enabled. -- **off** indicates the forcible quotation function is disabled. - -**Default value**: **off** - -## sql_inheritance - -**Parameter description**: Specifies whether to inherit semantics. This parameter specifies the access policy of descendant tables. **off** indicates that subtables cannot be accessed by commands. That is, the ONLY keyword is used by default. This setting is compatible with earlier versions. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the subtable can be accessed. -- **off** indicates that the subtable cannot be accessed. - -**Default value**: **on** - -## standard_conforming_strings - -**Parameter description**: Specifies whether ordinary string texts ('…') treat backslashes as ordinary texts as specified in the SQL standard. - -- Applications can check this parameter to determine how string texts will be processed. -- It is recommended that characters be escaped by using the escape string syntax (E'…'). - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the function is enabled. -- **off** indicates that the function is disabled. - -**Default value**: **on** - -## synchronize_seqscans - -**Parameter description**: Specifies sequential scans of tables to synchronize with each other. Concurrent scans read the same data block about at the same time and share the I/O workload. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). 
- -**Value range**: Boolean - -- **on** indicates that a scan may start in the middle of the table and then "wrap around" the end to cover all rows to synchronize with the activity of scans already in progress. This may result in unpredictable changes in the row ordering returned by queries that have no ORDER BY clause. -- **off** indicates that the scan always starts from the table heading. - -**Default value**: **on** - -## enable_beta_features - -**Parameter description**: Specifies whether to enable some features that are not officially released and are used only for POC verification. Exercise caution when enabling these extended features because they may cause errors in some scenarios. This parameter usually does not take effect in every LTS version, such as version 2.0 and 3.0; this parameter usually takes effect in every preview version, such as 2.1 and future 3.1. there are no features affected by this parameter in MogDB version 3.0. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that the features are enabled and forward compatible, but may incur errors in certain scenarios. - -- **off** indicates that the features are disabled. - -**Default value**: **off** - -## default_with_oids - -**Parameter description**: Specifies whether **CREATE TABLE** and **CREATE TABLE AS** include an **OID** field in newly-created tables if neither **WITH OIDS** nor **WITHOUT OIDS** is specified. It also determines whether OIDs will be included in tables created by **SELECT INTO**. - -It is not recommended that OIDs be used in user tables. Therefore, this parameter is set to **off** by default. When OIDs are required for a particular table, **WITH OIDS** needs to be specified during the table creation. - -This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: Boolean - -- **on** indicates that **CREATE TABLE** and **CREATE TABLE AS** can include an **OID** field in newly-created tables. - -- **off** indicates that **CREATE TABLE** and **CREATE TABLE AS** cannot include any OID field in newly-created tables. - +--- +title: Compatibility with Earlier Versions +summary: Compatibility with Earlier Versions +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Compatibility with Earlier Versions + +This section describes the parameters that control the backward compatibility and external compatibility features of the MogDB database. A backward compatible database supports applications of earlier versions. + +## array_nulls + +**Parameter description**: controls whether the array input parser recognizes unquoted NULL as a null array element. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that null values can be entered in arrays. +- **off** indicates backward compatibility with the old behavior. Arrays containing **NULL** values can still be created when this parameter is set to **off**.
+ +**Default value**: **on** + +## backslash_quote + +**Parameter description**: controls whether a single quotation mark can be represented by \' in a string text. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> When the string text meets the SQL standards, \ has no other meanings. This parameter only affects the handling of non-standard-conforming string texts, including escape string syntax (E'…'). + +**Value range**: enumerated values + +- **on** indicates that the use of \' is always allowed. +- **off** indicates that the use of \' is rejected. +- **safe_encoding** indicates that the use of \' is allowed only when client encoding does not allow ASCII \ within a multibyte character. + +**Default value**: **safe_encoding** + +## escape_string_warning + +**Parameter description**: specifies a warning on directly using a backslash (\) as an escape in an ordinary string. + +- Applications that wish to use a backslash (\) as an escape need to be modified to use escape string syntax (E'…'). This is because the default behavior of ordinary strings is now to treat the backslash as an ordinary character, as required by the SQL standard. +- This variable can be enabled to help locate code that needs to be changed. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +**Default value**: **on** +
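A brief illustration of the behavior this warning concerns (with standard-conforming strings, the backslash in an ordinary literal is an ordinary character; E'…' requests escape processing):

```sql
-- 'a\nb' keeps a literal backslash followed by n; E'a\nb' contains a newline.
SELECT 'a\nb' AS ordinary, E'a\nb' AS escaped;
```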
+## lo_compat_privileges + +**Parameter description**: Specifies whether to enable backward compatibility for the privilege check of large objects. + +This parameter is a SUSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +**on** indicates that the privilege check is disabled when users read or modify large objects. This setting is compatible with versions earlier than PostgreSQL 9.0. + +**Default value**: **off** + +## quote_all_identifiers + +**Parameter description:** When the database generates SQL, this parameter forcibly quotes all identifiers even if they are not keywords. This will affect the output of EXPLAIN as well as the results of functions, such as pg_get_viewdef. For details, see the **--quote-all-identifiers** parameter of **gs_dump**. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates the forcible quotation function is enabled. +- **off** indicates the forcible quotation function is disabled. + +**Default value**: **off** + +## sql_inheritance + +**Parameter description**: Specifies whether to inherit semantics. This parameter specifies the access policy of descendant tables. **off** indicates that subtables cannot be accessed by commands. That is, the ONLY keyword is used by default. This setting is compatible with earlier versions. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the subtable can be accessed. +- **off** indicates that the subtable cannot be accessed. + +**Default value**: **on** + +## standard_conforming_strings + +**Parameter description**: Specifies whether ordinary string texts ('…') treat backslashes as ordinary texts as specified in the SQL standard. + +- Applications can check this parameter to determine how string texts will be processed. +- It is recommended that characters be escaped by using the escape string syntax (E'…'). + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the function is enabled. +- **off** indicates that the function is disabled. + +**Default value**: **on** + +## synchronize_seqscans + +**Parameter description**: Specifies whether sequential scans of tables synchronize with each other, so that concurrent scans read the same data block at about the same time and share the I/O workload. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that a scan may start in the middle of the table and then "wrap around" the end to cover all rows to synchronize with the activity of scans already in progress. This may result in unpredictable changes in the row ordering returned by queries that have no ORDER BY clause. +- **off** indicates that the scan always starts from the beginning of the table. + +**Default value**: **on** + +## enable_beta_features + +**Parameter description**: Specifies whether to enable some features that are not officially released and are used only for POC verification. Exercise caution when enabling these extended features because they may cause errors in some scenarios. This parameter usually has no effect in LTS versions, such as 2.0 and 3.0, and usually takes effect in preview versions, such as 2.1 and the future 3.1. No features are affected by this parameter in MogDB 3.0. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that the features are enabled and forward compatible, but may incur errors in certain scenarios. +- **off** indicates that the features are disabled. + +**Default value**: **off** + +## default_with_oids + +**Parameter description**: Specifies whether **CREATE TABLE** and **CREATE TABLE AS** include an **OID** field in newly-created tables if neither **WITH OIDS** nor **WITHOUT OIDS** is specified. It also determines whether OIDs will be included in tables created by **SELECT INTO**. + +It is not recommended that OIDs be used in user tables. Therefore, this parameter is set to **off** by default. When OIDs are required for a particular table, **WITH OIDS** needs to be specified during the table creation. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- **on** indicates that **CREATE TABLE** and **CREATE TABLE AS** can include an **OID** field in newly-created tables. +- **off** indicates that **CREATE TABLE** and **CREATE TABLE AS** cannot include any OID field in newly-created tables.
+ + **Default value**: **off** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md index 758bee5a..23db400d 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md @@ -34,6 +34,18 @@ This parameter is a USERSET parameter. Set it based on instructions provided in **Default value**: **DD-Mon-YYYY HH:MI:SS.FF AM** +## group_concat_max_len + +**Parameter description**: Used with the GROUP_CONCAT function to limit the length of its return value; return values longer than the limit are truncated. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: 0-9223372036854775807 + +**Default value**: 1024 + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: The maximum length that can be returned is 1073741823; beyond this, an out-of-memory error is reported. + ## max_function_args **Parameter description**: Specifies the maximum number of parameters allowed for a function. @@ -96,6 +108,30 @@ This parameter is an INTERNAL parameter. It can be viewed but cannot be modified > - This parameter can be set only by dbcompatibility when you run the [CREATE DATABASE](../../../reference-guide/sql-syntax/CREATE-DATABASE.md) command to create a database. > - In the database, this parameter must be set to a specific value. It can be set to **A** or **B** and cannot be changed randomly. Otherwise, the setting is not consistent with the database behavior. +## b_format_behavior_compat_options + +**Parameter description**: Specifies compatibility behavior configuration items for B-compatible mode. The value consists of one or more configuration items separated by commas. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: String + +**Default value**: "" + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note:** +> +> - Currently, only the configuration items in [Table 1 Compatibility B Mode Configuration Items](#bmode) are supported. +> - When multiple configuration items are set, adjacent items are separated by commas, for example: set b_format_behavior_compat_options='enable_set_variables,set_session_transaction'; + +**Table 1** Compatibility B Mode Configuration Items + +| Configuration Item | Compatibility Behavior | | ----------------------- | ------------------------------------------------------------ | | enable_set_variables | Controls the enhanced set syntax.
- When this configuration is not set, the set custom variable syntax and the set [global \| session] syntax are not supported.
- When this configuration is set, the above syntax is supported in B-compatible mode, for example, *set @v1 = 1;*. | | set_session_transaction | Controls the set session transaction syntax.
- When this configuration is not set, set session transaction is equivalent to set local transaction.
- When this configuration is set, B-compatible mode supports using this syntax to modify the transaction characteristics of the current session. | | enable_modify_column | Controls the semantics of ALTER TABLE MODIFY.
- When this configuration is not set, "ALTER TABLE table_name MODIFY column_name data_type;" modifies only the data type of the column.
- When this configuration is set, "ALTER TABLE table_name MODIFY column_name data_type;" modifies the entire column definition. | | default_collation | Forward compatibility switch for the default collation.
- When this configuration is not set, a character-type column uses the default collation when neither its character set nor its collation is explicitly specified and the table-level collation is also empty.
- When this configuration is set, the collation of a character-type column inherits the table-level collation when the table-level collation is not empty, and is set to the default collation corresponding to the database encoding when it is empty. | +
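A hedged end-to-end example of the first configuration item in a B-compatible database (session level; the variable syntax is the one documented in the table above):

```sql
set b_format_behavior_compat_options = 'enable_set_variables';
set @v1 = 1;
select @v1;
```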
+## enable_set_variables_b_format + +**Parameter description:** Specifies whether the function of customizing user variables is supported in the MY-compatible database. @@ -119,47 +155,352 @@ This parameter is a USERSET parameter. Set it based on instructions provided in > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** > -> - Currently, only compatibility configuration items in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md) are supported. > - Multiple items are separated by commas (,), for example, **set behavior_compat_options='end_month_calculate,display_leading_zero';**. -**Table 1** Compatibility configuration items - -| Configuration Item | Behavior | | :------------------------------ | :----------------------------------------------------------- | -| display_leading_zero | Specifies how floating point numbers are displayed. - If this item is not specified, for a decimal number between -1 and 1, the 0 before the decimal point is not displayed. For example, 0.25 is displayed as **.25**.
- If this item is specified, for a decimal number between -1 and 1, the 0 before the decimal point is displayed. For example, 0.25 is displayed as **0.25**. | -| end_month_calculate | Specifies the calculation logic of the add_months function.
Assume that the two parameters of the add_months function are **param1** and **param2**, and that the month of **param1** and **param2** is **result**.
- If this item is not specified, and the **Day** of **param1** indicates the last day of a month shorter than **result**, the **Day** in the calculation result will equal that in **param1**. For example:
`mogdb=# select add_months('2018-02-28',3) from sys_dummy; add_months ---------------------- 2018-05-28 00:00:00 (1 row)`
- If this item is specified, and the **Day** of **param1** indicates the last day of a month shorter than **result**, the **Day** in the calculation result will equal that in **result**. For example:
`mogdb=# select add_months('2018-02-28',3) from sys_dummy; add_months ---------------------- 2018-05-31 00:00:00 (1 row)` | -| compat_analyze_sample | Specifies the sampling behavior of the ANALYZE operation.
If this item is specified, the sample collected by the ANALYZE operation will be limited to around 30,000 records, DBnode memory consumption and maintaining the stability of ANALYZE. | -| bind_schema_tablespace | Binds a schema with the tablespace with the same name.
If a tablespace name is the same as *sche_name*, **default_tablespace** will also be set to *sche_name* if **search_path** is set to *sche_name*. | -| bind_procedure_searchpath | Specifies the search path of the database object for which no schema name is specified.
If no schema name is specified for a stored procedure, the search is performed in the schema to which the stored procedure belongs.
If the stored procedure is not found, the following operations are performed:
- If this item is not specified, the system reports an error and exits.
- If this item is specified, the search continues based on the settings of **search_path**. If the issue persists, the system reports an error and exits. | -| correct_to_number | Controls the compatibility of the to_number() result.
If this item is specified, the result of the to_number() function is the same as that of PG11. Otherwise, the result is the same as that of the O database. | -| unbind_divide_bound | Controls the range check on the result of integer division.
If this item is specified, you do not need to check the range of the division result. For example, the result of INT_MIN/(-1) can be *INT_MAX*+1. If this item is not specified, an out-of-bounds error is reported because the result is greater than*INT_MAX*. | -| return_null_string | Specifies how to display the empty result (empty string ") of the lpad() and rpad() functions.
- If this item is not specified, the empty string is displayed as **NULL**.
`mogdb=# select length(lpad('123',0,'*')) from sys_dummy; length ---- (1 row)`
- If this item is specified, the empty string is displayed as single quotation marks (").
`mogdb=# select length(lpad('123',0,'*')) from sys_dummy; length ---- 0 (1 row)` | -| compat_concat_variadic | Specifies the compatibility of variadic results of the concat() and concat_ws() functions.
If this item is specified and a concat function has a parameter of the variadic type, different result formats in O and Teradata are retained. If this item is not specified and a concat function has a parameter of the variadic type, the result format of O is retained for both O and Teradata. This option has no effect on MY because MY has no variadic type. | -| merge_update_multi | When MERGE INTO… WHEN MATCHED THEN UPDATE (see **MERGE INTO**) and INSERT… ON DUPLICATE KEY UPDATE (see **INSERT**) are used, control the UPDATE behavior if a piece of target data in the target table conflicts with multiple pieces of source data.
If this item is specified and the preceding scenario exists, the system performs multiple UPDATE operations on the conflicting row. If this item is not specified and the preceding scenario exists, an error is reported, that is, the MERGE or INSERT operation fails. | -| hide_tailing_zero | Numeric shows the configuration item. If this parameter is not set, numeric shows the configuration item based on the specified precision. When this parameter is set, hide "0" at the end of the decimal point.
`set behavior_compat_options='hide_tailing_zero'; select cast(123.123 as numeric(15,10)); numeric ---- 123.123 (1 row)` | -| rownum_type_compat | Specifies the ROWNUM type. The default value is **INT8**. After this parameter is specified, the value is changed to **NUMERIC**. | -| aformat_null_test | Determines the logic for checking whether the row type is not null. When this parameter is set, if a column in a row is not null, **true** is returned.
When this parameter is not set, if all columns in a row are not null, **true** is returned. | -| aformat_regexp_match | Determines the matching behavior of regular expression functions.
When this parameter is set and **sql_compatibility** is set to **A** or **B**, the options supported by the **flags** parameter of the regular expression are changed as follows:
1. By default, the character '\n' cannot be matched.
2. When **flags** contains the **n** option, the character '\n' can be matched.
3. The **regexp_replace(source, pattern replacement)** function replaces all matching substrings.
4. **regexp_replace(source, pattern, replacement, flags)** returns null when the value of **flags** is **“** or null.
Otherwise, the meanings of the options supported by the **flags** parameter of the regular expression are as follows:
1. By default, the character '\n' can be matched.
2. The **n** option in **flags** indicates that the multi-line matching mode is used.
3. The **regexp_replace(source, pattern replacement)** function replaces only the first matched substring.
4. If the value of **flags** is **“** or null, the return value of **regexp_replace(source, pattern, replacement, flags)** is the character string after replacement. | -| compat_cursor | Determines the compatibility behavior of implicit cursor states. If this parameter is set and the O compatibility mode is used, the effective scope of implicit cursor states (**SQL %FOUND**, **SQL %NOTFOUND**, **SQL %ISOPNE** and **SQL %ROWCOUNT**) are extended only the currently executed function to all subfunctions invoked by this function. | -| proc_outparam_override | Determines the reloading of output parameters of a stored procedure. After this parameter is enabled, the stored procedure can be properly invoked even if only the output parameters of the stored procedure are different. | -| proc_implicit_for_loop_variable | Determines the behavior of the **FOR_LOOP** query statement in a stored procedure.When this parameter is set, if **rec** has been defined in the **FOR rec IN query LOOP** statement, the defined **rec** variable is not reused and a new variable is created. Otherwise, the defined **rec** variable is reused and no new variable is created. | -| allow_procedure_compile_check | Determines the compilation check of the **SELECT** and **OPEN CURSOR** statements in a stored procedure. If this parameter is set, when the **SELECT**, **OPEN CURSOR FOR**, **CURSOR %rowtype**, or **for rec in** statement is executed in a stored procedure, the stored procedure cannot be created if the queried table does not exist, and the compilation check of the trigger function is not supported. If the queried table exists, the stored procedure is successfully created. | -| char_coerce_compat | Determines the behavior when char(n) types are converted to other variable-length string types. By default, spaces at the end are omitted when the char(n) type is converted to other variable-length string types. After this parameter is enabled, spaces at the end are not omitted during conversion. In addition, if the length of the char(n) type exceeds the length of other variable-length string types, an error is reported. This parameter is valid only when **sql_compatibility** is set to **A**. | -| pgformat_substr | Controls the performance of substr(str, from, for) in different scenarios. By default, if the value of **from** is less than 0, substr counts from the end of the string. If the value of **for** is less than 1, substr returns NULL. After this parameter is enabled, if the value of **from** is less than 0, substr counts from the first (-from + 1) bit of the character string. If the value of **for** is less than 0, substr reports an error. This parameter is valid only when `sql_compatibility` is set to `PG`. | - -## plpgsql.variable_conflict - -**Parameter description**: Specifies the priority of a stored procedure variable and a table column that have the same name. +The currently supported compatibility configuration items and their behavioral controls are listed below: + +- display_leading_zero + + Specifies how floating point numbers are displayed. + + - If this item is not specified, for a decimal number between -1 and 1, the 0 before the decimal point is not displayed. 
For example: + + ```sql + MogDB=# select 0.1231243 as a, 0.1231243::numeric as b,0.1231243::integer(10,3) as c, length(0.1242343) as d; + a | b | c | d + ----------+----------+------+--- + .1231243 | .1231243 | .123 | 8 + (1 row) + ``` + + - If this item is specified, for a decimal number between -1 and 1, the 0 before the decimal point is displayed. For example: + + ```sql + MogDB=# select 0.1231243 as a, 0.1231243::numeric as b,0.1231243::integer(10,3) as c, length(0.1242343) as d; + a | b | c | d + -----------+-----------+-------+--- + 0.1231243 | 0.1231243 | 0.123 | 9 + (1 row) + ``` + +- end_month_calculate + + Specifies the calculation logic of the add_months function. + + Assume that the two parameters of the add_months function are **param1** and **param2**, and that the sum of the months of **param1** and **param2** is **result**. + + - If this item is not specified, and the **Day** of **param1** indicates the last day of a month shorter than **result**, the **Day** in the calculation result will equal that in **param1**. For example: + + ```sql + MogDB=# select add_months('2018-02-28',3) from sys_dummy; + add_months + --------------------- + 2018-05-28 00:00:00 + (1 row) + ``` + + - If this item is specified, and the **Day** of **param1** indicates the last day of a month shorter than **result**, the **Day** in the calculation result will equal that in **result**. For example: + + ```sql + MogDB=# select add_months('2018-02-28',3) from sys_dummy; + add_months + --------------------- + 2018-05-31 00:00:00 + (1 row) + ``` + +- compat_analyze_sample + + Specifies the sampling behavior of the ANALYZE operation. + If this item is specified, the sample collected by the ANALYZE operation will be limited to around 30,000 records, reducing DBnode memory consumption and maintaining the stability of ANALYZE. + +- bind_schema_tablespace + + Binds a schema with the tablespace with the same name. + If a tablespace name is the same as *sche_name*, **default_tablespace** will also be set to *sche_name* if **search_path** is set to *sche_name*. + +- bind_procedure_searchpath + + Specifies the search path of the database object for which no schema name is specified. + + If no schema name is specified for a stored procedure, the search is performed in the schema to which the stored procedure belongs. + + If the stored procedure is not found, the following operations are performed: + + - If this item is not specified, the system reports an error and exits. + - If this item is specified, the search continues based on the settings of **search_path**. If the issue persists, the system reports an error and exits. + +- correct_to_number + + Controls the compatibility of the to_number() result. + + If this item is specified, the result of the to_number() function is the same as that of PG11. Otherwise, the result is the same as that of the O database. + +- unbind_divide_bound + + Controls the range check on the result of integer division. + + If this item is specified, the range of the division result is not checked. For example, the result of INT_MIN/(-1) can be INT_MAX + 1. If this item is not specified, an out-of-bounds error is reported because the result is greater than INT_MAX. + +- return_null_string + + Specifies how to display the empty result (empty string '') of the lpad() and rpad() functions. + + If this item is not specified, the empty string is displayed as **NULL**.
+ + ```sql + MogDB=# select length(lpad('123',0,'*')) from sys_dummy; + length + -------- + (1 row) + ``` + + If this item is specified, the empty string is displayed as two single quotation marks (''). + + ```sql + MogDB=# select length(lpad('123',0,'*')) from sys_dummy; + length + -------- + 0 + (1 row) + ``` + +- compat_concat_variadic + + Specifies the compatibility of variadic results of the concat() and concat_ws() functions. + + If this item is specified and a concat function has a parameter of the variadic type, the different result formats of O and Teradata are retained. If this item is not specified and a concat function has a parameter of the variadic type, the result format of O is retained for both O and Teradata. This option has no effect on MY because MY has no variadic type. + +- merge_update_multi + + When MERGE INTO… WHEN MATCHED THEN UPDATE (see [MERGE INTO](../../../reference-guide/sql-syntax/MERGE-INTO.md)) and INSERT… ON DUPLICATE KEY UPDATE (see [INSERT](../../../reference-guide/sql-syntax/INSERT.md)) are used, controls the UPDATE behavior when a row in the target table matches multiple rows of source data. + + If this item is specified and the preceding scenario exists, the system performs multiple UPDATE operations on the conflicting row. If this item is not specified and the preceding scenario exists, an error is reported, that is, the MERGE or INSERT operation fails. + +- plstmt_implicit_savepoint + + Controls whether update statements in a procedure execute in separate subtransactions. + + If this configuration item is set, an implicit savepoint is created before each update statement in the procedure, and the EXCEPTION block rolls back to the most recent savepoint by default, ensuring that only the changes made by the failed statement are rolled back. This option is for compatibility with the EXCEPTION behavior of the O database. + +- hide_tailing_zero + + Display configuration item for the numeric type. If this parameter is not set, numeric values are displayed with the specified precision. When this parameter is set, trailing zeros after the decimal point are hidden. + + ```sql + MogDB=# set behavior_compat_options='hide_tailing_zero'; + MogDB=# select cast(123.123 as numeric(15,10)) as a, to_char(cast(123.123 as numeric(15,10)), '999D999999'); + a | to_char + ---------+---------- + 123.123 | 123.123 + (1 row) + MogDB=# set behavior_compat_options=''; + MogDB=# select cast(123.123 as numeric(15,10)) as a, to_char(cast(123.123 as numeric(15,10)), '999D999999'); + a | to_char + ----------------+------------- + 123.1230000000 | 123.123000 + (1 row) + ``` + +- rownum_type_compat + + Specifies the ROWNUM type. The default value is **INT8**. After this parameter is specified, the value is changed to **NUMERIC**. + +- aformat_null_test + + Determines the logic for checking whether the row type is not null. When this parameter is set, if a column in a row is not null, **true** is returned. + + When this parameter is not set, if all columns in a row are not null, **true** is returned. + +- aformat_regexp_match + + Determines the matching behavior of regular expression functions. + + When this parameter is set and **sql_compatibility** is set to **A** or **B**, the options supported by the **flags** parameter of the regular expression are changed as follows: + + 1. By default, the character '\n' cannot be matched. + + 2. When **flags** contains the **n** option, the character '\n' can be matched. + 3.
The **regexp_replace(source, pattern, replacement)** function replaces all matching substrings. + 4. **regexp_replace(source, pattern, replacement, flags)** returns null when the value of **flags** is **''** or null. + + Otherwise, the meanings of the options supported by the **flags** parameter of the regular expression are as follows: + + 1. By default, the character '\n' can be matched. + 2. The **n** option in **flags** indicates that the multi-line matching mode is used. + 3. The **regexp_replace(source, pattern, replacement)** function replaces only the first matched substring. + 4. If the value of **flags** is **''** or null, the return value of **regexp_replace(source, pattern, replacement, flags)** is the character string after replacement. + +- compat_cursor + + Determines the compatibility behavior of implicit cursor states. If this parameter is set and the O compatibility mode is used, the effective scope of implicit cursor states (**SQL %FOUND**, **SQL %NOTFOUND**, **SQL %ISOPEN** and **SQL %ROWCOUNT**) is extended from only the currently executed function to all subfunctions invoked by this function. + +- proc_outparam_override + + Controls the overloading behavior of the out parameters of a procedure. If this parameter is turned on, the procedure can be called normally even if overloads differ only in their out parameters. When this option is set, a function or procedure that contains an out parameter must pass the out parameter explicitly when it is called. + + Whether a function or procedure contains an out parameter can be seen by `\df function name`, for example: + + ```sql + MogDB=# \df DBE_PERF.get_global_bgwriter_stat + List of functions + -[ RECORD 1 ]-------+---------------------------------------------------------------------- + Schema | dbe_perf + Name | get_global_bgwriter_stat + Result data type | SETOF record + Argument data types | OUT node_name name, OUT checkpoints_timed bigint, OUT checkpoints_req bigint, OUT checkpoint_write_time double precision, OUT checkpoint_sync_time double precision, OUT buffers_checkpoint bigint, OUT buffers_clean bigint, OUT maxwritten_clean bigint, OUT buffers_backend bigint, OUT buffers_backend_fsync bigint, OUT buffers_alloc bigint, OUT stats_reset timestamp with time zone + Type | normal + fencedmode | f + propackage | f + prokind | f + ``` + + An out/inout parameter must be passed as a variable, not a constant; when overloading is turned off, out parameters do not need to be passed explicitly in a perform call. + + The interaction of the proc_outparam_override option with the perform operation is as follows: + + 1. When the proc_outparam_override option is turned off, perform does not support passing a constant to an out parameter, and a variable must be passed; an inout parameter may be passed a constant, because perform ignores out parameters, so a constant passed to an inout parameter is in effect passed to an in parameter. + + 2. When the proc_outparam_override option is turned on, perform syntactically accepts both variables and constants for an out parameter, but passing a constant reports an error, because the out parameter needs a variable to receive its value and a value cannot be assigned to a constant; a variable must be passed.
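  A minimal sketch of the variable requirement described above, assuming an A-compatibility database with proc_outparam_override set; the procedure and variable names are illustrative, not taken from the product documentation:

  ```sql
  -- Hypothetical procedure with a single OUT parameter.
  CREATE OR REPLACE PROCEDURE get_count(OUT cnt int)
  AS
  BEGIN
      SELECT count(*) INTO cnt FROM pg_class;
  END;
  /

  -- The OUT argument must be a variable that can receive the value.
  DECLARE
      v int;
  BEGIN
      get_count(v);      -- passing a variable works
      -- get_count(0);   -- passing a constant would report an error
  END;
  /
  ```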
- proc_implicit_for_loop_variable + + Determines the behavior of the **FOR_LOOP** query statement in a stored procedure. When this parameter is set, if **rec** has been defined in the **FOR rec IN query LOOP** statement, the defined **rec** variable is not reused and a new variable is created. Otherwise, the defined **rec** variable is reused and no new variable is created. + +- allow_procedure_compile_check + + Determines the compilation check of the **SELECT** and **OPEN CURSOR** statements in a stored procedure. If this parameter is set, when the **SELECT**, **OPEN CURSOR FOR**, **CURSOR %rowtype**, or **for rec in** statement is executed in a stored procedure, the stored procedure cannot be created if the queried table does not exist, and the compilation check of the trigger function is not supported. If the queried table exists, the stored procedure is successfully created. + +- char_coerce_compat + + Determines the behavior when char(n) types are converted to other variable-length string types. By default, spaces at the end are omitted when the char(n) type is converted to other variable-length string types. After this parameter is enabled, spaces at the end are not omitted during conversion. In addition, if the length of the char(n) type exceeds the length of other variable-length string types, an error is reported. This parameter is valid only when **sql_compatibility** is set to **A**. + +- truncate_numeric_tail_zero + + Display configuration item for the numeric type. When this option is not set, numeric values are displayed with their default precision. When this item is set, all scenarios that output numeric values hide trailing zeros after the decimal point, except to_char(numeric, format), which displays the specified precision. For example: + + ```sql + MogDB=# set behavior_compat_options='truncate_numeric_tail_zero'; + MogDB=# select cast(123.123 as numeric(15,10)) as a, to_char(cast(123.123 as numeric(15,10)), '999D999999'); + a | to_char + ---------+------------- + 123.123 | 123.123000 + (1 row) + MogDB=# set behavior_compat_options=''; + MogDB=# select cast(123.123 as numeric(15,10)) as a, to_char(cast(123.123 as numeric(15,10)), '999D999999'); + a | to_char + ----------------+------------- + 123.1230000000 | 123.123000 + (1 row) + ``` + +- pgformat_substr + + Controls the behavior of substr(str, from, for) in different scenarios. By default, if the value of **from** is less than 0, substr counts from the end of the string. If the value of **for** is less than 1, substr returns NULL. After this parameter is enabled, if the value of **from** is less than 0, substr counts from the (-from + 1) position of the character string. If the value of **for** is less than 0, substr reports an error. This parameter is valid only when `sql_compatibility` is set to `PG`. + +- allow_orderby_undistinct_column + + In B compatibility mode, when this parameter is enabled, a SELECT DISTINCT statement supports ORDER BY columns that are not in the DISTINCT list. For example: `select distinct a from test order by b;` + + > Note: This parameter only supports DISTINCT, not DISTINCT ON. When the DOLPHIN plugin is present, this parameter does not take effect and the behavior is controlled by the dolphin.sql_mode parameter instead; not setting the sql_mode_full_group option in dolphin.sql_mode is equivalent to enabling this option.
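  A runnable version of the example above, assuming a B-compatibility database; the table and data are illustrative:

  ```sql
  MogDB=# create table test(a int, b int);
  MogDB=# insert into test values (1, 2), (1, 1);
  MogDB=# set behavior_compat_options = 'allow_orderby_undistinct_column';
  MogDB=# select distinct a from test order by b;  -- b is not in the DISTINCT list
  ```

  Without the option set, the same statement reports an error because b does not appear in the DISTINCT list.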
+ +- select_into_return_null + + In B or PG compatibility mode, with this parameter enabled, the procedure statement `SELECT select_expressions INTO [STRICT] target FROM ...` allows variables to be assigned a NULL value if STRICT is not specified and the query result is null. + +- convert_string_digit_to_numeric + + Controls whether numeric constants represented as strings in a table are converted to numeric types for comparison. + + ```sql + MogDB=# create table test1(c1 int, c2 varchar); + MogDB=# insert into test1 values(2, '1.1'); + MogDB=# set behavior_compat_options = ''; + MogDB=# select * from test1 where c2 > 1; + ERROR: invalid input syntax for type bigint: "1.1" + MogDB=# set behavior_compat_options = 'convert_string_digit_to_numeric'; + MogDB=# select * from test1 where c2 > 1; + c1 | c2 + ----+----- + 2 | 1.1 + (1 row) + ``` + +- plsql_security_definer + + When this parameter is enabled, the procedure is created with definer privileges by default. + +- skip_insert_gs_source + + When this parameter is enabled, PL/SQL objects are no longer inserted into the DBE_PLDEVELOPER.gs_source table when they are created. + +- compat_sort_group_column + + When this parameter is enabled, the behavior of the GROUP/ORDER BY clause is consistent with Oracle and constants no longer affect the GROUP/ORDER BY result set. This parameter takes effect only when the [sql_compatibility](#sql_compatibility) parameter value is A. + +- sql_implicit_savepoint + + This option controls whether the entire transaction is rolled back when a single SQL statement in the transaction fails. When this option is set, a single SQL error in a transaction does not affect the commit of the other SQL statements, and the commit retains the results of the correctly executed SQL statements (see the sketch below). This option is only available in A-compatible mode. + +- accept_empty_str + + In A-compatible mode, when this parameter is disabled, MogDB will treat the empty string as NULL; otherwise, it will accept the empty string normally. Example: + + ```sql + MogDB=# set behavior_compat_options='accept_empty_str'; + MogDB=# select '' is null; + ?column? + ---------- + f + (1 row) + MogDB=# set behavior_compat_options=''; + MogDB=# select '' is null; + ?column? + ---------- + t + (1 row) + ``` + +- set_procedure_current_schema + + If this parameter is enabled and the compiled function (including functions in packages) or procedure has caller privileges, the search path of the function or procedure is set to the current_schema at the time of execution. + +- compat_oracle_txn_control + + - Configure this option to enable select to auto-commit transactions when the driver is in non-autocommit mode. + - If the driver is in autocommit mode (autocommit = on), enabling this option will cause the driver's autocommit mode not to take effect when the driver version is JDBC 5.0.0.6/5.0.0.7, Psycopg2 5.0.0.4, or ODBC 5.0.0.2. Subsequent versions of the driver will resolve this conflict. The temporary solution is to disable this parameter in the driver connection string. + - The compat_oracle_txn_control option is not allowed to be modified by `set behavior_compat_options` after JDBC 5.0.0.8 and Psycopg2 5.0.0.5. + - `select` does not auto-commit after JDBC `setSavepoint`.
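  The sketch referenced in the sql_implicit_savepoint item above, assuming an A-compatibility database; the table name is illustrative:

  ```sql
  MogDB=# create table t(c1 int);
  MogDB=# set behavior_compat_options = 'sql_implicit_savepoint';
  MogDB=# begin;
  MogDB=# insert into t values (1);        -- succeeds
  MogDB=# insert into t values ('bad');    -- fails, but only this statement is rolled back
  MogDB=# commit;                          -- the row (1) is retained
  ```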
+ +- bpchar_coerce_compat + + Controls bpchar-and-text operations so that text is implicitly converted to bpchar, allowing conditions such as bpchar_col = 'xxx'::text to directly use indexes or partition pruning and improving query efficiency. + +- allow_like_indexable + + Configuring this option automatically creates indexes that support fuzzy matching (introduced since version 5.0.4). + +## plsql_compile_check_options + +**Parameter description**: Database compatibility behavior configuration items; the value of this parameter consists of several configuration items separated by commas. This parameter is a USERSET parameter. Set it based on instructions provided in Table 2 [Methods for setting GUC parameters](../../../reference-guide/guc-parameters/appendix.md). **Value range**: String -- **error** indicates that an error is reported during compilation if the name of a stored procedure variable and that of a table column are the same. -- **use_variable** indicates that the variable is preferentially used if the name of a stored procedure variable and that of a table column are the same. -- **use_column** indicates that the column is preferentially used if the name of a stored procedure variable and that of a table column are the same. +**Default value**: "" -**Default value**: **error** +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: +> +> - Only the compatibility configuration items in Table 2 are currently supported. +> - When configuring multiple compatibility configuration items, adjacent configuration items are separated by commas. For example: set plsql_compile_check_options='for_loop,outparam'; + +**Table 2** Compatibility configuration items + +| Configuration Item | Compatibility Behavior | +| :----------------- | :----------------------------------------------------------- | +| for_loop | Controls the behavior of FOR_LOOP query statements in stored procedures. When this item is set, if rec is already defined in a FOR rec IN query LOOP statement, the defined rec variable is not reused and a new variable is created; otherwise, the defined rec variable is reused and no new variable is created. (Same behavior as proc_implicit_for_loop_variable.) | +| outparam | When out-parameter overloading is in effect, references to out parameters are checked: passing a constant as an out parameter is prohibited and reports an error. | ## td_compatible_truncation @@ -178,6 +519,19 @@ This parameter is a USERSET parameter. Set it based on instructions provided in **Default value**: **off** +## uppercase_attribute_name + +**Parameter description**: Specifies whether column names are returned to the client in uppercase. This parameter is restricted to A-compatible mode and centralized environments. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- on enables returning column names to the client in uppercase. +- off disables returning column names to the client in uppercase. + +**Default value**: off + ## lastval_supported **Parameter description**: Specifies whether to enable the **lastval** function. @@ -190,3 +544,69 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided - **off** indicates that the **lastval** function is not supported. Additionally, the **nextval** function supports push-down.
**Default value**: **off** + +## enable_custom_parser + +This parameter is not supported in the current version. + +## enable_date_operator_sub_oracle + +**Parameter description**: Controls whether the subtraction of two date type fields returns a numeric result indicating the number of days between the two dates. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- on means that subtracting two dates returns a numeric result (the number of days between them). +- off means that MogDB's original processing method is used: the two dates are subtracted using the interval operator. + +**Default value**: off + +## proc_inparam_immutable + +**Parameter description**: Controls whether package constants are used as default values for function or procedure entry parameters. Applies to A-compatible mode. + +This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- on: only entry parameters of the IN type can use default values, and a FUNCTION/PROCEDURE cannot modify its entry parameters, consistent with Oracle. Modifying the value of a PACKAGE variable affects the behavior of the function entry parameter, which also stays consistent with Oracle. +- off: does not support package constants as default values for function or procedure entry parameters. + +**Default value**: on + +## ora_dblink_col_case_sensitive + +**Parameter description**: Controls whether Oracle dblink columns are case-sensitive. The default is case-insensitive. + +This parameter is a SUSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +**Default value**: off + +## enable_mergeinto_subqueryalias + +**Parameter description**: Controls whether a target table alias can be used when the source table of merge into using is a subquery in A mode. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- on means that the target table alias can be used. +- off means that the target table alias cannot be used. + +**Default value**: off + +## enable_multitable_update + +**Parameter description**: By default, MogDB supports the multi-table update operation only in B mode; enable this parameter to use the multi-table update function in A mode. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- on indicates support for using the multi-table update feature in A-mode. +- off indicates that the multi-table update function is not supported in A-mode.
+ +**Default value**: off diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/version-and-platform-compatibility.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/version-and-platform-compatibility.md index fbcd6b9f..d0d925a2 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/version-and-platform-compatibility.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/version-and-platform-compatibility/version-and-platform-compatibility.md @@ -1,11 +1,11 @@ ---- -title: Version and Platform Compatibility -summary: Version and Platform Compatibility -author: zhang cuiping -date: 2023-04-07 ---- - -# Version and Platform Compatibility - -- **[Compatibility with Earlier Versions](compatibility-with-earlier-versions.md)** +--- +title: Version and Platform Compatibility +summary: Version and Platform Compatibility +author: zhang cuiping +date: 2023-04-07 +--- + +# Version and Platform Compatibility + +- **[Compatibility with Earlier Versions](compatibility-with-earlier-versions.md)** - **[Platform and Client Compatibility](platform-and-client-compatibility.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/wait-events.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/wait-events.md index 4467cfb3..15e0181e 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/wait-events.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/wait-events.md @@ -1,23 +1,23 @@ ---- -title: Wait Events -summary: Wait Events -author: Zhang Cuiping -date: 2021-04-20 ---- - -# Wait Events - -## enable_instr_track_wait - -**Parameter description**: Specifies whether to enable real-time collection of wait event information. - -In the x86-based centralized deployment scenario, the hardware configuration specifications are 32-core CPU and 256 GB memory. When the Benchmark SQL 5.0 tool is used to test performance, the performance fluctuates by about 1.4% by enabling or disabling this parameter. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). - -**Value range**: Boolean - -- **on** indicates that the function of collecting wait event information is enabled. -- **off** indicates that the function of collecting wait event information is disabled. - -**Default value**: **on** +--- +title: Wait Events +summary: Wait Events +author: Zhang Cuiping +date: 2021-04-20 +--- + +# Wait Events + +## enable_instr_track_wait + +**Parameter description**: Specifies whether to enable real-time collection of wait event information. + +In the x86-based centralized deployment scenario, the hardware configuration specifications are 32-core CPU and 256 GB memory. When the Benchmark SQL 5.0 tool is used to test performance, the performance fluctuates by about 1.4% by enabling or disabling this parameter. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](appendix.md). + +**Value range**: Boolean + +- **on** indicates that the function of collecting wait event information is enabled. +- **off** indicates that the function of collecting wait event information is disabled. 
+ +**Default value**: **on** diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/archiving.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/archiving.md index 291d5cfc..1dfed1d0 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/archiving.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/archiving.md @@ -13,7 +13,7 @@ date: 2021-04-20 This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**: > > When **wal_level** is set to **minimal**, the **archive_mode** parameter is unavailable. @@ -30,7 +30,7 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**: > > - If both **archive_dest** and **archive_command**are configured, WALs are preferentially saved to the directory specified by **archive_dest**. The command configured by **archive_command**does not take effect. > @@ -64,7 +64,7 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**: > > - If both **archive_dest** and **archive_command**are configured, WALs are preferentially saved to the directory specified by **archive_dest**. The command configured by **archive_command**does not take effect. > - If the string is a relative path, it is relative to the data directory. The following is an example: @@ -85,7 +85,7 @@ archive_timeout is suitable for scenarios where active-standby streaming replica This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**: > > - The server is forced to switch to a new WAL segment file when the period specified by this parameter has elapsed since the last file switch. > - Archived files that are closed early due to a forced switch are still of the same length as full files. Therefore, a very short **archive_timeout** will bloat the archive storage. You are advised to set **archive_timeout** to **60s**. @@ -93,3 +93,34 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T **Value range**: an integer ranging from 0 to *INT_MAX*. The unit is second. **0** indicates that archiving timeout is disabled. **Default value**: **0** + +## archive_interval + +**Parameter description**: Indicates the archiving interval. 
In distributed standby archiving and OBS archiving scenarios, a request is sent to the standby every archive_interval to archive all WALs; the standby may therefore archive the same xlog data multiple times. This avoids data loss when some files are lost during the archive_timeout period and are not archived in time. This parameter is only used for distributed or standby archiving; when you enable it, you need to create additional archive slots, and it is not in use at the moment. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**: +> +> - Forces archiving of log files when the time set in this parameter is exceeded. +> - Since archiving involves I/O operations, it must not run too frequently, nor should the interval be set so large that it impacts the PITR RPO. It is recommended that the default value be used. + +**Value range**: Integer, 1 ~ 1000, in seconds. + +**Default value**: 1 + +## time_to_target_rpo + +**Parameter description**: In dual database instance offsite disaster recovery mode, sets the number of seconds (time_to_target_rpo) allowed between a failure of the primary database instance and the recovery point that has been archived to OBS. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Integer, 0 to 3600 (seconds) + +In dual database instance offsite disaster recovery mode, the primary database instance logs will be archived to OBS. + +0 means that log flow control is not enabled. 1~3600 sets the number of seconds allowed from the time the primary database instance fails to the recovery point that has been archived to OBS, ensuring that when the primary database instance crashes due to a disaster, the maximum amount of data that may be lost stays within the allowable range. + +Setting time_to_target_rpo too small will affect the performance of the host, and setting it too large will lose the flow control effect. + +**Default value**: 0 diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/log-replay.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/log-replay.md index 3e31ff20..bfe9ed12 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/log-replay.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/log-replay.md @@ -155,4 +155,29 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T - **'nodebind: 1, 2'**: Use the CPU cores in NUMA groups 1 and 2 to bind threads. - **'cpubind: 0-30'**: Use the CPU cores 0 to 30 to bind threads.
-**Default value**: **'nobind'** \ No newline at end of file +**Default value**: **'nobind'** + +**Table 1** Parameter Setting Reference for Different CPU, Memory and Deployment Modes + +| No. | Number of CPUs | RAM (GB) | Hybrid deployment or not | recovery_parse_workers | recovery_redo_workers | Total number of playback threads | Note | +| :----- | :------------- | :------ | :----------------------- | :--------------------- | :-------------------- | :------------------------------- | :----------------------------------------------------------- | +| 1 | 4 | - | - | 1 | 1 | - | Not recommended | +| 2 | 8 | - | yes | 1 | 1 | - | Not recommended | +| 3 | 8 | 64 | no | 1 | 1 | - | Not recommended | +| 4 | 16 | 128 | yes | 1 | 1 | - | Not recommended | +| 5 | 16 | 128 | no | 2 | 3 | 15 | - | +| 6 | 32 | 256 | yes | 2 | 2 | 13 | - | +| 7 | 32 | 256 | no | 2 | 8 | 25 | - | +| 8 | 64 | 512 | yes | 2 | 4 | 17 | - | +| 9 | 64 | 512 | no | 2 | 8 | 25 | Larger configurations use the same settings | +| 10 | 96 | 768 | - | 2 | 8 | 25 | Larger configurations use the same settings | + +## enable_walrcv_reply_dueto_commit + +**Parameter description**: Controls whether the walreceiver feeds back the LSN after a commit record completes redo. + +This parameter is a SIGHUP parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +**Default value**: off \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/settings.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/settings.md index eec988d0..b27cccb1 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/settings.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/settings.md @@ -254,6 +254,36 @@ This parameter is a POSTMASTER parameter. Set it based on instructions provided **Default value**: **10** + +## xlog_file_path + +**Parameter description**: Path to the xlog shared disk for dual database instance shared storage scenarios. This parameter is configured by the OM during the initialization of the database system and is not recommended to be modified by the user. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: String + +**Default value**: NULL + +## xlog_file_size + +**Parameter description**: The size of the xlog shared disk for dual database instance shared storage scenarios. This parameter is configured by the OM during the initialization of the database system and is not recommended to be modified by the user. + +This parameter is a POSTMASTER parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Long integer, 5053733504\~576460752303423487, in bytes. + +**Default value**: 549755813888 + +## xlog_lock_file_path + +**Parameter description**: Path to the lock file used for preemption of the xlog shared disk in a dual database instance shared storage scenario. This parameter is configured by the OM during the initialization of the database system and is not recommended to be modified by the user. + +This parameter is a POSTMASTER parameter.
Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: String + +**Default value**: NULL + ## force_promote **Parameter description**: Specifies whether to enable the forcible switchover function on the standby node. @@ -286,4 +316,17 @@ This parameter is a SIGHUP parameter. Set it based on instructions provided in T **Value range**: an integer ranging from 0 to 90000000 (μs) -**Default value**: **1** \ No newline at end of file +**Default value**: **1** + +## autocommit + +**Parameter description**: Sets whether the transaction is autocommitted or not, and can be changed to false only in B-compatible mode. + +This parameter is a USERSET parameter. Set it based on instructions provided in Table 1 [GUC parameters](../../../reference-guide/guc-parameters/appendix.md). + +**Value range**: Boolean + +- on means enable transaction autocommit. +- off means disable transaction autocommit. + +**Default value**: on \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/write-ahead-log.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/write-ahead-log.md index 0b71486a..f7888bf4 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/write-ahead-log.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/write-ahead-log.md @@ -1,13 +1,13 @@ ---- -title: Write Ahead Log -summary: Write Ahead Log -author: zhang cuiping -date: 2023-04-07 ---- - -# Write Ahead Log - -- **[Settings](settings.md)** -- **[Settings](checkpoints.md)** -- **[Log Replay](log-replay.md)** +--- +title: Write Ahead Log +summary: Write Ahead Log +author: zhang cuiping +date: 2023-04-07 +--- + +# Write Ahead Log + +- **[Settings](settings.md)** +- **[Settings](checkpoints.md)** +- **[Log Replay](log-replay.md)** - **[Archiving](archiving.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/writer-statement-parameters-supported-by-standby-server.md b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/writer-statement-parameters-supported-by-standby-server.md index 809d0cf7..16adb98a 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/writer-statement-parameters-supported-by-standby-server.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/guc-parameters/writer-statement-parameters-supported-by-standby-server.md @@ -1,27 +1,27 @@ ---- -title: Writer Statement Parameters Supported by Standby Servers -summary: Writer Statement Parameters Supported by Standby Servers -author: zhang cuiping -date: 2023-04-07 ---- - -# Writer Statement Parameters Supported by Standby Servers - -## enable_remote_excute - -**Parameter description**: specifies whether to allow standby servers to perform writer statements. If this function is enabled, it cannot be modified. - -This parameter is a **POSTMASTER** parameter. Set it based on instructions provided in Table 1 [GUC parameters](./appendix.md). - -**Value range**: **true** or **false** - -**true** indicates that standby servers are allowed to perform writer statements. **off** indicates that this function is disabled. - -**Default value:** **off** - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: -> -> - **enable_remote_execute** is set to **false** in standalone mode. -> - To use this function, the parameter needs to be set to **true**. 
-> - Once the function is enabled, standby servers are allowed to perform writer statements and DDL operations. It supports simple and extended queries. When this function is enabled, the read statements are still performed on standby servers and the writer statements are forwarded to primary servers to perform. +--- +title: Writer Statement Parameters Supported by Standby Servers +summary: Writer Statement Parameters Supported by Standby Servers +author: zhang cuiping +date: 2023-04-07 +--- + +# Writer Statement Parameters Supported by Standby Servers + +## enable_remote_excute + +**Parameter description**: specifies whether to allow standby servers to perform writer statements. If this function is enabled, it cannot be modified. + +This parameter is a **POSTMASTER** parameter. Set it based on instructions provided in Table 1 [GUC parameters](./appendix.md). + +**Value range**: **true** or **false** + +**true** indicates that standby servers are allowed to perform writer statements. **off** indicates that this function is disabled. + +**Default value:** **off** + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: +> +> - **enable_remote_execute** is set to **false** in standalone mode. +> - To use this function, the parameter needs to be set to **true**. +> - Once the function is enabled, standby servers are allowed to perform writer statements and DDL operations. It supports simple and extended queries. When this function is enabled, the read statements are still performed on standby servers and the writer statements are forwarded to primary servers to perform. > - Once the function is enabled, all SQL statements including read statements are forwarded to a primary server after a transaction is opened on a standby server. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/memory/GS_SHARED_MEMORY_DETAIL.md b/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/memory/GS_SHARED_MEMORY_DETAIL.md deleted file mode 100644 index ec9af76e..00000000 --- a/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/memory/GS_SHARED_MEMORY_DETAIL.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: GS_SHARED_MEMORY_DETAIL -summary: GS_SHARED_MEMORY_DETAIL -author: Guo Huan -date: 2021-04-19 ---- - -# GS_SHARED_MEMORY_DETAIL - -**GS_SHARED_MEMORY_DETAIL** queries the usage information about shared memory contexts on the current node. 
- -**Table 1** GS_SHARED_MEMORY_DETAIL columns - -| Name | Type | Description | -| :---------- | :------- | :----------------------------------------------- | -| contextname | text | Name of the memory context | -| level | smallint | Level of the memory context | -| parent | text | Name of the parent memory context | -| totalsize | bigint | Total size of the shared memory (unit: byte) | -| freesize | bigint | Remaining size of the shared memory (unit: byte) | -| usedsize | bigint | Used size of the shared memory (unit: byte) | diff --git a/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/memory/memory-schema.md b/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/memory/memory-schema.md index ff088c33..575a55fa 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/memory/memory-schema.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/memory/memory-schema.md @@ -9,5 +9,4 @@ date: 2023-04-07 - **[MEMORY_NODE_DETAIL](MEMORY_NODE_DETAIL.md)** - **[GLOBAL_MEMORY_NODE_DETAIL](GLOBAL_MEMORY_NODE_DETAIL.md)** -- **[GS_SHARED_MEMORY_DETAIL](GS_SHARED_MEMORY_DETAIL.md)** - **[GLOBAL_SHARED_MEMORY_DETAIL](GLOBAL_SHARED_MEMORY_DETAIL.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/alias.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/alias.md index a85d8ffb..51e67fe8 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/alias.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/alias.md @@ -1,54 +1,54 @@ ---- -title: Aliases -summary: Aliases -author: zhang cuiping -date: 2023-04-07 ---- - -# Aliases - -SQL can rename a table or a column. The name is the alias of the table or the column. Aliases are created to improve the readability of table names or column names. In SQL, **AS** is used to create an alias. - -## Syntax - -- Column alias syntax - - ``` - SELECT - { * | [column [ AS ] output_name, ...] } - [ FROM from_item [, ...] ] - [ WHERE condition ]; - ``` - -- Table alias syntax - - ``` - SELECT column1, column2.... - FROM table_name AS output_name - WHERE [condition]; - ``` - -## Parameter Description - -- **output_name** - - You may use the **AS output_name** clause to give an alias for an output column. The alias is used for displaying the output column. The **name**, **value**, and **type** keywords can be used as column aliases. - -## Examples - -Use **C** to indicate the alias of the **customer_t1** table to query data in the table. - -```sql -MogDB=# SELECT c.c_first_name,c.amount FROM customer_t1 AS c; - c_first_name | amount ---------------+-------- - Grace | 1000 - Grace | - | - Joes | 2200 - James | 5000 - Local | 3000 - Lily | 1000 - Lily | 2000 -(8 rows) +--- +title: Aliases +summary: Aliases +author: zhang cuiping +date: 2023-04-07 +--- + +# Aliases + +SQL can rename a table or a column. The name is the alias of the table or the column. Aliases are created to improve the readability of table names or column names. In SQL, **AS** is used to create an alias. + +## Syntax + +- Column alias syntax + + ``` + SELECT + { * | [column [ AS ] output_name, ...] } + [ FROM from_item [, ...] ] + [ WHERE condition ]; + ``` + +- Table alias syntax + + ``` + SELECT column1, column2.... + FROM table_name AS output_name + WHERE [condition]; + ``` + +## Parameter Description + +- **output_name** + + You may use the **AS output_name** clause to give an alias for an output column. The alias is used for displaying the output column. 
The **name**, **value**, and **type** keywords can be used as column aliases. + +## Examples + +Use **C** to indicate the alias of the **customer_t1** table to query data in the table. + +```sql +MogDB=# SELECT c.c_first_name,c.amount FROM customer_t1 AS c; + c_first_name | amount +--------------+-------- + Grace | 1000 + Grace | + | + Joes | 2200 + James | 5000 + Local | 3000 + Lily | 1000 + Lily | 2000 +(8 rows) ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/extended-functions.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/extended-functions.md index f6e5583a..172f30cd 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/extended-functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/extended-functions.md @@ -1,36 +1,36 @@ ---- -title: Extended Functions -summary: Extended Functions -author: Zhang Cuiping -date: 2021-05-18 ---- - -# Extended Functions - -The following table lists the extended functions supported by MogDB. These functions are for reference only. - - - - - - - - - - - - - - - - - - - - - - - - - -
CategoryNameDescription
Access privilege inquiry functionhas_sequence_privilege(user, sequence, privilege)Queries whether a specified user has privilege for sequences.
has_sequence_privilege(sequence, privilege)Queries whether the current user has privilege for sequence.
Trigger functionpg_get_triggerdef(oid)Gets CREATE [ CONSTRAINT ] TRIGGER command for triggers.
pg_get_triggerdef(oid, boolean)Gets CREATE [ CONSTRAINT ] TRIGGER command for triggers.
+--- +title: Extended Functions +summary: Extended Functions +author: Zhang Cuiping +date: 2021-05-18 +--- + +# Extended Functions + +The following table lists the extended functions supported by MogDB. These functions are for reference only. + + + + + + + + + + + + + + + + + + + + + + + + + +
CategoryNameDescription
Access privilege inquiry functionhas_sequence_privilege(user, sequence, privilege)Queries whether a specified user has privilege for sequences.
has_sequence_privilege(sequence, privilege)Queries whether the current user has privilege for sequence.
Trigger functionpg_get_triggerdef(oid)Gets CREATE [ CONSTRAINT ] TRIGGER command for triggers.
pg_get_triggerdef(oid, boolean)Gets CREATE [ CONSTRAINT ] TRIGGER command for triggers.
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/extended-syntax.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/extended-syntax.md index 8070c842..18b29cce 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/extended-syntax.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/extended-syntax.md @@ -1,52 +1,52 @@ ---- -title: Extended Syntax -summary: Extended Syntax -author: Zhang Cuiping -date: 2021-05-18 ---- - -# Extended Syntax - -MogDB provides extended syntax . - -**Table 1** Extended SQL syntax - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
CategoryKeywordsDescription
Creating a tableINHERITS ( parent_table [, … ] )Specifies whether an inherited table is supported.
column_constraint:
REFERENCES reftable
[ ( refcolumn ) ] [ MATCH
-FULL | MATCH PARTIAL |
MATCH SIMPLE ][ ON
DELETE action ] [ ON
UPDATE action ]
You can run REFERENCES reftable[(refcolumn)] [MATCH FULL |MATCH PARTIAL | MATCH SIMPLE] [ON DELETE action] [ON UPDATE action] to create foreign key constraints for tables.
Loading a moduleCREATE EXTENSIONLoads a new module (such as DBLINK) to the current database.
DROP EXTENSIONDeletes the loaded module.
Aggregate functionsCREATE AGGREGATEDefines a new aggregation function.
ALTER AGGREGATEModifies the definition of an aggregate function.
DROP AGGREGATEDrops an existing function.
+--- +title: Extended Syntax +summary: Extended Syntax +author: Zhang Cuiping +date: 2021-05-18 +--- + +# Extended Syntax + +MogDB provides extended syntax . + +**Table 1** Extended SQL syntax + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CategoryKeywordsDescription
Creating a tableINHERITS ( parent_table [, … ] )Specifies whether an inherited table is supported.
column_constraint:
REFERENCES reftable
[ ( refcolumn ) ] [ MATCH
+FULL | MATCH PARTIAL |
MATCH SIMPLE ][ ON
DELETE action ] [ ON
UPDATE action ]
You can run REFERENCES reftable[(refcolumn)] [MATCH FULL |MATCH PARTIAL | MATCH SIMPLE] [ON DELETE action] [ON UPDATE action] to create foreign key constraints for tables.
Loading a moduleCREATE EXTENSIONLoads a new module (such as DBLINK) to the current database.
DROP EXTENSIONDeletes the loaded module.
Aggregate functionsCREATE AGGREGATEDefines a new aggregation function.
ALTER AGGREGATEModifies the definition of an aggregate function.
DROP AGGREGATEDrops an existing function.
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes-introduction.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes-introduction.md index b168203c..2230cedb 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes-introduction.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes-introduction.md @@ -1,16 +1,16 @@ ---- -title: Introduction -summary: Introduction -author: Zhang Cuiping -date: 2021-05-18 ---- - -# Introduction - -Generalized Inverted Index (GIN) is designed for handling cases where the items to be indexed are composite values, and the queries to be handled by the index need to search for element values that appear within the composite items. For example, the items could be documents, and the queries could be searches for documents containing specific words. - -We use the word "item" to refer to a composite value that is to be indexed, and the word "key" to refer to an element value. GIN stores and searches for keys, not item values. - -A GIN index stores a set of (key, posting list) key-value pairs, where a posting list is a set of row IDs in which the key occurs. The same row ID can appear in multiple posting lists, since an item can contain more than one key. Each key value is stored only once, so a GIN index is very compact for cases where the same key appears many times. - -GIN is generalized in the sense that the GIN access method code does not need to know the specific operations that it accelerates. Instead, it uses custom strategies defined for particular data types. The strategy defines how keys are extracted from indexed items and query conditions, and how to determine whether a row that contains some of the key values in a query actually satisfies the query. +--- +title: Introduction +summary: Introduction +author: Zhang Cuiping +date: 2021-05-18 +--- + +# Introduction + +Generalized Inverted Index (GIN) is designed for handling cases where the items to be indexed are composite values, and the queries to be handled by the index need to search for element values that appear within the composite items. For example, the items could be documents, and the queries could be searches for documents containing specific words. + +We use the word "item" to refer to a composite value that is to be indexed, and the word "key" to refer to an element value. GIN stores and searches for keys, not item values. + +A GIN index stores a set of (key, posting list) key-value pairs, where a posting list is a set of row IDs in which the key occurs. The same row ID can appear in multiple posting lists, since an item can contain more than one key. Each key value is stored only once, so a GIN index is very compact for cases where the same key appears many times. + +GIN is generalized in the sense that the GIN access method code does not need to know the specific operations that it accelerates. Instead, it uses custom strategies defined for particular data types. The strategy defines how keys are extracted from indexed items and query conditions, and how to determine whether a row that contains some of the key values in a query actually satisfies the query. 
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes.md index a98e9e4e..45049b6e 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes.md @@ -1,13 +1,13 @@ ---- -title: GIN Indexes -summary: GIN Indexes -author: zhang cuiping -date: 2023-04-07 ---- - -# GIN Indexes - -- **[Introduction](gin-indexes-introduction.md)** -- **[Scalability](scalability.md)** -- **[Implementation](implementation.md)** +--- +title: GIN Indexes +summary: GIN Indexes +author: zhang cuiping +date: 2023-04-07 +--- + +# GIN Indexes + +- **[Introduction](gin-indexes-introduction.md)** +- **[Scalability](scalability.md)** +- **[Implementation](implementation.md)** - **[GIN Tips and Tricks](gin-tips-and-tricks.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-tips-and-tricks.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-tips-and-tricks.md index 17ed886a..275e4b26 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-tips-and-tricks.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/gin-tips-and-tricks.md @@ -1,26 +1,26 @@ ---- -title: GIN Tips and Tricks -summary: Functions and Operators -author: Zhang Cuiping -date: 2021-05-18 ---- - -# GIN Tips and Tricks - -Create vs. Insert - -Insertion into a GIN index can be slow due to the likelihood of many keys being inserted for each item. So, for bulk insertions into a table it is advisable to drop the GIN index and recreate it after finishing bulk insertion. GUC parameters related to GIN index creation and query performance as follows: - -- maintenance_work_mem - - Build time for a GIN index is very sensitive to the **maintenance_work_mem** setting. - -- work_mem - - During a series of insertions into an existing GIN index that has **FASTUPDATE** enabled, the system will clean up the pending-entry list whenever the list grows larger than **work_mem**. To avoid fluctuations in observed response time, it is desirable to have pending-list cleanup occur in the background (that is, via autovacuum). Foreground cleanup operations can be avoided by increasing **work_mem** or making **autovacuum** more aggressive. However, increasing **work_mem** means that if a foreground cleanup occurs, it will take even longer. - -- gin_fuzzy_search_limit - - The primary goal of developing GIN indexes was to support highly scalable full-text search in MogDB. A full-text search often returns a very large set of results. This often happens when the query contains very frequent words, so that the large result set is not even useful. Since reading many tuples from the disk and sorting them could take a lot of time, this is unacceptable for production. - - To facilitate controlled execution of such queries, GIN has a configurable soft upper limit on the number of rows returned: the **gin_fuzzy_search_limit** configuration parameter. The default value **0** indicates that there is no limit on the returned set. If a non-zero limit is set, then the returned set is a subset of the whole result set, chosen at random. 
**Soft upper limit** means that the actual number of returned results may deviate from the specified limit, depending on the quality of the query and the system random number generator. +--- +title: GIN Tips and Tricks +summary: GIN Tips and Tricks +author: Zhang Cuiping +date: 2021-05-18 +--- + +# GIN Tips and Tricks + +Create vs. Insert + +Insertion into a GIN index can be slow due to the likelihood of many keys being inserted for each item. So, for bulk insertions into a table it is advisable to drop the GIN index and recreate it after finishing bulk insertion. GUC parameters related to GIN index creation and query performance are as follows: + +- maintenance_work_mem + + Build time for a GIN index is very sensitive to the **maintenance_work_mem** setting. + +- work_mem + + During a series of insertions into an existing GIN index that has **FASTUPDATE** enabled, the system will clean up the pending-entry list whenever the list grows larger than **work_mem**. To avoid fluctuations in observed response time, it is desirable to have pending-list cleanup occur in the background (that is, via autovacuum). Foreground cleanup operations can be avoided by increasing **work_mem** or making **autovacuum** more aggressive. However, increasing **work_mem** means that if a foreground cleanup occurs, it will take even longer. + +- gin_fuzzy_search_limit + + The primary goal of developing GIN indexes was to support highly scalable full-text search in MogDB. A full-text search often returns a very large set of results. This often happens when the query contains very frequent words, so that the large result set is not even useful. Since reading many tuples from the disk and sorting them could take a lot of time, this is unacceptable for production. + + To facilitate controlled execution of such queries, GIN has a configurable soft upper limit on the number of rows returned: the **gin_fuzzy_search_limit** configuration parameter. The default value **0** indicates that there is no limit on the returned set. If a non-zero limit is set, then the returned set is a subset of the whole result set, chosen at random. **Soft upper limit** means that the actual number of returned results may deviate from the specified limit, depending on the quality of the query and the system random number generator. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/implementation.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/implementation.md index bdc51bb2..8f209f6d 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/implementation.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/gin-indexes/implementation.md @@ -1,24 +1,24 @@ ---- -title: Implementation -summary: Implementation -author: Zhang Cuiping -date: 2021-05-18 ---- - -# Implementation - -Internally, a GIN index contains a B-tree index constructed over keys, where each key is an element of one or more indexed items (a member of an array, for example) and where each tuple in a page contains either a pointer to a B-tree of heap pointers (a "posting tree"), or a simple list of heap pointers (a "posting list") when the list is small enough to fit into a single index tuple along with the key value. - -Multi-column GIN indexes are implemented by building a single B-tree over composite values (column number, key value). The key values for different columns can be of different types.
- -## GIN Fast Update Technique - -Updating a GIN index tends to be slow because of the intrinsic nature of inverted indexes: inserting or updating one heap row can cause many inserts into the index. After the table is vacuumed or if the pending list becomes larger than **work_mem**, the entries are moved to the main GIN data structure using the same bulk insert techniques used during initial index creation. This greatly increases the GIN index update speed, even counting the additional vacuum overhead. Moreover the overhead work can be done by a background process instead of in foreground query processing. - -The main disadvantage of this approach is that searches must scan the list of pending entries in addition to searching the regular index, and so a large list of pending entries will slow searches significantly. Another disadvantage is that, while most updates are fast, an update that causes the pending list to become "too large" will incur an immediate cleanup cycle and be much slower than other updates. Proper use of autovacuum can minimize both of these problems. - -If consistent response time (of entity cleanup and of update) is more important than update speed, use of pending entries can be disabled by turning off the **fastupdate** storage parameter for a GIN index. For details, see [CREATE INDEX](../../../../reference-guide/sql-syntax/CREATE-INDEX.md). - -## Partial Match Algorithm - -GIN can support "partial match" queries, in which the query does not determine an exact match for one or more keys, but the possible matches fall within a narrow range of key values (within the key sorting order determined by the **compare** support method). The **extractQuery** method, instead of returning a key value to be matched exactly, returns a key value that is the lower bound of the range to be searched, and sets the **pmatch** flag true. The key range is then scanned using the **comparePartial** method. **comparePartial** must return zero for a matching index key, less than zero for a non-match that is still within the range to be searched, or greater than zero if the index key is past the range that could match. +--- +title: Implementation +summary: Implementation +author: Zhang Cuiping +date: 2021-05-18 +--- + +# Implementation + +Internally, a GIN index contains a B-tree index constructed over keys, where each key is an element of one or more indexed items (a member of an array, for example) and where each tuple in a page contains either a pointer to a B-tree of heap pointers (a "posting tree"), or a simple list of heap pointers (a "posting list") when the list is small enough to fit into a single index tuple along with the key value. + +Multi-column GIN indexes are implemented by building a single B-tree over composite values (column number, key value). The key values for different columns can be of different types. + +## GIN Fast Update Technique + +Updating a GIN index tends to be slow because of the intrinsic nature of inverted indexes: inserting or updating one heap row can cause many inserts into the index. After the table is vacuumed or if the pending list becomes larger than **work_mem**, the entries are moved to the main GIN data structure using the same bulk insert techniques used during initial index creation. This greatly increases the GIN index update speed, even counting the additional vacuum overhead. Moreover the overhead work can be done by a background process instead of in foreground query processing. 
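+
+In other words, newly inserted entries first accumulate in a temporary, unsorted pending list, which is later merged into the main index in bulk. As a minimal sketch of this mechanism (the table and index names are illustrative; **fastupdate** is the storage parameter described below), a manual **VACUUM** triggers this merge:
+
+```sql
+mogdb=# CREATE INDEX documents_gin_idx ON documents USING gin (body_tsv) WITH (fastupdate = on);
+mogdb=# VACUUM documents;  -- merges accumulated pending-list entries into the main GIN structure
+```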
+ +The main disadvantage of this approach is that searches must scan the list of pending entries in addition to searching the regular index, and so a large list of pending entries will slow searches significantly. Another disadvantage is that, while most updates are fast, an update that causes the pending list to become "too large" will incur an immediate cleanup cycle and be much slower than other updates. Proper use of autovacuum can minimize both of these problems. + +If consistent response time (for both pending-list cleanup and updates) is more important than update speed, use of pending entries can be disabled by turning off the **fastupdate** storage parameter for a GIN index. For details, see [CREATE INDEX](../../../../reference-guide/sql-syntax/CREATE-INDEX.md). + +## Partial Match Algorithm + +GIN can support "partial match" queries, in which the query does not determine an exact match for one or more keys, but the possible matches fall within a narrow range of key values (within the key sorting order determined by the **compare** support method). The **extractQuery** method, instead of returning a key value to be matched exactly, returns a key value that is the lower bound of the range to be searched, and sets the **pmatch** flag true. The key range is then scanned using the **comparePartial** method. **comparePartial** must return zero for a matching index key, less than zero for a non-match that is still within the range to be searched, or greater than zero if the index key is past the range that could match. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/appendix.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/sql-reference-appendix.md similarity index 95% rename from product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/appendix.md rename to product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/sql-reference-appendix.md index c4b1d044..503ebfa7 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/appendix.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/appendix/sql-reference-appendix.md @@ -1,12 +1,12 @@ ---- -title: 附录 -summary: 附录 -author: zhang cuiping -date: 2023-04-07 ---- - -# Appendix - -- **[GIN Indexes](./gin-indexes/gin-indexes.md)** -- **[Extended Functions](extended-functions.md)** +--- +title: Appendix +summary: Appendix +author: zhang cuiping +date: 2023-04-07 +--- + +# Appendix + +- **[GIN Indexes](./gin-indexes/gin-indexes.md)** +- **[Extended Functions](extended-functions.md)** - **[Extended Syntax](extended-syntax.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/constant-and-macro.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/constant-and-macro.md index 8447eb60..e3564279 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/constant-and-macro.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/constant-and-macro.md @@ -1,24 +1,24 @@ ---- -title: Constant and Macro -summary: Constant and Macro -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Constant and Macro - -[Table 1](#constantandmacro) lists the constants and macros that can be used in openGauss. - -**Table 1** Constant and macro - -| Parameter | Description | Example | -| :------------- | :----------- | :--------------------------------| -| CURRENT_CATALOG | Specifies the current database.
| `postgres=# SELECT CURRENT_CATALOG; current_database ------------------- postgres (1 row)` | -| CURRENT_ROLE | Specifies the current user. | `postgres=# SELECT CURRENT_ROLE; current_user -------------------- omm (1 row)` | -| CURRENT_SCHEMA | Specifies the current database mode. | `postgres=# SELECT CURRENT_SCHEMA; current_schema ------------------- public (1 row)` | -| CURRENT_USER | Specifies the current user. | `postgres=# SELECT CURRENT_USER; current_user -------------------- omm (1 row)` | -| LOCALTIMESTAMP | Specifies the current session time (without time zone). | `postgres=# SELECT LOCALTIMESTAMP; timestamp ------------------- 2015-10-10 15:37:30.968538 (1 row)` | -| NULL | This parameter is left blank. | N/A | -| SESSION_USER | Specifies the current system user. | `postgres=# SELECT SESSION_USER; session_user -------------------- omm (1 row)` | -| SYSDATE | Specifies the current system date. | `postgres=# SELECT SYSDATE; sysdate ------------------- 2015-10-10 15:48:53 (1 row)` | -| USER | Specifies the current user, also called **CURRENT_USER**. | `postgres=# SELECT USER; current_user -------------------- omm (1 row)` | +--- +title: Constant and Macro +summary: Constant and Macro +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Constant and Macro + +[Table 1](#constantandmacro) lists the constants and macros that can be used in openGauss. + +**Table 1** Constant and macro + +| Parameter | Description | Example | +| :------------- | :----------- | :--------------------------------| +| CURRENT_CATALOG | Specifies the current database. | `postgres=# SELECT CURRENT_CATALOG; current_database ------------------- postgres (1 row)` | +| CURRENT_ROLE | Specifies the current user. | `postgres=# SELECT CURRENT_ROLE; current_user -------------------- omm (1 row)` | +| CURRENT_SCHEMA | Specifies the current schema. | `postgres=# SELECT CURRENT_SCHEMA; current_schema ------------------- public (1 row)` | +| CURRENT_USER | Specifies the current user. | `postgres=# SELECT CURRENT_USER; current_user -------------------- omm (1 row)` | +| LOCALTIMESTAMP | Specifies the current session time (without time zone). | `postgres=# SELECT LOCALTIMESTAMP; timestamp ------------------- 2015-10-10 15:37:30.968538 (1 row)` | +| NULL | Indicates a null value. | N/A | +| SESSION_USER | Specifies the current system user. | `postgres=# SELECT SESSION_USER; session_user -------------------- omm (1 row)` | +| SYSDATE | Specifies the current system date. | `postgres=# SELECT SYSDATE; sysdate ------------------- 2015-10-10 15:48:53 (1 row)` | +| USER | Specifies the current user, also called **CURRENT_USER**. | `postgres=# SELECT USER; current_user -------------------- omm (1 row)` | diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/controlling-transactions.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/controlling-transactions.md deleted file mode 100644 index d94cbc17..00000000 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/controlling-transactions.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Controlling Transactions -summary: Controlling Transactions -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Controlling Transactions - -A transaction is a user-defined sequence of database operations, which form an integral unit of work. - -## Starting a Transaction - -MogDB starts a transaction using START TRANSACTION and **BEGIN**.
For details, see [START TRANSACTION](../../reference-guide/sql-syntax/START-TRANSACTION.md) and [BEGIN](../../reference-guide/sql-syntax/BEGIN.md). - -## Setting a Transaction - -MogDB sets a transaction using **SET TRANSACTION** or **SET LOCAL TRANSACTION**. For details, see [SET TRANSACTION](../../reference-guide/sql-syntax/SET-TRANSACTION.md). - -## Committing a Transaction - -MogDB commits all operations of a transaction using **COMMIT** or **END**. For details, see [COMMIT | END](../../reference-guide/sql-syntax/COMMIT-END.md). - -## Rolling Back a Transaction - -If a fault occurs during a transaction and the transaction cannot proceed, the system performs rollback to cancel all the completed database operations related to the transaction. See [ROLLBACK](../../reference-guide/sql-syntax/ROLLBACK.md). - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> If an execution request (not in a transaction block) received in the database contains multiple statements, the request is packed into a transaction. If one of the statements fails, the entire request will be rolled back. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/dcl-syntax-overview.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/dcl-syntax-overview.md index 74e3fb14..d05282e6 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/dcl-syntax-overview.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/dcl-syntax-overview.md @@ -1,50 +1,50 @@ ---- -title: DCL Syntax Overview -summary: DCL Syntax Overview -author: Zhang Cuiping -date: 2021-05-17 ---- - -# DCL Syntax Overview - -Data control language (DCL) is used to create users and roles and set or modify database users or role rights. - -## Defining a Role - -A role is used to manage permissions. For database security, management and operation permissions can be granted to different roles. For details about related SQL statements, see [Table 1](#sqlstatement1). - -**Table 1** SQL statements for defining a role - -| Description | SQL Statement | -| :----------------------- | :----------------------------------------------------------- | -| Creating a role | [CREATE ROLE](../../reference-guide/sql-syntax/CREATE-ROLE.md) | -| Altering role attributes | [ALTER ROLE](../../reference-guide/sql-syntax/ALTER-ROLE.md) | -| Dropping a role | [DROP ROLE](../../reference-guide/sql-syntax/DROP-ROLE.md) | - -## Defining a User - -A user is used to log in to a database. Different permissions can be granted to users for managing data accesses and operations of the users. For details about related SQL statements, see [Table 2](#sqlstatement2). - -**Table 2** SQL statements for defining a user - -| Description | SQL Statement | -| :----------------------- | :----------------------------------------------------------- | -| Creating a User | [CREATE USER](../../reference-guide/sql-syntax/CREATE-USER.md) | -| Altering user attributes | [ALTER USER](../../reference-guide/sql-syntax/ALTER-USER.md) | -| Dropping a user | [DROP USER](../../reference-guide/sql-syntax/DROP-USER.md) | - -## Granting Rights - -MogDB provides a statement for granting rights to data objects and roles. For details, see [GRANT](../../reference-guide/sql-syntax/GRANT.md). - -## Revoking Rights - -MogDB provides a statement for revoking rights. For details, see [REVOKE](../../reference-guide/sql-syntax/REVOKE.md). - -## Setting Default Rights - -MogDB allows users to set rights for objects that will be created. 
For details, see [ALTER DEFAULT PRIVILEGES](../../reference-guide/sql-syntax/ALTER-DEFAULT-PRIVILEGES.md). - -## Shutting Down The Current Node - -MogDB allows users to run the **shutdown** command to shut down the current database node. For details, see [SHUTDOWN](../../reference-guide/sql-syntax/SHUTDOWN.md). +--- +title: DCL Syntax Overview +summary: DCL Syntax Overview +author: Zhang Cuiping +date: 2021-05-17 +--- + +# DCL Syntax Overview + +Data control language (DCL) is used to create users and roles and set or modify database users or role rights. + +## Defining a Role + +A role is used to manage permissions. For database security, management and operation permissions can be granted to different roles. For details about related SQL statements, see [Table 1](#sqlstatement1). + +**Table 1** SQL statements for defining a role + +| Description | SQL Statement | +| :----------------------- | :----------------------------------------------------------- | +| Creating a role | [CREATE ROLE](../../reference-guide/sql-syntax/CREATE-ROLE.md) | +| Altering role attributes | [ALTER ROLE](../../reference-guide/sql-syntax/ALTER-ROLE.md) | +| Dropping a role | [DROP ROLE](../../reference-guide/sql-syntax/DROP-ROLE.md) | + +## Defining a User + +A user is used to log in to a database. Different permissions can be granted to users for managing data accesses and operations of the users. For details about related SQL statements, see [Table 2](#sqlstatement2). + +**Table 2** SQL statements for defining a user + +| Description | SQL Statement | +| :----------------------- | :----------------------------------------------------------- | +| Creating a User | [CREATE USER](../../reference-guide/sql-syntax/CREATE-USER.md) | +| Altering user attributes | [ALTER USER](../../reference-guide/sql-syntax/ALTER-USER.md) | +| Dropping a user | [DROP USER](../../reference-guide/sql-syntax/DROP-USER.md) | + +## Granting Rights + +MogDB provides a statement for granting rights to data objects and roles. For details, see [GRANT](../../reference-guide/sql-syntax/GRANT.md). + +## Revoking Rights + +MogDB provides a statement for revoking rights. For details, see [REVOKE](../../reference-guide/sql-syntax/REVOKE.md). + +## Setting Default Rights + +MogDB allows users to set rights for objects that will be created. For details, see [ALTER DEFAULT PRIVILEGES](../../reference-guide/sql-syntax/ALTER-DEFAULT-PRIVILEGES.md). + +## Shutting Down The Current Node + +MogDB allows users to run the **shutdown** command to shut down the current database node. For details, see [SHUTDOWN](../../reference-guide/sql-syntax/SHUTDOWN.md). diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/ddl-syntax-overview.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/ddl-syntax-overview.md index f9ed2f73..530bbe20 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/ddl-syntax-overview.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/ddl-syntax-overview.md @@ -1,225 +1,225 @@ ---- -title: DDL Syntax Overview -summary: DDL Syntax Overview -author: Zhang Cuiping -date: 2021-05-17 ---- - -# DDL Syntax Overview - -Data definition language (DDL) is used to define or modify an object in a database, such as a table, an index, or a view. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** openGauss does not support DDL when the primary node of the database is incomplete. 
For example, if the primary node of the database in openGauss is faulty, creating a database or a table will fail. - -## Defining a CMK - -CMKs are used to encrypt CEKs for the encrypted database feature. CMK definition includes creating and deleting a CMK. For details about related SQL statements, see Table 1. - -**Table 1** SQL statements for defining a CMK - -| Function | SQL Statement | -| :------------- | :----------------------------------------------------------- | -| Creating a CMK | [CREATE CLIENT MASTER KEY](../../reference-guide/sql-syntax/CREATE-CLIENT-MASTER-KEY.md) | -| Deleting a CMK | [DROP CLIENT MASTER KEY](../../reference-guide/sql-syntax/DROP-CLIENT-MASTER-KEY.md) | - -## Defining a CEK - -CEKs are used to encrypt data for the encrypted database feature. CEK definition includes creating and deleting a CEK. For details about related SQL statements, see Table 2. - -**Table 2** SQL statements for defining a CEK - -| Function | SQL Statement | -| :------------- | :----------------------------------------------------------- | -| Creating a CEK | [CREATE COLUMN ENCRYPTION KEY](../../reference-guide/sql-syntax/CREATE-COLUMN-ENCRYPTION-KEY.md) | -| Deleting a CEK | [DROP COLUMN ENCRYPTION KEY](../../reference-guide/sql-syntax/DROP-COLUMN-ENCRYPTION-KEY.md) | - -## Defining a Database - -A database is the warehouse for organizing, storing, and managing data. Defining a database includes creating a database, altering the database attributes, and deleting the database. For details about related SQL statements, see Table 3. - -**Table 3** SQL statements for defining a database - -| Function | SQL Statement | -| :--------------------------- | :----------------------------------------------------------- | -| Creating a database | [CREATE DATABASE](../../reference-guide/sql-syntax/CREATE-DATABASE.md) | -| Altering database attributes | [ALTER DATABASE](../../reference-guide/sql-syntax/ALTER-DATABASE.md) | -| Deleting a database | [DROP DATABASE](../../reference-guide/sql-syntax/DROP-DATABASE.md) | - -## Defining a schema - -A schema is the set of a group of database objects and is used to control the access to the database objects. For details about related SQL statements, see Table 4. - -**Table 4** SQL statements for defining a schema - -| Function | SQL Statement | -| :------------------------- | :----------------------------------------------------------- | -| Creating a schema | [CREATE SCHEMA](../../reference-guide/sql-syntax/CREATE-SCHEMA.md) | -| Altering schema attributes | [ALTER SCHEMA](../../reference-guide/sql-syntax/ALTER-SCHEMA.md) | -| Deleting a schema | [DROP SCHEMA](../../reference-guide/sql-syntax/DROP-SCHEMA.md) | - -## Defining a Tablespace - -A tablespace is used to manage data objects and corresponds to a catalog on a disk. For details about related SQL statements, see Table 5. - -**Table 5** SQL statements for defining a tablespace - -| Function | SQL Statement | -| :----------------------------- | :----------------------------------------------------------- | -| Creating a tablespace | [CREATE TABLESPACE](../../reference-guide/sql-syntax/CREATE-TABLESPACE.md) | -| Altering tablespace attributes | [ALTER TABLESPACE](../../reference-guide/sql-syntax/ALTER-TABLESPACE.md) | -| Deleting a tablespace | [DROP TABLESPACE](../../reference-guide/sql-syntax/DROP-TABLESPACE.md) | - -## Defining a Table - -A table is a special data structure in a database and is used to store data objects and relationship between data objects. 
For details about related SQL statements, see Table 6. - -**Table 6** SQL statements for defining a table - -| Function | SQL Statement | -| :------------------------ | :----------------------------------------------------------- | -| Creating a table | [CREATE TABLE](../../reference-guide/sql-syntax/CREATE-TABLE.md) | -| Altering table attributes | [ALTER TABLE](../../reference-guide/sql-syntax/ALTER-TABLE.md) | -| Deleting a table | [DROP TABLE](../../reference-guide/sql-syntax/DROP-TABLE.md) | - -## Defining a Partitioned Table - -A partitioned table is a logical table used to improve query performance and does not store data (data is stored in common tables). For details about related SQL statements, see Table 7. - -**Table 7** SQL statements for defining a partitioned table - -| Function | SQL Statement | -| :------------------------------------ | :----------------------------------------------------------- | -| Creating a partitioned table | [CREATE TABLE PARTITION](../../reference-guide/sql-syntax/CREATE-TABLE-PARTITION.md) | -| Creating a partition | [ALTER TABLE PARTITION](../../reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md) | -| Altering partitioned table attributes | [ALTER TABLE PARTITION](../../reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md) | -| Deleting a partition | [ALTER TABLE PARTITION](../../reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md) | -| Deleting a partitioned table | [DROP TABLE](../../reference-guide/sql-syntax/DROP-TABLE.md) | - -## Defining an Index - -An index indicates the sequence of values in one or more columns in a database table. It is a data structure that improves the speed of data access to specific information in a database table. For details about related SQL statements, see Table 8. - -**Table 8** SQL statements for defining an index - -| Function | SQL Statement | -| :------------------------ | :----------------------------------------------------------- | -| Creating an index | [CREATE INDEX](../../reference-guide/sql-syntax/CREATE-INDEX.md) | -| Altering index attributes | [ALTER INDEX](../../reference-guide/sql-syntax/ALTER-INDEX.md) | -| Deleting an index | [DROP INDEX](../../reference-guide/sql-syntax/DROP-INDEX.md) | -| Rebuilding an index | [REINDEX](../../reference-guide/sql-syntax/REINDEX.md) | - -## Defining a Stored Procedure - -A stored procedure is a set of SQL statements for achieving specific functions and is stored in the database after compiling. Users can specify a name and provide parameters (if necessary) to execute the stored procedure. For details about related SQL statements, see Table 9. - -**Table 9** SQL statements for defining a stored procedure - -| Function | SQL Statement | -| :-------------------------- | :----------------------------------------------------------- | -| Creating a stored procedure | [CREATE PROCEDURE](../../reference-guide/sql-syntax/CREATE-PROCEDURE.md) | -| Deleting a stored procedure | [DROP PROCEDURE](../../reference-guide/sql-syntax/DROP-PROCEDURE.md) | - -## Defining a Function - -In openGauss, a function is similar to a stored procedure, which is a set of SQL statements. The function and stored procedure are used the same. For details about related SQL statements, see Table 10. 
- -**Table 10** SQL statements for defining a function - -| Function | SQL Statement | -| :--------------------------- | :----------------------------------------------------------- | -| Creating a function | [CREATE FUNCTION](../../reference-guide/sql-syntax/CREATE-FUNCTION.md) | -| Altering function attributes | [ALTER FUNCTION](../../reference-guide/sql-syntax/ALTER-FUNCTION.md) | -| Deleting a function | [DROP FUNCTION](../../reference-guide/sql-syntax/DROP-FUNCTION.md) | - -## Defining a Package - -A package consists of the package specification and package body. It is used to manage stored procedures and functions by class, which is similar to classes in languages such as Java and C++. - -**Table 11** SQL statements for defining a package - -| Function | SQL Statement | -| :----------------- | :----------------------------------------------------------- | -| Creating a package | [CREATE PACKAGE](../../reference-guide/sql-syntax/CREATE-PACKAGE.md) | -| Deleting a package | [DROP PACKAGE](../../reference-guide/sql-syntax/DROP-PACKAGE.md) | - -## Defining a View - -A view is a virtual table exported from one or more basic tables. It is used to control data accesses of users. Table 12 lists the related SQL statements. - -**Table 12** SQL statements for defining a view - -| Function | SQL Statement | -| :-------------- | :----------------------------------------------------------- | -| Creating a view | [CREATE VIEW](../../reference-guide/sql-syntax/CREATE-VIEW.md) | -| Deleting a view | [DROP VIEW](../../reference-guide/sql-syntax/DROP-VIEW.md) | - -## Defining a Cursor - -To process SQL statements, the stored procedure process assigns a memory segment to store context association. Cursors are handles or pointers to context regions. With a cursor, the stored procedure can control alterations in context areas. For details, see Table 13. 
- -**Table 13** SQL statements for defining a cursor - -| Function | SQL Statement | -| :-------------------------- | :--------------------------------------------------- | -| Creating a cursor | [CURSOR](../../reference-guide/sql-syntax/CURSOR.md) | -| Moving a cursor | [MOVE](../../reference-guide/sql-syntax/MOVE.md) | -| Fetching data from a cursor | [FETCH](../../reference-guide/sql-syntax/FETCH.md) | -| Closing a cursor | [CLOSE](../../reference-guide/sql-syntax/CLOSE.md) | - -## Defining an Aggregate Function - -**Table 14** SQL statements for defining an aggregate function - -| Function | SQL Statement | -| :------------------------------ | :----------------------------------------------------------- | -| Creating an aggregate function | [CREATE AGGREGATE](../../reference-guide/sql-syntax/CREATE-AGGREGATE.md) | -| Modifying an aggregate function | [ALTER AGGREGATE](../../reference-guide/sql-syntax/ALTER-AGGREGATE.md) | -| Deleting an aggregate function | [DROP AGGREGATE](../../reference-guide/sql-syntax/DROP-AGGREGATE.md) | - -## Defining Data Type Conversion - -**Table 15** SQL statements for defining a data type - -| Function | SQL Statement | -| :----------------------------------------- | :----------------------------------------------------------- | -| Creating user-defined data type conversion | [CREATE CAST](../../reference-guide/sql-syntax/CREATE-CAST.md) | -| Deleting user-defined data type conversion | [DROP CAST](../../reference-guide/sql-syntax/DROP-CAST.md) | - -## Defining a Plug-in Extension - -**Table 16** SQL statements for defining a plug-in extension - -| Function | SQL Statement | -| :---------------------------- | :----------------------------------------------------------- | -| Creating a plug-in extension | [CREATE EXTENSION](../../reference-guide/sql-syntax/CREATE-EXTENSION.md) | -| Modifying a plug-in extension | [ALTER EXTENSION](../../reference-guide/sql-syntax/ALTER-EXTENSION.md) | -| Deleting a plug-in extension | [DROP EXTENSION](../../reference-guide/sql-syntax/DROP-EXTENSION.md) | - -## Defining an Operator - -**Table 17** SQL statements for defining an operator - -| Function | SQL Statement | -| :------------------- | :----------------------------------------------------------- | -| Creating an operator | [CREATE OPERATOR](../../reference-guide/sql-syntax/CREATE-OPERATOR.md) | -| Deleting an operator | [DROP OPERATOR](../../reference-guide/sql-syntax/DROP-OPERATOR.md) | - -## Defining a Procedural Language - -**Table 18** SQL statements for defining a procedural language - -| Function | SQL Statement | -| :----------------------------- | :----------------------------------------------------------- | -| Creating a procedural language | [CREATE LANGUAGE](../../reference-guide/sql-syntax/CREATE-LANGUAGE.md) | -| Changing a procedural language | [ALTER LANGUAGE](../../reference-guide/sql-syntax/ALTER-LANGUAGE.md) | -| Deleting a procedural language | [DROP LANGUAGE](../../reference-guide/sql-syntax/DROP-LANGUAGE.md) | - -## Defining a Data Type - -**Table 19** SQL statements for defining a data type - -| Function | SQL Statement | -| :-------------------- | :----------------------------------------------------------- | -| Creating a data type | [CREATE TYPE](../../reference-guide/sql-syntax/CREATE-TYPE.md) | -| Modifying a data type | [ALTER TYPE](../../reference-guide/sql-syntax/ALTER-TYPE.md) | -| Deleting a data type | [DROP TYPE](../../reference-guide/sql-syntax/DROP-TYPE.md) | +--- +title: DDL Syntax Overview +summary: DDL Syntax Overview +author: 
Zhang Cuiping +date: 2021-05-17 +--- + +# DDL Syntax Overview + +Data definition language (DDL) is used to define or modify an object in a database, such as a table, an index, or a view. + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** openGauss does not support DDL when the primary node of the database is incomplete. For example, if the primary node of the database in openGauss is faulty, creating a database or a table will fail. + +## Defining a CMK + +CMKs are used to encrypt CEKs for the encrypted database feature. CMK definition includes creating and deleting a CMK. For details about related SQL statements, see Table 1. + +**Table 1** SQL statements for defining a CMK + +| Function | SQL Statement | +| :------------- | :----------------------------------------------------------- | +| Creating a CMK | [CREATE CLIENT MASTER KEY](../../reference-guide/sql-syntax/CREATE-CLIENT-MASTER-KEY.md) | +| Deleting a CMK | [DROP CLIENT MASTER KEY](../../reference-guide/sql-syntax/DROP-CLIENT-MASTER-KEY.md) | + +## Defining a CEK + +CEKs are used to encrypt data for the encrypted database feature. CEK definition includes creating and deleting a CEK. For details about related SQL statements, see Table 2. + +**Table 2** SQL statements for defining a CEK + +| Function | SQL Statement | +| :------------- | :----------------------------------------------------------- | +| Creating a CEK | [CREATE COLUMN ENCRYPTION KEY](../../reference-guide/sql-syntax/CREATE-COLUMN-ENCRYPTION-KEY.md) | +| Deleting a CEK | [DROP COLUMN ENCRYPTION KEY](../../reference-guide/sql-syntax/DROP-COLUMN-ENCRYPTION-KEY.md) | + +## Defining a Database + +A database is the warehouse for organizing, storing, and managing data. Defining a database includes creating a database, altering the database attributes, and deleting the database. For details about related SQL statements, see Table 3. + +**Table 3** SQL statements for defining a database + +| Function | SQL Statement | +| :--------------------------- | :----------------------------------------------------------- | +| Creating a database | [CREATE DATABASE](../../reference-guide/sql-syntax/CREATE-DATABASE.md) | +| Altering database attributes | [ALTER DATABASE](../../reference-guide/sql-syntax/ALTER-DATABASE.md) | +| Deleting a database | [DROP DATABASE](../../reference-guide/sql-syntax/DROP-DATABASE.md) | + +## Defining a Schema + +A schema is a collection of database objects and is used to control access to these objects. For details about related SQL statements, see Table 4. + +**Table 4** SQL statements for defining a schema + +| Function | SQL Statement | +| :------------------------- | :----------------------------------------------------------- | +| Creating a schema | [CREATE SCHEMA](../../reference-guide/sql-syntax/CREATE-SCHEMA.md) | +| Altering schema attributes | [ALTER SCHEMA](../../reference-guide/sql-syntax/ALTER-SCHEMA.md) | +| Deleting a schema | [DROP SCHEMA](../../reference-guide/sql-syntax/DROP-SCHEMA.md) | + +## Defining a Tablespace + +A tablespace is used to manage data objects and corresponds to a directory on a disk. For details about related SQL statements, see Table 5.
+ +**Table 5** SQL statements for defining a tablespace + +| Function | SQL Statement | +| :----------------------------- | :----------------------------------------------------------- | +| Creating a tablespace | [CREATE TABLESPACE](../../reference-guide/sql-syntax/CREATE-TABLESPACE.md) | +| Altering tablespace attributes | [ALTER TABLESPACE](../../reference-guide/sql-syntax/ALTER-TABLESPACE.md) | +| Deleting a tablespace | [DROP TABLESPACE](../../reference-guide/sql-syntax/DROP-TABLESPACE.md) | + +## Defining a Table + +A table is a special data structure in a database and is used to store data objects and relationship between data objects. For details about related SQL statements, see Table 6. + +**Table 6** SQL statements for defining a table + +| Function | SQL Statement | +| :------------------------ | :----------------------------------------------------------- | +| Creating a table | [CREATE TABLE](../../reference-guide/sql-syntax/CREATE-TABLE.md) | +| Altering table attributes | [ALTER TABLE](../../reference-guide/sql-syntax/ALTER-TABLE.md) | +| Deleting a table | [DROP TABLE](../../reference-guide/sql-syntax/DROP-TABLE.md) | + +## Defining a Partitioned Table + +A partitioned table is a logical table used to improve query performance and does not store data (data is stored in common tables). For details about related SQL statements, see Table 7. + +**Table 7** SQL statements for defining a partitioned table + +| Function | SQL Statement | +| :------------------------------------ | :----------------------------------------------------------- | +| Creating a partitioned table | [CREATE TABLE PARTITION](../../reference-guide/sql-syntax/CREATE-TABLE-PARTITION.md) | +| Creating a partition | [ALTER TABLE PARTITION](../../reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md) | +| Altering partitioned table attributes | [ALTER TABLE PARTITION](../../reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md) | +| Deleting a partition | [ALTER TABLE PARTITION](../../reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md) | +| Deleting a partitioned table | [DROP TABLE](../../reference-guide/sql-syntax/DROP-TABLE.md) | + +## Defining an Index + +An index indicates the sequence of values in one or more columns in a database table. It is a data structure that improves the speed of data access to specific information in a database table. For details about related SQL statements, see Table 8. + +**Table 8** SQL statements for defining an index + +| Function | SQL Statement | +| :------------------------ | :----------------------------------------------------------- | +| Creating an index | [CREATE INDEX](../../reference-guide/sql-syntax/CREATE-INDEX.md) | +| Altering index attributes | [ALTER INDEX](../../reference-guide/sql-syntax/ALTER-INDEX.md) | +| Deleting an index | [DROP INDEX](../../reference-guide/sql-syntax/DROP-INDEX.md) | +| Rebuilding an index | [REINDEX](../../reference-guide/sql-syntax/REINDEX.md) | + +## Defining a Stored Procedure + +A stored procedure is a set of SQL statements for achieving specific functions and is stored in the database after compiling. Users can specify a name and provide parameters (if necessary) to execute the stored procedure. For details about related SQL statements, see Table 9. 
+ +**Table 9** SQL statements for defining a stored procedure + +| Function | SQL Statement | +| :-------------------------- | :----------------------------------------------------------- | +| Creating a stored procedure | [CREATE PROCEDURE](../../reference-guide/sql-syntax/CREATE-PROCEDURE.md) | +| Deleting a stored procedure | [DROP PROCEDURE](../../reference-guide/sql-syntax/DROP-PROCEDURE.md) | + +## Defining a Function + +In openGauss, a function is similar to a stored procedure, which is a set of SQL statements. Functions and stored procedures are used in the same way. For details about related SQL statements, see Table 10. + +**Table 10** SQL statements for defining a function + +| Function | SQL Statement | +| :--------------------------- | :----------------------------------------------------------- | +| Creating a function | [CREATE FUNCTION](../../reference-guide/sql-syntax/CREATE-FUNCTION.md) | +| Altering function attributes | [ALTER FUNCTION](../../reference-guide/sql-syntax/ALTER-FUNCTION.md) | +| Deleting a function | [DROP FUNCTION](../../reference-guide/sql-syntax/DROP-FUNCTION.md) | + +## Defining a Package + +A package consists of the package specification and package body. It is used to manage stored procedures and functions by class, which is similar to classes in languages such as Java and C++. + +**Table 11** SQL statements for defining a package + +| Function | SQL Statement | +| :----------------- | :----------------------------------------------------------- | +| Creating a package | [CREATE PACKAGE](../../reference-guide/sql-syntax/CREATE-PACKAGE.md) | +| Deleting a package | [DROP PACKAGE](../../reference-guide/sql-syntax/DROP-PACKAGE.md) | + +## Defining a View + +A view is a virtual table exported from one or more basic tables. It is used to control data accesses of users. Table 12 lists the related SQL statements. + +**Table 12** SQL statements for defining a view + +| Function | SQL Statement | +| :-------------- | :----------------------------------------------------------- | +| Creating a view | [CREATE VIEW](../../reference-guide/sql-syntax/CREATE-VIEW.md) | +| Deleting a view | [DROP VIEW](../../reference-guide/sql-syntax/DROP-VIEW.md) | + +## Defining a Cursor + +To process SQL statements, a stored procedure allocates a memory segment to store the context association. A cursor is a handle or pointer to such a context region. With a cursor, the stored procedure can control changes to the context area. For details, see Table 13.
+ +**Table 13** SQL statements for defining a cursor + +| Function | SQL Statement | +| :-------------------------- | :--------------------------------------------------- | +| Creating a cursor | [CURSOR](../../reference-guide/sql-syntax/CURSOR.md) | +| Moving a cursor | [MOVE](../../reference-guide/sql-syntax/MOVE.md) | +| Fetching data from a cursor | [FETCH](../../reference-guide/sql-syntax/FETCH.md) | +| Closing a cursor | [CLOSE](../../reference-guide/sql-syntax/CLOSE.md) | + +## Defining an Aggregate Function + +**Table 14** SQL statements for defining an aggregate function + +| Function | SQL Statement | +| :------------------------------ | :----------------------------------------------------------- | +| Creating an aggregate function | [CREATE AGGREGATE](../../reference-guide/sql-syntax/CREATE-AGGREGATE.md) | +| Modifying an aggregate function | [ALTER AGGREGATE](../../reference-guide/sql-syntax/ALTER-AGGREGATE.md) | +| Deleting an aggregate function | [DROP AGGREGATE](../../reference-guide/sql-syntax/DROP-AGGREGATE.md) | + +## Defining Data Type Conversion + +**Table 15** SQL statements for defining a data type + +| Function | SQL Statement | +| :----------------------------------------- | :----------------------------------------------------------- | +| Creating user-defined data type conversion | [CREATE CAST](../../reference-guide/sql-syntax/CREATE-CAST.md) | +| Deleting user-defined data type conversion | [DROP CAST](../../reference-guide/sql-syntax/DROP-CAST.md) | + +## Defining a Plug-in Extension + +**Table 16** SQL statements for defining a plug-in extension + +| Function | SQL Statement | +| :---------------------------- | :----------------------------------------------------------- | +| Creating a plug-in extension | [CREATE EXTENSION](../../reference-guide/sql-syntax/CREATE-EXTENSION.md) | +| Modifying a plug-in extension | [ALTER EXTENSION](../../reference-guide/sql-syntax/ALTER-EXTENSION.md) | +| Deleting a plug-in extension | [DROP EXTENSION](../../reference-guide/sql-syntax/DROP-EXTENSION.md) | + +## Defining an Operator + +**Table 17** SQL statements for defining an operator + +| Function | SQL Statement | +| :------------------- | :----------------------------------------------------------- | +| Creating an operator | [CREATE OPERATOR](../../reference-guide/sql-syntax/CREATE-OPERATOR.md) | +| Deleting an operator | [DROP OPERATOR](../../reference-guide/sql-syntax/DROP-OPERATOR.md) | + +## Defining a Procedural Language + +**Table 18** SQL statements for defining a procedural language + +| Function | SQL Statement | +| :----------------------------- | :----------------------------------------------------------- | +| Creating a procedural language | [CREATE LANGUAGE](../../reference-guide/sql-syntax/CREATE-LANGUAGE.md) | +| Changing a procedural language | [ALTER LANGUAGE](../../reference-guide/sql-syntax/ALTER-LANGUAGE.md) | +| Deleting a procedural language | [DROP LANGUAGE](../../reference-guide/sql-syntax/DROP-LANGUAGE.md) | + +## Defining a Data Type + +**Table 19** SQL statements for defining a data type + +| Function | SQL Statement | +| :-------------------- | :----------------------------------------------------------- | +| Creating a data type | [CREATE TYPE](../../reference-guide/sql-syntax/CREATE-TYPE.md) | +| Modifying a data type | [ALTER TYPE](../../reference-guide/sql-syntax/ALTER-TYPE.md) | +| Deleting a data type | [DROP TYPE](../../reference-guide/sql-syntax/DROP-TYPE.md) | diff --git 
a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/dml-syntax-overview.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/dml-syntax-overview.md index c81b97a9..3d5be923 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/dml-syntax-overview.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/dml-syntax-overview.md @@ -1,51 +1,51 @@ ---- -title: DML Syntax Overview -summary: DML Syntax Overview -author: Zhang Cuiping -date: 2021-05-17 ---- - -# DML Syntax Overview - -Data manipulation language (DML) is used to perform operations on data in database tables, such as inserting, updating, querying, or deleting data. - -## Inserting Data - -Inserting data refers to adding one or multiple records to a database table. For details, see [INSERT](../../reference-guide/sql-syntax/INSERT.md). - -## Updating Data - -Updating data refers to modifying one or multiple records in a database table. For details, see [UPDATE](../../reference-guide/sql-syntax/UPDATE.md). - -## Querying Data - -The database query statement **SELECT** is used to search required information in a database. For details, see [SELECT](../../reference-guide/sql-syntax/SELECT.md). - -## Deleting Data - -MogDB provides two statements for deleting data from database tables. To delete data meeting specified conditions from a database table, see [DELETE](../../reference-guide/sql-syntax/DELETE.md). To delete all data from a database table, see [TRUNCATE](../../reference-guide/sql-syntax/TRUNCATE.md). - -**TRUNCATE** can quickly delete all data from a database table, which achieves the effect same as that running **DELETE** to delete data without specifying conditions from each table. Deletion efficiency using **TRUNCATE** is faster because **TRUNCATE** does not scan tables. Therefore, **TRUNCATE** is useful in large tables. - -## Copying Data - -MogDB provides a statement for copying data between tables and files. For details, see [COPY](../../reference-guide/sql-syntax/COPY.md). - -## Locking a Table - -MogDB provides multiple lock modes to control concurrent accesses to table data. For details, see [LOCK](../../reference-guide/sql-syntax/LOCK.md). - -## Calling a Function - -MogDB provides three statements for calling functions. These statements are the same in the syntax structure. For details, see [CALL](../../reference-guide/sql-syntax/CALL.md). - -## Session Management - -A session is a connection established between the user and the database. [Table 1](#sqlstatement) lists the related SQL statements. - -**Table 1** SQL statements related to sessions - -| Function | SQL Statement | -| :----------------- | :----------------------------------------------------------- | -| Altering a session | [ALTER SESSION](../../reference-guide/sql-syntax/ALTER-SESSION.md) | -| Killing a session | [ALTER SYSTEM KILL SESSION](../../reference-guide/sql-syntax/ALTER-SYSTEM-KILL-SESSION.md) | +--- +title: DML Syntax Overview +summary: DML Syntax Overview +author: Zhang Cuiping +date: 2021-05-17 +--- + +# DML Syntax Overview + +Data manipulation language (DML) is used to perform operations on data in database tables, such as inserting, updating, querying, or deleting data. + +## Inserting Data + +Inserting data refers to adding one or multiple records to a database table. For details, see [INSERT](../../reference-guide/sql-syntax/INSERT.md). + +## Updating Data + +Updating data refers to modifying one or multiple records in a database table. 
For details, see [UPDATE](../../reference-guide/sql-syntax/UPDATE.md). + +## Querying Data + +The database query statement **SELECT** is used to search for required information in a database. For details, see [SELECT](../../reference-guide/sql-syntax/SELECT.md). + +## Deleting Data + +MogDB provides two statements for deleting data from database tables. To delete data meeting specified conditions from a database table, see [DELETE](../../reference-guide/sql-syntax/DELETE.md). To delete all data from a database table, see [TRUNCATE](../../reference-guide/sql-syntax/TRUNCATE.md). + +**TRUNCATE** can quickly delete all data from a database table, achieving the same effect as running **DELETE** without specifying conditions on each table. Deletion using **TRUNCATE** is faster because **TRUNCATE** does not scan the tables. Therefore, **TRUNCATE** is especially useful for large tables. + +## Copying Data + +MogDB provides a statement for copying data between tables and files. For details, see [COPY](../../reference-guide/sql-syntax/COPY.md). + +## Locking a Table + +MogDB provides multiple lock modes to control concurrent accesses to table data. For details, see [LOCK](../../reference-guide/sql-syntax/LOCK.md). + +## Calling a Function + +MogDB provides three statements for calling functions. These statements share the same syntax structure. For details, see [CALL](../../reference-guide/sql-syntax/CALL.md). + +## Session Management + +A session is a connection established between the user and the database. [Table 1](#sqlstatement) lists the related SQL statements. + +**Table 1** SQL statements related to sessions + +| Function | SQL Statement | +| :----------------- | :----------------------------------------------------------- | +| Altering a session | [ALTER SESSION](../../reference-guide/sql-syntax/ALTER-SESSION.md) | +| Killing a session | [ALTER SYSTEM KILL SESSION](../../reference-guide/sql-syntax/ALTER-SYSTEM-KILL-SESSION.md) | diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/array-expressions.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/array-expressions.md index ed3809ba..72f60c97 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/array-expressions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/array-expressions.md @@ -1,94 +1,94 @@ ---- -title: Array Expressions -summary: Array Expressions -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Array Expressions - -## IN - -_expression **IN** (value [, …]) - -The parentheses on the right contain an expression list. The expression result on the left is compared with the content in the expression list.
The expression result on the left is compared with the content in the expression list. If the content in the list does not meet the expression result on the left, the result of **NOT IN** is **true**. If any content meets the expression result, the result of **NOT IN** is **false**. - -Example: - -```sql -mogdb=# SELECT 8000+500 NOT IN (10000, 9000) AS RESULT; - result ----------- - t -(1 row) -``` - -If the query statement result is null or the expression list does not meet the expression conditions and at least one empty value is returned for the expression list on the right, the result of **NOT IN** is **null** rather than **false**. This method is consistent with the Boolean rules used when SQL statements return empty values. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> In all situations, **X NOT IN Y** equals to **NOT(X IN Y)**. - -## ANY/SOME (array) - -_expression operator **ANY** (array expression)_ - -_expression operator **SOME** (array expression) - -```sql -mogdb=# SELECT 8000+500 < SOME (array[10000,9000]) AS RESULT; - result ----------- - t -(1 row) -mogdb=# SELECT 8000+500 < ANY (array[10000,9000]) AS RESULT; - result ----------- - t -(1 row) -``` - -The right-hand side is a parenthesized expression, which must yield an array value. The result of the expression on the left uses operators to compute and compare the results in each row of the array expression. The comparison result must be a Boolean value. - -- If at least one comparison result is true, the result of **ANY** is **true**. -- If no comparison result is true, the result of ANY is false. -- If no comparison result is true and the array expression generates at least one null value, the value of ANY is NULL, rather than false. This method is consistent with the Boolean rules used when SQL statements return empty values. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> **SOME** is a synonym of **ANY**. - -## ALL (array) - -_expression operator **ALL** (array expression)_ - -The right-hand side is a parenthesized expression, which must yield an array value. The result of the expression on the left uses operators to compute and compare the results in each row of the array expression. The comparison result must be a Boolean value. - -- The result of **ALL** is **true** if all comparisons yield **true** (including the case where the array has zero elements). -- The result of **ALL** is **false** if one or multiple comparisons yield **false**. - -- If the array expression yields a null array, the result of **ALL** will be null. If the left-hand expression yields null, the result of **ALL** is ordinarily null (though a non-strict comparison operator could possibly yield a different result). Also, if the right-hand array contains any null elements and no false comparison result is obtained, the result of **ALL** will be null, not true (again, assuming a strict comparison operator). This method is consistent with the Boolean rules used when SQL statements return empty values. - -```sql -mogdb=# SELECT 8000+500 < ALL (array[10000,9000]) AS RESULT; - result ----------- - t -(1 row) -``` +--- +title: Array Expressions +summary: Array Expressions +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Array Expressions + +## IN + +_expression **IN** (value [, …]) + +The parentheses on the right contain an expression list. The expression result on the left is compared with the content in the expression list. 
If the content in the list meets the expression result on the left, the result of **IN** is **true**. If no result meets the requirements, the result of **IN** is **false**. + +Example: + +```sql +mogdb=# SELECT 8000+500 IN (10000, 9000) AS RESULT; + result +---------- + f +(1 row) +``` + +If the expression result is null or the expression list does not meet the expression conditions and at least one empty value is returned for the expression list on the right, the result of **IN** is **null** rather than **false**. This method is consistent with the Boolean rules used when SQL statements return empty values. + +## NOT IN + +_expression **NOT IN** (value [, …])_ + +The parentheses on the right contain an expression list. The expression result on the left is compared with the content in the expression list. If the content in the list does not meet the expression result on the left, the result of **NOT IN** is **true**. If any content meets the expression result, the result of **NOT IN** is **false**. + +Example: + +```sql +mogdb=# SELECT 8000+500 NOT IN (10000, 9000) AS RESULT; + result +---------- + t +(1 row) +``` + +If the query statement result is null or the expression list does not meet the expression conditions and at least one empty value is returned for the expression list on the right, the result of **NOT IN** is **null** rather than **false**. This method is consistent with the Boolean rules used when SQL statements return empty values. + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** +> In all situations, **X NOT IN Y** equals to **NOT(X IN Y)**. + +## ANY/SOME (array) + +_expression operator **ANY** (array expression)_ + +_expression operator **SOME** (array expression) + +```sql +mogdb=# SELECT 8000+500 < SOME (array[10000,9000]) AS RESULT; + result +---------- + t +(1 row) +mogdb=# SELECT 8000+500 < ANY (array[10000,9000]) AS RESULT; + result +---------- + t +(1 row) +``` + +The right-hand side is a parenthesized expression, which must yield an array value. The result of the expression on the left uses operators to compute and compare the results in each row of the array expression. The comparison result must be a Boolean value. + +- If at least one comparison result is true, the result of **ANY** is **true**. +- If no comparison result is true, the result of ANY is false. +- If no comparison result is true and the array expression generates at least one null value, the value of ANY is NULL, rather than false. This method is consistent with the Boolean rules used when SQL statements return empty values. + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** +> **SOME** is a synonym of **ANY**. + +## ALL (array) + +_expression operator **ALL** (array expression)_ + +The right-hand side is a parenthesized expression, which must yield an array value. The result of the expression on the left uses operators to compute and compare the results in each row of the array expression. The comparison result must be a Boolean value. + +- The result of **ALL** is **true** if all comparisons yield **true** (including the case where the array has zero elements). +- The result of **ALL** is **false** if one or multiple comparisons yield **false**. + +- If the array expression yields a null array, the result of **ALL** will be null. If the left-hand expression yields null, the result of **ALL** is ordinarily null (though a non-strict comparison operator could possibly yield a different result). 
+
+```sql
+mogdb=# SELECT 8000+500 < ALL (array[10000,9000]) AS RESULT;
+ result
+----------
+ t
+(1 row)
+```
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/row-expressions.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/row-expressions.md
index 8cfbb7d6..ce60305f 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/row-expressions.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/row-expressions.md
@@ -1,28 +1,28 @@
----
-title: Row Expressions
-summary: Row Expressions
-author: Zhang Cuiping
-date: 2021-05-17
----
-
-# Row Expressions
-
-Syntax:
-
-*row_constructor operator row_constructor*
-
-Both sides of the row expression are row constructors. The values of both rows must have the same number of fields and they are compared with each other. The row comparison allows operators including =, <>, <, <=, and >= or a similar operator.
-
-The use of operators =<> is slightly different from other operators. If all fields of two rows are not empty and equal, the two rows are equal. If any field in two rows is not empty and not equal, the two rows are not equal. Otherwise, the comparison result is null.
-
-For operators <, <=, >, and > =, the fields in rows are compared from left to right until a pair of fields that are not equal or are empty are detected. If the pair of fields contains at least one null value, the comparison result is null. Otherwise, the comparison result of this pair of fields is the final result.
-
-Example:
-
-```sql
-mogdb=# SELECT ROW(1,2,NULL) < ROW(1,3,0) AS RESULT;
- result
----------
- t
-(1 row)
-```
+---
+title: Row Expressions
+summary: Row Expressions
+author: Zhang Cuiping
+date: 2021-05-17
+---
+
+# Row Expressions
+
+Syntax:
+
+*row_constructor operator row_constructor*
+
+Both sides of the row expression are row constructors. The values of both rows must have the same number of fields, and they are compared with each other. The row comparison allows the operators =, <>, <, <=, > and >=, or a similar operator.
+
+The use of the operators = and <> is slightly different from that of the other operators. Two rows are equal if all their corresponding fields are non-null and equal. Two rows are unequal if any pair of corresponding fields is non-null and unequal. Otherwise, the comparison result is null.
+
+For the operators <, <=, >, and >=, the fields in rows are compared from left to right until a pair of fields that are unequal or contain a null is detected. If that pair of fields contains at least one null value, the comparison result is null. Otherwise, the comparison result of this pair of fields is the final result.
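+
+As a quick illustration of the equality rules above, a minimal sketch (the result follows directly from those rules: the first pair of fields is non-null and unequal, so the null in the second pair does not matter):
+
+```sql
+mogdb=# SELECT ROW(1, NULL) = ROW(2, NULL) AS RESULT;
+ result
+----------
+ f
+(1 row)
+```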
+ +Example: + +```sql +mogdb=# SELECT ROW(1,2,NULL) < ROW(1,3,0) AS RESULT; + result +---------- + t +(1 row) +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/subquery-expressions.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/subquery-expressions.md index 1478c123..30826851 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/subquery-expressions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/expressions/subquery-expressions.md @@ -1,146 +1,146 @@ ---- -title: Subquery Expressions -summary: Subquery Expressions -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Subquery Expressions - -Subquery expressions include the following types: - -- EXISTS/NOT EXISTS - - [Figure 1](#existnotexist) shows the syntax of an **EXISTS/NOT EXISTS** expression. - - **Figure 1** EXISTS/NOT EXISTS::= . - - ![exists-not-exists](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/subquery-expressions-1.png) - - The parameter of an **EXISTS** expression is an arbitrary **SELECT** statement, or subquery. The subquery is evaluated to determine whether it returns any rows. If it returns at least one row, the result of **EXISTS** is "true". If the subquery returns no rows, the result of **EXISTS** is "false". - - The subquery will generally only be executed long enough to determine whether at least one row is returned, not all the way to completion. - - Example: - - ```sql - mogdb=# SELECT sr_reason_sk,sr_customer_sk FROM tpcds.store_returns WHERE EXISTS (SELECT d_dom FROM tpcds.date_dim WHERE d_dom = store_returns.sr_reason_sk and sr_customer_sk <10); - sr_reason_sk | sr_customer_sk - --------------+---------------- - 13 | 2 - 22 | 5 - 17 | 7 - 25 | 7 - 3 | 7 - 31 | 5 - 7 | 7 - 14 | 6 - 20 | 4 - 5 | 6 - 10 | 3 - 1 | 5 - 15 | 2 - 4 | 1 - 26 | 3 - (15 rows) - ``` - -- IN/NOT IN - - [Figure 2](#innotin) shows the syntax of an **IN/NOT IN** expression. - - **Figure 2** IN/NOT IN::= - - ![in-not-in](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/subquery-expressions-2.png) - - The right-hand side is a parenthesized subquery, which must return exactly one column. The left-hand expression is evaluated and compared to each row of the subquery result. The result of **IN** is "true" if any equal subquery row is found. The result is "false" if no equal row is found (including the case where the subquery returns no rows). - - This is in accordance with SQL normal rules for Boolean combinations of null values. If the columns corresponding to two rows equal and are not empty, the two rows are equal to each other. If any columns corresponding to the two rows do not equal and are not empty, the two rows are not equal to each other. Otherwise, the result is **NULL**. If there are no equal right-hand values and at least one right-hand row yields null, the result of **IN** will be null, not false. - - Example: - - ```sql - mogdb=# SELECT sr_reason_sk,sr_customer_sk FROM tpcds.store_returns WHERE sr_customer_sk IN (SELECT d_dom FROM tpcds.date_dim WHERE d_dom < 10); - sr_reason_sk | sr_customer_sk - --------------+---------------- - 10 | 3 - 26 | 3 - 22 | 5 - 31 | 5 - 1 | 5 - 32 | 5 - 32 | 5 - 4 | 1 - 15 | 2 - 13 | 2 - 33 | 4 - 20 | 4 - 33 | 8 - 5 | 6 - 14 | 6 - 17 | 7 - 3 | 7 - 25 | 7 - 7 | 7 - (19 rows) - ``` - -- ANY/SOME - - [Figure 3](#anysome) shows the syntax of an **ANY/SOME** expression. 
- - **Figure 3** any/some::= - - ![any-some](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/subquery-expressions-3.png) - - The right-hand side is a parenthesized subquery, which must return exactly one column. The left-hand expression is evaluated and compared to each row of the subquery result using the given operator, which must yield a Boolean result. The result of **ANY** is "true" if any true result is obtained. The result is "false" if no true result is found (including the case where the subquery returns no rows). **SOME** is a synonym of **ANY**. **IN** can be equivalently replaced with **ANY**. - - Example: - - ```sql - mogdb=# SELECT sr_reason_sk,sr_customer_sk FROM tpcds.store_returns WHERE sr_customer_sk < ANY (SELECT d_dom FROM tpcds.date_dim WHERE d_dom < 10); - sr_reason_sk | sr_customer_sk - --------------+---------------- - 26 | 3 - 17 | 7 - 32 | 5 - 32 | 5 - 13 | 2 - 31 | 5 - 25 | 7 - 5 | 6 - 7 | 7 - 10 | 3 - 1 | 5 - 14 | 6 - 4 | 1 - 3 | 7 - 22 | 5 - 33 | 4 - 20 | 4 - 33 | 8 - 15 | 2 - (19 rows) - ``` - -- ALL - - [Figure 4](#all) shows the syntax of an **ALL** expression. - - **Figure 4** all::= - - ![all](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/subquery-expressions-4.png) - - The right-hand side is a parenthesized subquery, which must return exactly one column. The left-hand expression is evaluated and compared to each row of the subquery result using the given operator, which must yield a Boolean result. The result of **ALL** is "true" if all rows yield true (including the case where the subquery returns no rows). The result is "false" if any false result is found. - - Example: - - ```sql - mogdb=# SELECT sr_reason_sk,sr_customer_sk FROM tpcds.store_returns WHERE sr_customer_sk < all(SELECT d_dom FROM tpcds.date_dim WHERE d_dom < 10); - sr_reason_sk | sr_customer_sk - --------------+---------------- - (0 rows) - - ``` - -​ +--- +title: Subquery Expressions +summary: Subquery Expressions +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Subquery Expressions + +Subquery expressions include the following types: + +- EXISTS/NOT EXISTS + + [Figure 1](#existnotexist) shows the syntax of an **EXISTS/NOT EXISTS** expression. + + **Figure 1** EXISTS/NOT EXISTS::= . + + ![exists-not-exists](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/subquery-expressions-1.png) + + The parameter of an **EXISTS** expression is an arbitrary **SELECT** statement, or subquery. The subquery is evaluated to determine whether it returns any rows. If it returns at least one row, the result of **EXISTS** is "true". If the subquery returns no rows, the result of **EXISTS** is "false". + + The subquery will generally only be executed long enough to determine whether at least one row is returned, not all the way to completion. + + Example: + + ```sql + mogdb=# SELECT sr_reason_sk,sr_customer_sk FROM tpcds.store_returns WHERE EXISTS (SELECT d_dom FROM tpcds.date_dim WHERE d_dom = store_returns.sr_reason_sk and sr_customer_sk <10); + sr_reason_sk | sr_customer_sk + --------------+---------------- + 13 | 2 + 22 | 5 + 17 | 7 + 25 | 7 + 3 | 7 + 31 | 5 + 7 | 7 + 14 | 6 + 20 | 4 + 5 | 6 + 10 | 3 + 1 | 5 + 15 | 2 + 4 | 1 + 26 | 3 + (15 rows) + ``` + +- IN/NOT IN + + [Figure 2](#innotin) shows the syntax of an **IN/NOT IN** expression. 
+ + **Figure 2** IN/NOT IN::= + + ![in-not-in](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/subquery-expressions-2.png) + + The right-hand side is a parenthesized subquery, which must return exactly one column. The left-hand expression is evaluated and compared to each row of the subquery result. The result of **IN** is "true" if any equal subquery row is found. The result is "false" if no equal row is found (including the case where the subquery returns no rows). + + This is in accordance with SQL normal rules for Boolean combinations of null values. If the columns corresponding to two rows equal and are not empty, the two rows are equal to each other. If any columns corresponding to the two rows do not equal and are not empty, the two rows are not equal to each other. Otherwise, the result is **NULL**. If there are no equal right-hand values and at least one right-hand row yields null, the result of **IN** will be null, not false. + + Example: + + ```sql + mogdb=# SELECT sr_reason_sk,sr_customer_sk FROM tpcds.store_returns WHERE sr_customer_sk IN (SELECT d_dom FROM tpcds.date_dim WHERE d_dom < 10); + sr_reason_sk | sr_customer_sk + --------------+---------------- + 10 | 3 + 26 | 3 + 22 | 5 + 31 | 5 + 1 | 5 + 32 | 5 + 32 | 5 + 4 | 1 + 15 | 2 + 13 | 2 + 33 | 4 + 20 | 4 + 33 | 8 + 5 | 6 + 14 | 6 + 17 | 7 + 3 | 7 + 25 | 7 + 7 | 7 + (19 rows) + ``` + +- ANY/SOME + + [Figure 3](#anysome) shows the syntax of an **ANY/SOME** expression. + + **Figure 3** any/some::= + + ![any-some](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/subquery-expressions-3.png) + + The right-hand side is a parenthesized subquery, which must return exactly one column. The left-hand expression is evaluated and compared to each row of the subquery result using the given operator, which must yield a Boolean result. The result of **ANY** is "true" if any true result is obtained. The result is "false" if no true result is found (including the case where the subquery returns no rows). **SOME** is a synonym of **ANY**. **IN** can be equivalently replaced with **ANY**. + + Example: + + ```sql + mogdb=# SELECT sr_reason_sk,sr_customer_sk FROM tpcds.store_returns WHERE sr_customer_sk < ANY (SELECT d_dom FROM tpcds.date_dim WHERE d_dom < 10); + sr_reason_sk | sr_customer_sk + --------------+---------------- + 26 | 3 + 17 | 7 + 32 | 5 + 32 | 5 + 13 | 2 + 31 | 5 + 25 | 7 + 5 | 6 + 7 | 7 + 10 | 3 + 1 | 5 + 14 | 6 + 4 | 1 + 3 | 7 + 22 | 5 + 33 | 4 + 20 | 4 + 33 | 8 + 15 | 2 + (19 rows) + ``` + +- ALL + + [Figure 4](#all) shows the syntax of an **ALL** expression. + + **Figure 4** all::= + + ![all](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/subquery-expressions-4.png) + + The right-hand side is a parenthesized subquery, which must return exactly one column. The left-hand expression is evaluated and compared to each row of the subquery result using the given operator, which must yield a Boolean result. The result of **ALL** is "true" if all rows yield true (including the case where the subquery returns no rows). The result is "false" if any false result is found. 
+ + Example: + + ```sql + mogdb=# SELECT sr_reason_sk,sr_customer_sk FROM tpcds.store_returns WHERE sr_customer_sk < all(SELECT d_dom FROM tpcds.date_dim WHERE d_dom < 10); + sr_reason_sk | sr_customer_sk + --------------+---------------- + (0 rows) + + ``` + +​ diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/additional-features.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/additional-features.md index 9d0ec278..f8fc13e8 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/additional-features.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/additional-features.md @@ -1,13 +1,13 @@ ---- -title: Additional Features -summary: Additional Features -author: zhang cuiping -date: 2023-04-07 ---- - -# Additional Features - -- **[Manipulating tsvector](manipulating-tsvector.md)** -- **[Manipulating Queries](manipulating-queries.md)** -- **[Rewriting Queries](rewriting-queries.md)** +--- +title: Additional Features +summary: Additional Features +author: zhang cuiping +date: 2023-04-07 +--- + +# Additional Features + +- **[Manipulating tsvector](manipulating-tsvector.md)** +- **[Manipulating Queries](manipulating-queries.md)** +- **[Rewriting Queries](rewriting-queries.md)** - **[Gathering Document Statistics](gathering-document-statistics.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/gathering-document-statistics.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/gathering-document-statistics.md index 22b7825b..e4c838eb 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/gathering-document-statistics.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/gathering-document-statistics.md @@ -1,50 +1,50 @@ ---- -title: Gathering Document Statistics -summary: Gathering Document Statistics -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Gathering Document Statistics - -The function **ts_stat** is useful for checking your configuration and for finding stop-word candidates. - -```sql -ts_stat(sqlquery text, [ weights text, ] - OUT word text, OUT ndoc integer, - OUT nentry integer) returns setof record -``` - -**sqlquery** is a text value containing an SQL query which must return a single **tsvector** column. **ts_stat** executes the query and returns statistics about each distinct lexeme (word) contained in the **tsvector** data. The columns returned are: - -- **word text**: the value of a lexeme -- **ndoc integer**: number of documents (**tsvector**) the word occurred in -- **nentry integer**: total number of occurrences of the word - -If **weights** is supplied, only occurrences having one of those weights are counted. 
For example, to find the ten most frequent words in a document collection: - -```sql -mogdb=# SELECT * FROM ts_stat('SELECT to_tsvector(''english'', sr_reason_sk) FROM tpcds.store_returns WHERE sr_customer_sk < 10') ORDER BY nentry DESC, ndoc DESC, word LIMIT 10; - word | ndoc | nentry -------+------+-------- - 32 | 2 | 2 - 33 | 2 | 2 - 1 | 1 | 1 - 10 | 1 | 1 - 13 | 1 | 1 - 14 | 1 | 1 - 15 | 1 | 1 - 17 | 1 | 1 - 20 | 1 | 1 - 22 | 1 | 1 -(10 rows) -``` - -The same, but counting only word occurrences with weight **A** or **B**: - -```sql -mogdb=# SELECT * FROM ts_stat('SELECT to_tsvector(''english'', sr_reason_sk) FROM tpcds.store_returns WHERE sr_customer_sk < 10', 'a') ORDER BY nentry DESC, ndoc DESC, word LIMIT 10; - word | ndoc | nentry -------+------+-------- -(0 rows) -``` +--- +title: Gathering Document Statistics +summary: Gathering Document Statistics +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Gathering Document Statistics + +The function **ts_stat** is useful for checking your configuration and for finding stop-word candidates. + +```sql +ts_stat(sqlquery text, [ weights text, ] + OUT word text, OUT ndoc integer, + OUT nentry integer) returns setof record +``` + +**sqlquery** is a text value containing an SQL query which must return a single **tsvector** column. **ts_stat** executes the query and returns statistics about each distinct lexeme (word) contained in the **tsvector** data. The columns returned are: + +- **word text**: the value of a lexeme +- **ndoc integer**: number of documents (**tsvector**) the word occurred in +- **nentry integer**: total number of occurrences of the word + +If **weights** is supplied, only occurrences having one of those weights are counted. For example, to find the ten most frequent words in a document collection: + +```sql +mogdb=# SELECT * FROM ts_stat('SELECT to_tsvector(''english'', sr_reason_sk) FROM tpcds.store_returns WHERE sr_customer_sk < 10') ORDER BY nentry DESC, ndoc DESC, word LIMIT 10; + word | ndoc | nentry +------+------+-------- + 32 | 2 | 2 + 33 | 2 | 2 + 1 | 1 | 1 + 10 | 1 | 1 + 13 | 1 | 1 + 14 | 1 | 1 + 15 | 1 | 1 + 17 | 1 | 1 + 20 | 1 | 1 + 22 | 1 | 1 +(10 rows) +``` + +The same, but counting only word occurrences with weight **A** or **B**: + +```sql +mogdb=# SELECT * FROM ts_stat('SELECT to_tsvector(''english'', sr_reason_sk) FROM tpcds.store_returns WHERE sr_customer_sk < 10', 'a') ORDER BY nentry DESC, ndoc DESC, word LIMIT 10; + word | ndoc | nentry +------+------+-------- +(0 rows) +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/manipulating-queries.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/manipulating-queries.md index 9a513c38..0e0abb1f 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/manipulating-queries.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/manipulating-queries.md @@ -1,52 +1,52 @@ ---- -title: Manipulating Queries -summary: Manipulating Queries -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Manipulating Queries - -MogDB provides functions and operators that can be used to manipulate queries that are already in **tsquery** type. - -- tsquery && tsquery - - Returns the AND-combination of the two given queries. - -- tsquery || tsquery - - Returns the OR-combination of the two given queries. - -- !! tsquery - - Returns the negation (NOT) of the given query. 
- -- numnode(query tsquery) returns integer - - Returns the number of nodes (lexemes plus operators) in a **tsquery**. This function is useful to determine if the query is meaningful (returns > 0), or contains only stop words (returns 0). For example: - - ```sql - mogdb=# SELECT numnode(plainto_tsquery('the any')); - NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored - CONTEXT: referenced column: numnode - numnode - --------- - 0 - - mogdb=# SELECT numnode('foo & bar'::tsquery); - numnode - --------- - 3 - ``` - -- querytree(query tsquery) returns text - - Returns the portion of a **tsquery** that can be used for searching an index. This function is useful for detecting unindexable queries, for example those containing only stop words or only negated terms. For example: - - ```sql - mogdb=# SELECT querytree(to_tsquery('!defined')); - querytree - ----------- - T - (1 row) - ``` +--- +title: Manipulating Queries +summary: Manipulating Queries +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Manipulating Queries + +MogDB provides functions and operators that can be used to manipulate queries that are already in **tsquery** type. + +- tsquery && tsquery + + Returns the AND-combination of the two given queries. + +- tsquery || tsquery + + Returns the OR-combination of the two given queries. + +- !! tsquery + + Returns the negation (NOT) of the given query. + +- numnode(query tsquery) returns integer + + Returns the number of nodes (lexemes plus operators) in a **tsquery**. This function is useful to determine if the query is meaningful (returns > 0), or contains only stop words (returns 0). For example: + + ```sql + mogdb=# SELECT numnode(plainto_tsquery('the any')); + NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored + CONTEXT: referenced column: numnode + numnode + --------- + 0 + + mogdb=# SELECT numnode('foo & bar'::tsquery); + numnode + --------- + 3 + ``` + +- querytree(query tsquery) returns text + + Returns the portion of a **tsquery** that can be used for searching an index. This function is useful for detecting unindexable queries, for example those containing only stop words or only negated terms. For example: + + ```sql + mogdb=# SELECT querytree(to_tsquery('!defined')); + querytree + ----------- + T + (1 row) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/manipulating-tsvector.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/manipulating-tsvector.md index 48b6a2f1..a2520544 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/manipulating-tsvector.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/manipulating-tsvector.md @@ -1,31 +1,31 @@ ---- -title: Manipulating tsvector -summary: Manipulating tsvector -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Manipulating tsvector - -MogDB provides functions and operators that can be used to manipulate documents that are already in **tsvector** type. - -- tsvector || tsvector - - The tsvector concatenation operator returns a new tsvector which combines the lexemes and positional information of the two tsvectors given as arguments. Positions and weight labels are retained during the concatenation. 
Positions appearing in the right-hand tsvector are offset by the largest position mentioned in the left-hand tsvector, so that the result is nearly equivalent to the result of performing **to_tsvector** on the concatenation of the two original document strings. (The equivalence is not exact, because any stop-words removed from the end of the left-hand argument will not affect the result, whereas they would have affected the positions of the lexemes in the right-hand argument if textual concatenation were used.) - - One advantage of using concatenation in the tsvector form, rather than concatenating text before applying **to_tsvector**, is that you can use different configurations to parse different sections of the document. Also, because the **setweight** function marks all lexemes of the given tsvector the same way, it is necessary to parse the text and do **setweight** before concatenating if you want to label different parts of the document with different weights. - -- setweight(vector tsvector, weight "char") returns tsvector - - **setweight** returns a copy of the input tsvector in which every position has been labeled with the given weight, either **A**, **B**, **C**, or **D**. (**D** is the default for new tsvectors and as such is not displayed on output.) These labels are retained when tsvectors are concatenated, allowing words from different parts of a document to be weighted differently by ranking functions. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** - > Note that weight labels apply to positions, not lexemes. If the input tsvector has been stripped of positions then **setweight** does nothing. - -- length(vector tsvector) returns integer - - Returns the number of lexemes stored in the tsvector. - -- strip(vector tsvector) returns tsvector - - Returns a tsvector which lists the same lexemes as the given tsvector, but which lacks any position or weight information. While the returned tsvector is much less useful than an unstripped tsvector for relevance ranking, it will usually be much smaller. +--- +title: Manipulating tsvector +summary: Manipulating tsvector +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Manipulating tsvector + +MogDB provides functions and operators that can be used to manipulate documents that are already in **tsvector** type. + +- tsvector || tsvector + + The tsvector concatenation operator returns a new tsvector which combines the lexemes and positional information of the two tsvectors given as arguments. Positions and weight labels are retained during the concatenation. Positions appearing in the right-hand tsvector are offset by the largest position mentioned in the left-hand tsvector, so that the result is nearly equivalent to the result of performing **to_tsvector** on the concatenation of the two original document strings. (The equivalence is not exact, because any stop-words removed from the end of the left-hand argument will not affect the result, whereas they would have affected the positions of the lexemes in the right-hand argument if textual concatenation were used.) + + One advantage of using concatenation in the tsvector form, rather than concatenating text before applying **to_tsvector**, is that you can use different configurations to parse different sections of the document. 
Also, because the **setweight** function marks all lexemes of the given tsvector the same way, it is necessary to parse the text and do **setweight** before concatenating if you want to label different parts of the document with different weights. + +- setweight(vector tsvector, weight "char") returns tsvector + + **setweight** returns a copy of the input tsvector in which every position has been labeled with the given weight, either **A**, **B**, **C**, or **D**. (**D** is the default for new tsvectors and as such is not displayed on output.) These labels are retained when tsvectors are concatenated, allowing words from different parts of a document to be weighted differently by ranking functions. + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** + > Note that weight labels apply to positions, not lexemes. If the input tsvector has been stripped of positions then **setweight** does nothing. + +- length(vector tsvector) returns integer + + Returns the number of lexemes stored in the tsvector. + +- strip(vector tsvector) returns tsvector + + Returns a tsvector which lists the same lexemes as the given tsvector, but which lacks any position or weight information. While the returned tsvector is much less useful than an unstripped tsvector for relevance ranking, it will usually be much smaller. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/rewriting-queries.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/rewriting-queries.md index 5f814049..c5d62eb0 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/rewriting-queries.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/additional-features/rewriting-queries.md @@ -1,68 +1,68 @@ ---- -title: Rewriting Queries -summary: Rewriting Queries -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Rewriting Queries - -The **ts_rewrite** family of functions searches a given **tsquery** for occurrences of a target subquery, and replace each occurrence with a substitute subquery. In essence this operation is a **tsquery** specific version of substring replacement. A target and substitute combination can be thought of as a query rewrite rule. A collection of such rewrite rules can be a powerful search aid. For example, you can expand the search using synonyms (that is, new york, big apple, nyc, gotham) or narrow the search to direct the user to some hot topic. - -- ts_rewrite (query tsquery, target tsquery, substitute tsquery) returns tsquery - - This form of **ts_rewrite** simply applies a single rewrite rule: **target** is replaced by **substitute** wherever it appears in query. Example: - - ```sql - mogdb=# SELECT ts_rewrite('a & b'::tsquery, 'a'::tsquery, 'c'::tsquery); - ts_rewrite - ------------ - 'b' & 'c' - ``` - -- ts_rewrite (query tsquery, select text) returns tsquery - - This form of **ts_rewrite** accepts a starting query and a SQL select command, which is given as a text string. The **select** must yield two columns of **tsquery** type. For each row of the select result, occurrences of the first column value (the target) are replaced by the second column value (the substitute) within the current **query** value. 
- - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > Note that when multiple rewrite rules are applied in this way, the order of application can be important; so in practice you will want the source query to **ORDER BY** some ordering key. - - Consider a real-life astronomical example. We will expand query supernovae using table-driven rewriting rules: - - ```sql - mogdb=# CREATE TABLE tsearch.aliases (id int, t tsquery, s tsquery); - - mogdb=# INSERT INTO tsearch.aliases VALUES(1, to_tsquery('supernovae'), to_tsquery('supernovae|sn')); - - mogdb=# SELECT ts_rewrite(to_tsquery('supernovae & crab'), 'SELECT t, s FROM tsearch.aliases'); - - ts_rewrite - --------------------------------- - 'crab' & ( 'supernova' | 'sn' ) - ``` - - We can change the rewriting rules just by updating the table: - - ```sql - mogdb=# UPDATE tsearch.aliases - SET s = to_tsquery('supernovae|sn & !nebulae') - WHERE t = to_tsquery('supernovae'); - - mogdb=# SELECT ts_rewrite(to_tsquery('supernovae & crab'), 'SELECT t, s FROM tsearch.aliases'); - - ts_rewrite - --------------------------------------------- - 'crab' & ( 'supernova' | 'sn' & !'nebula' ) - ``` - - Rewriting can be slow when there are many rewriting rules, since it checks every rule for a possible match. To filter out obvious non-candidate rules we can use the containment operators for the **tsquery** type. In the example below, we select only those rules which might match the original query: - - ```sql - mogdb=# SELECT ts_rewrite('a & b'::tsquery, 'SELECT t,s FROM tsearch.aliases WHERE ''a & b''::tsquery @> t'); - - ts_rewrite - ------------ - 'b' & 'a' - (1 row) - mogdb=# DROP TABLE tsearch.aliases; - ``` +--- +title: Rewriting Queries +summary: Rewriting Queries +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Rewriting Queries + +The **ts_rewrite** family of functions searches a given **tsquery** for occurrences of a target subquery, and replace each occurrence with a substitute subquery. In essence this operation is a **tsquery** specific version of substring replacement. A target and substitute combination can be thought of as a query rewrite rule. A collection of such rewrite rules can be a powerful search aid. For example, you can expand the search using synonyms (that is, new york, big apple, nyc, gotham) or narrow the search to direct the user to some hot topic. + +- ts_rewrite (query tsquery, target tsquery, substitute tsquery) returns tsquery + + This form of **ts_rewrite** simply applies a single rewrite rule: **target** is replaced by **substitute** wherever it appears in query. Example: + + ```sql + mogdb=# SELECT ts_rewrite('a & b'::tsquery, 'a'::tsquery, 'c'::tsquery); + ts_rewrite + ------------ + 'b' & 'c' + ``` + +- ts_rewrite (query tsquery, select text) returns tsquery + + This form of **ts_rewrite** accepts a starting query and a SQL select command, which is given as a text string. The **select** must yield two columns of **tsquery** type. For each row of the select result, occurrences of the first column value (the target) are replaced by the second column value (the substitute) within the current **query** value. + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** + > Note that when multiple rewrite rules are applied in this way, the order of application can be important; so in practice you will want the source query to **ORDER BY** some ordering key. + + Consider a real-life astronomical example. 
We will expand query supernovae using table-driven rewriting rules: + + ```sql + mogdb=# CREATE TABLE tsearch.aliases (id int, t tsquery, s tsquery); + + mogdb=# INSERT INTO tsearch.aliases VALUES(1, to_tsquery('supernovae'), to_tsquery('supernovae|sn')); + + mogdb=# SELECT ts_rewrite(to_tsquery('supernovae & crab'), 'SELECT t, s FROM tsearch.aliases'); + + ts_rewrite + --------------------------------- + 'crab' & ( 'supernova' | 'sn' ) + ``` + + We can change the rewriting rules just by updating the table: + + ```sql + mogdb=# UPDATE tsearch.aliases + SET s = to_tsquery('supernovae|sn & !nebulae') + WHERE t = to_tsquery('supernovae'); + + mogdb=# SELECT ts_rewrite(to_tsquery('supernovae & crab'), 'SELECT t, s FROM tsearch.aliases'); + + ts_rewrite + --------------------------------------------- + 'crab' & ( 'supernova' | 'sn' & !'nebula' ) + ``` + + Rewriting can be slow when there are many rewriting rules, since it checks every rule for a possible match. To filter out obvious non-candidate rules we can use the containment operators for the **tsquery** type. In the example below, we select only those rules which might match the original query: + + ```sql + mogdb=# SELECT ts_rewrite('a & b'::tsquery, 'SELECT t,s FROM tsearch.aliases WHERE ''a & b''::tsquery @> t'); + + ts_rewrite + ------------ + 'b' & 'a' + (1 row) + mogdb=# DROP TABLE tsearch.aliases; + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/controlling-text-search.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/controlling-text-search.md index 276d982b..662cb0f4 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/controlling-text-search.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/controlling-text-search.md @@ -1,13 +1,13 @@ ---- -title: Controlling Text Search -summary: Controlling Text Search -author: zhang cuiping -date: 2023-04-07 ---- - -# Controlling Text Search - -- **[Parsing Documents](parsing-documents.md)** -- **[Parsing Queries](parsing-queries.md)** -- **[Ranking Search Results](ranking-search-results.md)** +--- +title: Controlling Text Search +summary: Controlling Text Search +author: zhang cuiping +date: 2023-04-07 +--- + +# Controlling Text Search + +- **[Parsing Documents](parsing-documents.md)** +- **[Parsing Queries](parsing-queries.md)** +- **[Ranking Search Results](ranking-search-results.md)** - **[Highlighting Results](highlighting-results.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/highlighting-results.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/highlighting-results.md index c9d81217..52b42696 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/highlighting-results.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/highlighting-results.md @@ -1,66 +1,66 @@ ---- -title: Highlighting Results -summary: Highlighting Results -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Highlighting Results - -To present search results it is ideal to show a part of each document and how it is related to the query. Usually, search engines show fragments of the document with marked search terms. 
MogDB provides a function **ts_headline** that implements this functionality. - -```sql -ts_headline([ config regconfig, ] document text, query tsquery [, options text ]) returns text -``` - -**ts_headline** accepts a document along with a query, and returns an excerpt from the document in which terms from the query are highlighted. The configuration to be used to parse the document can be specified by **config**. If **config** is omitted, the **default_text_search_config** configuration is used. - -If an options string is specified it must consist of a comma-separated list of one or more **option=value** pairs. The available options are: - -- **StartSel**, **StopSel**: The strings with which to delimit query words appearing in the document, to distinguish them from other excerpted words. You must double-quote these strings if they contain spaces or commas. -- **MaxWords**, **MinWords**: These numbers determine the longest and shortest headlines to output. -- **ShortWord**: Words of this length or less will be dropped at the start and end of a headline. The default value of three eliminates common English articles. -- **HighlightAll**: Boolean flag. If **true** the whole document will be used as the headline, ignoring the preceding three parameters. -- **MaxFragments**: Maximum number of text excerpts or fragments to display. The default value of zero selects a non-fragment-oriented headline generation method. A value greater than zero selects fragment-based headline generation. This method finds text fragments with as many query words as possible and stretches those fragments around the query words. As a result query words are close to the middle of each fragment and have words on each side. Each fragment will be of at most **MaxWords** and words of length **ShortWord** or less are dropped at the start and end of each fragment. If not all query words are found in the document, then a single fragment of the first **MinWords** in the document will be displayed. -- **FragmentDelimiter**: When more than one fragment is displayed, the fragments will be separated by this string. - -Any unspecified options receive these defaults: - -```sql -StartSel=, StopSel=, -MaxWords=35, MinWords=15, ShortWord=3, HighlightAll=FALSE, -MaxFragments=0, FragmentDelimiter=" ... " -``` - -Example: - -```sql -mogdb=# SELECT ts_headline('english', -'The most common type of search -is to find all documents containing given query terms -and return them in order of their similarity to the -query.', -to_tsquery('english', 'query & similarity')); - ts_headline ------------------------------------------------------------- - containing given query terms - and return them in order of their similarity to the - query. -(1 row) - -mogdb=# SELECT ts_headline('english', -'The most common type of search -is to find all documents containing given query terms -and return them in order of their similarity to the -query.', -to_tsquery('english', 'query & similarity'), -'StartSel = <, StopSel = >'); - ts_headline -------------------------------------------------------- - containing given terms - and return them in order of their to the - . -(1 row) -``` - -**ts_headline** uses the original document, not a **tsvector** summary, so it can be slow and should be used with care. +--- +title: Highlighting Results +summary: Highlighting Results +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Highlighting Results + +To present search results it is ideal to show a part of each document and how it is related to the query. 
Usually, search engines show fragments of the document with marked search terms. MogDB provides a function **ts_headline** that implements this functionality.
+
+```sql
+ts_headline([ config regconfig, ] document text, query tsquery [, options text ]) returns text
+```
+
+**ts_headline** accepts a document along with a query, and returns an excerpt from the document in which terms from the query are highlighted. The configuration to be used to parse the document can be specified by **config**. If **config** is omitted, the **default_text_search_config** configuration is used.
+
+If an options string is specified, it must consist of a comma-separated list of one or more **option=value** pairs. The available options are:
+
+- **StartSel**, **StopSel**: The strings with which to delimit query words appearing in the document, to distinguish them from other excerpted words. You must double-quote these strings if they contain spaces or commas.
+- **MaxWords**, **MinWords**: These numbers determine the longest and shortest headlines to output.
+- **ShortWord**: Words of this length or less will be dropped at the start and end of a headline. The default value of three eliminates common English articles.
+- **HighlightAll**: Boolean flag. If **true** the whole document will be used as the headline, ignoring the preceding three parameters.
+- **MaxFragments**: Maximum number of text excerpts or fragments to display. The default value of zero selects a non-fragment-oriented headline generation method. A value greater than zero selects fragment-based headline generation. This method finds text fragments with as many query words as possible and stretches those fragments around the query words. As a result query words are close to the middle of each fragment and have words on each side. Each fragment will be of at most **MaxWords** and words of length **ShortWord** or less are dropped at the start and end of each fragment. If not all query words are found in the document, then a single fragment of the first **MinWords** in the document will be displayed.
+- **FragmentDelimiter**: When more than one fragment is displayed, the fragments will be separated by this string.
+
+Any unspecified options receive these defaults:
+
+```sql
+StartSel=<b>, StopSel=</b>,
+MaxWords=35, MinWords=15, ShortWord=3, HighlightAll=FALSE,
+MaxFragments=0, FragmentDelimiter=" ... "
+```
+
+Example:
+
+```sql
+mogdb=# SELECT ts_headline('english',
+'The most common type of search
+is to find all documents containing given query terms
+and return them in order of their similarity to the
+query.',
+to_tsquery('english', 'query & similarity'));
+ ts_headline
+------------------------------------------------------------
+ containing given <b>query</b> terms
+ and return them in order of their <b>similarity</b> to the
+ <b>query</b>.
+(1 row)
+
+mogdb=# SELECT ts_headline('english',
+'The most common type of search
+is to find all documents containing given query terms
+and return them in order of their similarity to the
+query.',
+to_tsquery('english', 'query & similarity'),
+'StartSel = <, StopSel = >');
+ ts_headline
+-------------------------------------------------------
+ containing given <query> terms
+ and return them in order of their <similarity> to the
+ <query>.
+(1 row)
+```
+
+**ts_headline** uses the original document, not a **tsvector** summary, so it can be slow and should be used with care.
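+
+A fragment-based headline can be requested through **MaxFragments** (a hedged sketch: the statement is valid as written, but the excerpt actually chosen depends on the text, so the output shown is illustrative only):
+
+```sql
+mogdb=# SELECT ts_headline('english',
+'The most common type of search
+is to find all documents containing given query terms
+and return them in order of their similarity to the
+query.',
+to_tsquery('english', 'query & similarity'),
+'MaxFragments=2, MaxWords=7, MinWords=3, StartSel=<<, StopSel=>>');
+ ts_headline
+-------------------------------------------------------
+ given <<query>> terms ... their <<similarity>> to the
+(1 row)
+```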
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/parsing-queries.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/parsing-queries.md index 539b1306..56eaf5ad 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/parsing-queries.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/parsing-queries.md @@ -1,74 +1,74 @@ ---- -title: Parsing Queries -summary: Parsing Queries -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Parsing Queries - -MogDB provides functions **to_tsquery** and **plainto_tsquery** for converting a query to the **tsquery** data type. **to_tsquery** offers access to more features than **plainto_tsquery**, but is less forgiving about its input. - -```sql -to_tsquery([ config regconfig, ] querytext text) returns tsquery -``` - -**to_tsquery** creates a **tsquery** value from **querytext**, which must consist of single tokens separated by the Boolean operators **&** (AND), **|** (OR), and **!** (NOT). These operators can be grouped using parentheses. In other words, the input to **to_tsquery** must follow the general rules for **tsquery** input, as described in **Text Search Types**. The difference is that while basic **tsquery** input takes the tokens at face value, **to_tsquery** normalizes each token to a lexeme using the specified or default configuration, and discards any tokens that are stop words according to the configuration. Example: - -```sql -mogdb=# SELECT to_tsquery('english', 'The & Fat & Rats'); - to_tsquery ---------------- - 'fat' & 'rat' -(1 row) -``` - -As in basic **tsquery** input, **weight(s)** can be attached to each lexeme to restrict it to match only **tsvector** lexemes of those **weight(s)**. Example: - -```sql -mogdb=# SELECT to_tsquery('english', 'Fat | Rats:AB'); - to_tsquery ------------------- - 'fat' | 'rat':AB -(1 row) -``` - -Also, the asterisk (*) can be attached to a lexeme to specify prefix matching: - -```sql -mogdb=# SELECT to_tsquery('supern:*A & star:A*B'); - to_tsquery --------------------------- - 'supern':*A & 'star':*AB -(1 row) -``` - -Such a lexeme will match any word having the specified string and weight in a **tsquery**. - -```sql -plainto_tsquery([ config regconfig, ] querytext text) returns tsquery -``` - -**plainto_tsquery** transforms unformatted text **querytext** to **tsquery**. The text is parsed and normalized much as for **to_tsvector**, and then the **&** (AND) Boolean operator is inserted between surviving words. - -For example: - -```sql -mogdb=# SELECT plainto_tsquery('english', 'The Fat Rats'); - plainto_tsquery ------------------ - 'fat' & 'rat' -(1 row) -``` - -Note that **plainto_tsquery** cannot recognize Boolean operators, weight labels, or prefix-match labels in its input: - -```sql -mogdb=# SELECT plainto_tsquery('english', 'The Fat & Rats:C'); - plainto_tsquery ---------------------- - 'fat' & 'rat' & 'c' -(1 row) -``` - -Here, all the input punctuation was discarded as being space symbols. +--- +title: Parsing Queries +summary: Parsing Queries +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Parsing Queries + +MogDB provides functions **to_tsquery** and **plainto_tsquery** for converting a query to the **tsquery** data type. **to_tsquery** offers access to more features than **plainto_tsquery**, but is less forgiving about its input. 
+ +```sql +to_tsquery([ config regconfig, ] querytext text) returns tsquery +``` + +**to_tsquery** creates a **tsquery** value from **querytext**, which must consist of single tokens separated by the Boolean operators **&** (AND), **|** (OR), and **!** (NOT). These operators can be grouped using parentheses. In other words, the input to **to_tsquery** must follow the general rules for **tsquery** input, as described in **Text Search Types**. The difference is that while basic **tsquery** input takes the tokens at face value, **to_tsquery** normalizes each token to a lexeme using the specified or default configuration, and discards any tokens that are stop words according to the configuration. Example: + +```sql +mogdb=# SELECT to_tsquery('english', 'The & Fat & Rats'); + to_tsquery +--------------- + 'fat' & 'rat' +(1 row) +``` + +As in basic **tsquery** input, **weight(s)** can be attached to each lexeme to restrict it to match only **tsvector** lexemes of those **weight(s)**. Example: + +```sql +mogdb=# SELECT to_tsquery('english', 'Fat | Rats:AB'); + to_tsquery +------------------ + 'fat' | 'rat':AB +(1 row) +``` + +Also, the asterisk (*) can be attached to a lexeme to specify prefix matching: + +```sql +mogdb=# SELECT to_tsquery('supern:*A & star:A*B'); + to_tsquery +-------------------------- + 'supern':*A & 'star':*AB +(1 row) +``` + +Such a lexeme will match any word having the specified string and weight in a **tsquery**. + +```sql +plainto_tsquery([ config regconfig, ] querytext text) returns tsquery +``` + +**plainto_tsquery** transforms unformatted text **querytext** to **tsquery**. The text is parsed and normalized much as for **to_tsvector**, and then the **&** (AND) Boolean operator is inserted between surviving words. + +For example: + +```sql +mogdb=# SELECT plainto_tsquery('english', 'The Fat Rats'); + plainto_tsquery +----------------- + 'fat' & 'rat' +(1 row) +``` + +Note that **plainto_tsquery** cannot recognize Boolean operators, weight labels, or prefix-match labels in its input: + +```sql +mogdb=# SELECT plainto_tsquery('english', 'The Fat & Rats:C'); + plainto_tsquery +--------------------- + 'fat' & 'rat' & 'c' +(1 row) +``` + +Here, all the input punctuation was discarded as being space symbols. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/ranking-search-results.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/ranking-search-results.md index 96fa4798..f8cf329b 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/ranking-search-results.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/controlling-text-search/ranking-search-results.md @@ -1,108 +1,108 @@ ---- -title: Ranking Search Results -summary: Ranking Search Results -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Ranking Search Results - -Ranking attempts to measure how relevant documents are to a particular query, so that when there are many matches the most relevant ones can be shown first. MogDB provides two predefined ranking functions. The functions take into account lexical, proximity, and structural information; that is, they consider how often the query terms appear in the document, how close together the terms are in the document, and how important is the part of the document where they occur. However, the concept of relevancy is vague and application-specific. 
Different applications might require additional information for ranking, for example, document modification time. The built-in ranking functions are only examples. You can write your own ranking functions and/or combine their results with additional factors to fit your specific needs. - -The two ranking functions currently available are: - -```sql -ts_rank([ weights float4[], ] vector tsvector, query tsquery [, normalization integer ]) returns float4 -``` - -Ranks vectors based on the frequency of their matching lexemes. - -```sql -ts_rank_cd([ weights float4[], ] vector tsvector, query tsquery [, normalization integer ]) returns float4 -``` - -This function requires positional information in its input. Therefore, it will not work on "stripped" **tsvector** values. It will always return zero. - -For both these functions, the optional **weights** argument offers the ability to weigh word instances more or less heavily depending on how they are labeled. The weight arrays specify how heavily to weigh each category of word, in the order: - -```sql -{D-weight, C-weight, B-weight, A-weight} -``` - -If no **weights** is provided, then these defaults are used: {0.1, 0.2, 0.4, 1.0} - -Typically weights are used to mark words from special areas of the document, like the title or an initial abstract, so they can be treated with more or less importance than words in the document body. - -Since a longer document has a greater chance of containing a query term it is reasonable to take into account document size. For example, a hundred-word document with five instances of a search word is probably more relevant than a thousand-word document with five instances. Both ranking functions take an integer **normalization** option that specifies whether and how a document's length should impact its rank. The integer option controls several behaviors, so it is a bit mask: you can specify one or more behaviors using a vertical bar (**|**) (for example, **2|4**). - -- **0** (default) ignores the document length. -- **1** divides the rank by (1 + logarithm of the document length). -- **2** divides the rank by the document length. -- **4** divides the rank by the mean harmonic distance between extents. This is implemented only by **ts_rank_cd**. -- **8** divides the rank by the number of unique words in document. -- **16** divides the rank by (1 + Logarithm of the number of unique words in document). -- **32** divides the rank by (itself + 1). - -If more than one flag bit is specified, the transformations are applied in the order listed. - -It is important to note that the ranking functions do not use any global information, so it is impossible to produce a fair normalization to 1% or 100% as sometimes desired. Normalization option 32 (**rank/(rank+1)**) can be applied to scale all ranks into the range zero to one, but of course this is just a cosmetic change; it will not affect the ordering of the search results. 
- -Here is an example that selects only the ten highest-ranked matches: - -```sql -mogdb=# SELECT id, title, ts_rank_cd(to_tsvector(body), query) AS rank -FROM tsearch.pgweb, to_tsquery('america') query -WHERE query @@ to_tsvector(body) -ORDER BY rank DESC -LIMIT 10; - id | title | rank -----+---------+------ - 11 | Brazil | .2 - 2 | America | .1 - 12 | Canada | .1 - 13 | Mexico | .1 -(4 rows) -``` - -This is the same example using normalized ranking: - -```sql -mogdb=# SELECT id, title, ts_rank_cd(to_tsvector(body), query, 32 /* rank/(rank+1) */ ) AS rank -FROM tsearch.pgweb, to_tsquery('america') query -WHERE query @@ to_tsvector(body) -ORDER BY rank DESC -LIMIT 10; - id | title | rank -----+---------+---------- - 11 | Brazil | .166667 - 2 | America | .0909091 - 12 | Canada | .0909091 - 13 | Mexico | .0909091 -(4 rows) -``` - -The following example sorts query by Chinese word segmentation: - -```sql -mogdb=# CREATE TABLE tsearch.ts_ngram(id int, body text); -mogdb=# INSERT INTO tsearch.ts_ngram VALUES (1, 'Chinese'); -mogdb=# INSERT INTO tsearch.ts_ngram VALUES (2, 'Chinese search'); -mogdb=# INSERT INTO tsearch.ts_ngram VALUES (3 'Search Chinese'); --- Exact match -mogdb=# SELECT id, body, ts_rank_cd (to_tsvector ('ngram', body), query) AS rank FROM tsearch.ts_ngram, to_tsquery ('Chinese') query WHERE query @@ to_tsvector (body); - id | body | rank -----+------+------ - 1 | Chinese | .1 -(1 row) - --- Fuzzy Match -mogdb=# SELECT id, body, ts_rank_cd (to_tsvector ('ngram', body), query) AS rank FROM tsearch.ts_ngram, to_tsquery ('Chinese') query WHERE query @@ to_tsvector ('ngram', body); - id | body | rank -----+----------+------ - 3 | Search Chinese | .1 - 1 | Chinese | .1 - 2 | Chinese search | .1 -(3 rows) -``` - -Ranking can be expensive since it requires consulting the **tsvector** of each matching document, which can be I/O bound and therefore slow. Unfortunately, it is almost impossible to avoid since practical queries often result in large numbers of matches. +--- +title: Ranking Search Results +summary: Ranking Search Results +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Ranking Search Results + +Ranking attempts to measure how relevant documents are to a particular query, so that when there are many matches the most relevant ones can be shown first. MogDB provides two predefined ranking functions. The functions take into account lexical, proximity, and structural information; that is, they consider how often the query terms appear in the document, how close together the terms are in the document, and how important is the part of the document where they occur. However, the concept of relevancy is vague and application-specific. Different applications might require additional information for ranking, for example, document modification time. The built-in ranking functions are only examples. You can write your own ranking functions and/or combine their results with additional factors to fit your specific needs. + +The two ranking functions currently available are: + +```sql +ts_rank([ weights float4[], ] vector tsvector, query tsquery [, normalization integer ]) returns float4 +``` + +Ranks vectors based on the frequency of their matching lexemes. + +```sql +ts_rank_cd([ weights float4[], ] vector tsvector, query tsquery [, normalization integer ]) returns float4 +``` + +This function requires positional information in its input. Therefore, it will not work on "stripped" **tsvector** values. It will always return zero. 
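+
+A minimal sketch of that behavior, using the **strip** function described in Manipulating tsvector (the zero result is exactly what the preceding paragraph states; the sample text is arbitrary):
+
+```sql
+mogdb=# SELECT ts_rank_cd(strip(to_tsvector('english', 'fat cats ate fat rats')), to_tsquery('english', 'fat & rat'));
+ ts_rank_cd
+------------
+          0
+(1 row)
+```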
+ +For both these functions, the optional **weights** argument offers the ability to weigh word instances more or less heavily depending on how they are labeled. The weight arrays specify how heavily to weigh each category of word, in the order: + +```sql +{D-weight, C-weight, B-weight, A-weight} +``` + +If no **weights** argument is provided, these defaults are used: {0.1, 0.2, 0.4, 1.0}. + +Typically weights are used to mark words from special areas of the document, like the title or an initial abstract, so they can be treated with more or less importance than words in the document body. + +Since a longer document has a greater chance of containing a query term, it is reasonable to take into account document size. For example, a hundred-word document with five instances of a search word is probably more relevant than a thousand-word document with five instances. Both ranking functions take an integer **normalization** option that specifies whether and how a document's length should impact its rank. The integer option controls several behaviors, so it is a bit mask: you can specify one or more behaviors using a vertical bar (**|**) (for example, **2|4**). + +- **0** (default) ignores the document length. +- **1** divides the rank by (1 + logarithm of the document length). +- **2** divides the rank by the document length. +- **4** divides the rank by the mean harmonic distance between extents. This is implemented only by **ts_rank_cd**. +- **8** divides the rank by the number of unique words in the document. +- **16** divides the rank by (1 + logarithm of the number of unique words in the document). +- **32** divides the rank by (itself + 1). + +If more than one flag bit is specified, the transformations are applied in the order listed. + +It is important to note that the ranking functions do not use any global information, so it is impossible to produce a fair normalization to 1% or 100% as sometimes desired. Normalization option 32 (**rank/(rank+1)**) can be applied to scale all ranks into the range zero to one, but of course this is just a cosmetic change; it will not affect the ordering of the search results.
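+ +To make the bit mask concrete, the following sketch combines options **2** and **4** against the **tsearch.pgweb** example table used below, dividing the rank by the document length and then by the mean harmonic distance between extents (an illustration only; the flag arithmetic is the point): + +```sql +-- A hedged sketch: 2|4 applies both length and extent-distance normalization. +SELECT id, ts_rank_cd(to_tsvector(body), to_tsquery('america'), 2|4) AS rank +FROM tsearch.pgweb +ORDER BY rank DESC; +```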
+ +Here is an example that selects only the ten highest-ranked matches: + +```sql +mogdb=# SELECT id, title, ts_rank_cd(to_tsvector(body), query) AS rank +FROM tsearch.pgweb, to_tsquery('america') query +WHERE query @@ to_tsvector(body) +ORDER BY rank DESC +LIMIT 10; + id | title | rank +----+---------+------ + 11 | Brazil | .2 + 2 | America | .1 + 12 | Canada | .1 + 13 | Mexico | .1 +(4 rows) +``` + +This is the same example using normalized ranking: + +```sql +mogdb=# SELECT id, title, ts_rank_cd(to_tsvector(body), query, 32 /* rank/(rank+1) */ ) AS rank +FROM tsearch.pgweb, to_tsquery('america') query +WHERE query @@ to_tsvector(body) +ORDER BY rank DESC +LIMIT 10; + id | title | rank +----+---------+---------- + 11 | Brazil | .166667 + 2 | America | .0909091 + 12 | Canada | .0909091 + 13 | Mexico | .0909091 +(4 rows) +``` + +The following example ranks results by using Chinese word segmentation (the **ngram** parser): + +```sql +mogdb=# CREATE TABLE tsearch.ts_ngram(id int, body text); +mogdb=# INSERT INTO tsearch.ts_ngram VALUES (1, 'Chinese'); +mogdb=# INSERT INTO tsearch.ts_ngram VALUES (2, 'Chinese search'); +mogdb=# INSERT INTO tsearch.ts_ngram VALUES (3, 'Search Chinese'); +-- Exact match +mogdb=# SELECT id, body, ts_rank_cd (to_tsvector ('ngram', body), query) AS rank FROM tsearch.ts_ngram, to_tsquery ('Chinese') query WHERE query @@ to_tsvector (body); + id | body | rank +----+------+------ + 1 | Chinese | .1 +(1 row) + +-- Fuzzy match +mogdb=# SELECT id, body, ts_rank_cd (to_tsvector ('ngram', body), query) AS rank FROM tsearch.ts_ngram, to_tsquery ('Chinese') query WHERE query @@ to_tsvector ('ngram', body); + id | body | rank +----+----------+------ + 3 | Search Chinese | .1 + 1 | Chinese | .1 + 2 | Chinese search | .1 +(3 rows) +``` + +Ranking can be expensive since it requires consulting the **tsvector** of each matching document, which can be I/O bound and therefore slow. Unfortunately, it is almost impossible to avoid since practical queries often result in large numbers of matches. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries-overview.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries-overview.md index 2da0f824..f141a128 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries-overview.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries-overview.md @@ -1,36 +1,36 @@ ---- -title: Overview -summary: Overview -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Overview - -A dictionary is used to define stop words, that is, words to be ignored in full-text retrieval. - -A dictionary can also be used to normalize words so that different derived forms of the same word will match. A normalized word is called a lexeme. - -In addition to improving retrieval quality, normalization and removal of stop words can reduce the size of the **tsvector** representation of a document, thereby improving performance. Normalization and removal of stop words do not always have linguistic meaning. Users can define normalization and removal rules in dictionary definition files based on application environments. - -A dictionary is a program that receives a token as input and returns: - -- An array of lexemes if the input token is known to the dictionary (note that one token can produce more than one lexeme).
-- A single lexeme A new token is passed to the subsequent dictionary (the current dictionary may be referred to as a filter dictionary) in place of the input token. -- An empty array if the input token is known to the dictionary but is a stop word. -- **NULL** if the dictionary does not recognize the token. - -MogDB provides predefined dictionaries for many languages and also provides five predefined dictionary templates, **Simple**, **Synonym**, **Thesaurus**, **Ispell**, and **Snowball**. These templates can be used to create new dictionaries with custom parameters. - -When using full-text retrieval, you are advised to: - -- In the text search configuration, configure a parser together with a set of dictionaries to process the parser's output tokens. For each token type that the parser can return, a separate list of dictionaries is specified by the configuration. When a token of that type is found by the parser, each dictionary in the list is consulted in turn, until a dictionary recognizes it as a known word. If it is identified as a stop word, or no dictionary recognizes the token, it will be discarded and not indexed or searched for. Generally, the first dictionary that returns a non-**NULL** output determines the result, and any remaining dictionaries are not consulted. However, a filtering dictionary can replace the input token with a modified one, which is then passed to subsequent dictionaries. - -- The general rule for configuring a list of dictionaries is to place first the most narrow, most specific dictionary, then the more general dictionaries, finishing with a very general dictionary, like a **Snowball** stemmer dictionary or a **Simple** dictionary, which recognizes everything. In the following example, for an astronomy-specific search (**astro_en** configuration), you can configure the token type **asciiword** (ASCII word) with a **Synonym** dictionary of astronomical terms, a general English **Ispell** dictionary, and a **Snowball** English stemmer dictionary: - - ```sql - mogdb=# ALTER TEXT SEARCH CONFIGURATION astro_en - ADD MAPPING FOR asciiword WITH astro_syn, english_ispell, english_stem; - ``` - - A filtering dictionary can be placed anywhere in the list, except at the end where it would be useless. Filtering dictionaries are useful to partially normalize words to simplify the task of later dictionaries. +--- +title: Overview +summary: Overview +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Overview + +A dictionary is used to define stop words, that is, words to be ignored in full-text retrieval. + +A dictionary can also be used to normalize words so that different derived forms of the same word will match. A normalized word is called a lexeme. + +In addition to improving retrieval quality, normalization and removal of stop words can reduce the size of the **tsvector** representation of a document, thereby improving performance. Normalization and removal of stop words do not always have linguistic meaning. Users can define normalization and removal rules in dictionary definition files based on application environments. + +A dictionary is a program that receives a token as input and returns: + +- An array of lexemes if the input token is known to the dictionary (note that one token can produce more than one lexeme). +- A single lexeme if the dictionary is a filtering dictionary; in this case, a new token is passed to the subsequent dictionary in place of the input token. +- An empty array if the input token is known to the dictionary but is a stop word.
+- **NULL** if the dictionary does not recognize the token. + +MogDB provides predefined dictionaries for many languages and also provides five predefined dictionary templates, **Simple**, **Synonym**, **Thesaurus**, **Ispell**, and **Snowball**. These templates can be used to create new dictionaries with custom parameters. + +When using full-text retrieval, you are advised to: + +- In the text search configuration, configure a parser together with a set of dictionaries to process the parser's output tokens. For each token type that the parser can return, a separate list of dictionaries is specified by the configuration. When a token of that type is found by the parser, each dictionary in the list is consulted in turn, until a dictionary recognizes it as a known word. If it is identified as a stop word, or no dictionary recognizes the token, it will be discarded and not indexed or searched for. Generally, the first dictionary that returns a non-**NULL** output determines the result, and any remaining dictionaries are not consulted. However, a filtering dictionary can replace the input token with a modified one, which is then passed to subsequent dictionaries. + +- The general rule for configuring a list of dictionaries is to place first the most narrow, most specific dictionary, then the more general dictionaries, finishing with a very general dictionary, like a **Snowball** stemmer dictionary or a **Simple** dictionary, which recognizes everything. In the following example, for an astronomy-specific search (**astro_en** configuration), you can configure the token type **asciiword** (ASCII word) with a **Synonym** dictionary of astronomical terms, a general English **Ispell** dictionary, and a **Snowball** English stemmer dictionary: + + ```sql + mogdb=# ALTER TEXT SEARCH CONFIGURATION astro_en + ADD MAPPING FOR asciiword WITH astro_syn, english_ispell, english_stem; + ``` + + A filtering dictionary can be placed anywhere in the list, except at the end where it would be useless. Filtering dictionaries are useful to partially normalize words to simplify the task of later dictionaries. 
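+ +To verify how such a configuration handles real input, the built-in **ts_debug** function lists, token by token, which dictionaries are consulted and which lexemes result. A minimal sketch, assuming the **astro_en** configuration and its dictionaries from the example above exist: + +```sql +-- A hedged sketch: inspect the dictionary consultation order for each token. +mogdb=# SELECT alias, token, dictionaries, lexemes +FROM ts_debug('astro_en', 'supernovae in the night sky'); +```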
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries.md index ed9f0f93..79d047e0 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries.md @@ -1,16 +1,16 @@ ---- -title: Dictionaries -summary: Dictionaries -author: zhang cuiping -date: 2023-04-07 ---- - -# Dictionaries - -- **[Overview](dictionaries-overview.md)** -- **[Stop Words](stop-words.md)** -- **[Simple Dictionary](simple-dictionary.md)** -- **[Synonym Dictionary](synonym-dictionary.md)** -- **[Thesaurus Dictionary](thesaurus-dictionary.md)** -- **[Ispell Dictionary](ispell-dictionary.md)** +--- +title: Dictionaries +summary: Dictionaries +author: zhang cuiping +date: 2023-04-07 +--- + +# Dictionaries + +- **[Overview](dictionaries-overview.md)** +- **[Stop Words](stop-words.md)** +- **[Simple Dictionary](simple-dictionary.md)** +- **[Synonym Dictionary](synonym-dictionary.md)** +- **[Thesaurus Dictionary](thesaurus-dictionary.md)** +- **[Ispell Dictionary](ispell-dictionary.md)** - **[Snowball Dictionary](snowball-dictionary.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/ispell-dictionary.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/ispell-dictionary.md index 8275ea56..f3b87324 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/ispell-dictionary.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/ispell-dictionary.md @@ -1,49 +1,49 @@ ---- -title: Ispell Dictionary -summary: Ispell Dictionary -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Ispell Dictionary - -An **Ispell** dictionary is a morphological dictionary, which can normalize different linguistic forms of a word into the same lexeme. For example, an English **Ispell** dictionary can match all declensions and conjugations of the search term **bank**, such as, **banking**, **banked**, **banks**, **banks'**, and **bank's**. - -MogDB does not provide any predefined **Ispell** dictionaries or dictionary files. The .dict files and .affix files support multiple open-source dictionary formats, including **Ispell**, **MySpell**, and **Hunspell**. - -## Procedure - -1. Obtain the dictionary definition file (.dict) and affix file (.affix). - - You can use an open-source dictionary. The name extensions of the open-source dictionary may be .aff and .dic. In this case, you need to change them to .affix and .dict. In addition, for some dictionary files (for example, Norwegian dictionary files), you need to run the following commands to convert the character encoding to UTF-8: - - ```bash - iconv -f ISO_8859-1 -t UTF-8 -o nn_no.affix nn_NO.aff - iconv -f ISO_8859-1 -t UTF-8 -o nn_no.dict nn_NO.dic - ``` - -2. Create an **Ispell** dictionary. - - ```sql - MogDB=# CREATE TEXT SEARCH DICTIONARY norwegian_ispell ( - TEMPLATE = ispell, - DictFile = nn_no, - AffFile = nn_no, - FilePath = 'file:///home/dicts' - ); - ``` - - The full names of the **Ispell** dictionary files are **nn_no.dict** and **nn_no.affix**, and the dictionary is stored in the **/home/dicts/** directory of the current database primary node. 
For details about the syntax and parameters for creating an dictionary, see [CREATE TEXT SEARCH DICTIONARY](../../../../reference-guide/sql-syntax/CREATE-TEXT-SEARCH-DICTIONARY.md). - -3. Use the **Ispell** dictionary to split compound words. - - ```sql - MogDB=# SELECT ts_lexize('norwegian_ispell', 'sjokoladefabrikk'); - ts_lexize - --------------------- - {sjokolade,fabrikk} - (1 row) - - ``` - - **MySpell** does not support compound words. **Hunspell** supports compound words. MogDB supports only the basic compound word operations of **Hunspell**. Generally, **Ispell** dictionaries recognize a limited set of words, so they should be followed by another broader dictionary, for example, a **Snowball** dictionary, which recognizes everything. +--- +title: Ispell Dictionary +summary: Ispell Dictionary +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Ispell Dictionary + +An **Ispell** dictionary is a morphological dictionary, which can normalize different linguistic forms of a word into the same lexeme. For example, an English **Ispell** dictionary can match all declensions and conjugations of the search term **bank**, such as **banking**, **banked**, **banks**, **banks'**, and **bank's**. + +MogDB does not provide any predefined **Ispell** dictionaries or dictionary files. The .dict files and .affix files support multiple open-source dictionary formats, including **Ispell**, **MySpell**, and **Hunspell**. + +## Procedure + +1. Obtain the dictionary definition file (.dict) and affix file (.affix). + + You can use an open-source dictionary. The name extensions of the open-source dictionary may be .aff and .dic. In this case, you need to change them to .affix and .dict. In addition, for some dictionary files (for example, Norwegian dictionary files), you need to run the following commands to convert the character encoding to UTF-8: + + ```bash + iconv -f ISO_8859-1 -t UTF-8 -o nn_no.affix nn_NO.aff + iconv -f ISO_8859-1 -t UTF-8 -o nn_no.dict nn_NO.dic + ``` + +2. Create an **Ispell** dictionary. + + ```sql + MogDB=# CREATE TEXT SEARCH DICTIONARY norwegian_ispell ( + TEMPLATE = ispell, + DictFile = nn_no, + AffFile = nn_no, + FilePath = 'file:///home/dicts' + ); + ``` + + The full names of the **Ispell** dictionary files are **nn_no.dict** and **nn_no.affix**, and the dictionary is stored in the **/home/dicts/** directory of the current database primary node. For details about the syntax and parameters for creating a dictionary, see [CREATE TEXT SEARCH DICTIONARY](../../../../reference-guide/sql-syntax/CREATE-TEXT-SEARCH-DICTIONARY.md). + +3. Use the **Ispell** dictionary to split compound words. + + ```sql + MogDB=# SELECT ts_lexize('norwegian_ispell', 'sjokoladefabrikk'); + ts_lexize + --------------------- + {sjokolade,fabrikk} + (1 row) + + ``` + + **MySpell** does not support compound words. **Hunspell** supports compound words. MogDB supports only the basic compound word operations of **Hunspell**. Generally, **Ispell** dictionaries recognize a limited set of words, so they should be followed by another broader dictionary, for example, a **Snowball** dictionary, which recognizes everything.
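+ +Following that advice, here is a minimal sketch of such a chain, assuming a custom **norwegian** text search configuration exists and that the predefined **norwegian_stem** **Snowball** dictionary is available; words that **norwegian_ispell** does not recognize fall through to the stemmer: + +```sql +-- A hedged sketch: chain the narrow Ispell dictionary before a broad stemmer. +MogDB=# ALTER TEXT SEARCH CONFIGURATION norwegian + ALTER MAPPING FOR asciiword, word + WITH norwegian_ispell, norwegian_stem; +```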
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/simple-dictionary.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/simple-dictionary.md index 3ed2f0ea..71a4065f 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/simple-dictionary.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/simple-dictionary.md @@ -1,64 +1,64 @@ ---- -title: Simple Dictionary -summary: Simple Dictionary -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Simple Dictionary - -A **Simple** dictionary operates by converting the input token to lower case and checking it against a list of stop words. If the token is found in the list, an empty array will be returned, causing the token to be discarded. If it is not found, the lower-cased form of the word is returned as the normalized lexeme. In addition, you can set **Accept** to **false** for **Simple** dictionaries (default: **true**) to report non-stop-words as unrecognized, allowing them to be passed on to the next dictionary in the list. - -## Precautions - -- Most types of dictionaries rely on dictionary configuration files. The name of a configuration file can only be lowercase letters, digits, and underscores (_). -- A dictionary cannot be created in **pg_temp** mode. -- Dictionary configuration files must be stored in UTF-8 encoding. They will be translated to the actual database encoding, if that is different, when they are read into the server. -- Generally, a session will read a dictionary configuration file only once, when it is first used within the session. To modify a configuration file, run the **ALTER TEXT SEARCH DICTIONARY** statement to update and reload the file. - -## Procedure - -1. Create a **Simple** dictionary. - - ```sql - MogDB=# CREATE TEXT SEARCH DICTIONARY public.simple_dict ( - TEMPLATE = pg_catalog.simple, - STOPWORDS = english - ); - ``` - - **english.stop** is the full name of a file of stop words. For details about the syntax and parameters for creating a **Simple** dictionary, see [CREATE TEXT SEARCH DICTIONARY](../../../../reference-guide/sql-syntax/CREATE-TEXT-SEARCH-DICTIONARY.md). - -2. Use the **Simple** dictionary. - - ```sql - MogDB=# SELECT ts_lexize('public.simple_dict','YeS'); - ts_lexize - ----------- - {yes} - (1 row) - - MogDB=# SELECT ts_lexize('public.simple_dict','The'); - ts_lexize - ----------- - {} - (1 row) - ``` - -3. Set **Accept=false** so that the **Simple** dictionary returns **NULL** instead of a lower-cased non-stop word. - - ```sql - MogDB=# ALTER TEXT SEARCH DICTIONARY public.simple_dict ( Accept = false ); - ALTER TEXT SEARCH DICTIONARY - MogDB=# SELECT ts_lexize('public.simple_dict','YeS'); - ts_lexize - ----------- - - (1 row) - - MogDB=# SELECT ts_lexize('public.simple_dict','The'); - ts_lexize - ----------- - {} - (1 row) - ``` +--- +title: Simple Dictionary +summary: Simple Dictionary +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Simple Dictionary + +A **Simple** dictionary operates by converting the input token to lower case and checking it against a list of stop words. If the token is found in the list, an empty array will be returned, causing the token to be discarded. If it is not found, the lower-cased form of the word is returned as the normalized lexeme. 
In addition, you can set **Accept** to **false** for **Simple** dictionaries (default: **true**) to report non-stop-words as unrecognized, allowing them to be passed on to the next dictionary in the list. + +## Precautions + +- Most types of dictionaries rely on dictionary configuration files. The name of a configuration file can contain only lowercase letters, digits, and underscores (_). +- A dictionary cannot be created in the **pg_temp** schema. +- Dictionary configuration files must be stored in UTF-8 encoding. They will be translated to the actual database encoding, if that is different, when they are read into the server. +- Generally, a session will read a dictionary configuration file only once, when it is first used within the session. To modify a configuration file, run the **ALTER TEXT SEARCH DICTIONARY** statement to update and reload the file. + +## Procedure + +1. Create a **Simple** dictionary. + + ```sql + MogDB=# CREATE TEXT SEARCH DICTIONARY public.simple_dict ( + TEMPLATE = pg_catalog.simple, + STOPWORDS = english + ); + ``` + + **english.stop** is the full name of a file of stop words. For details about the syntax and parameters for creating a **Simple** dictionary, see [CREATE TEXT SEARCH DICTIONARY](../../../../reference-guide/sql-syntax/CREATE-TEXT-SEARCH-DICTIONARY.md). + +2. Use the **Simple** dictionary. + + ```sql + MogDB=# SELECT ts_lexize('public.simple_dict','YeS'); + ts_lexize + ----------- + {yes} + (1 row) + + MogDB=# SELECT ts_lexize('public.simple_dict','The'); + ts_lexize + ----------- + {} + (1 row) + ``` + +3. Set **Accept=false** so that the **Simple** dictionary returns **NULL** instead of a lower-cased non-stop word. + + ```sql + MogDB=# ALTER TEXT SEARCH DICTIONARY public.simple_dict ( Accept = false ); + ALTER TEXT SEARCH DICTIONARY + MogDB=# SELECT ts_lexize('public.simple_dict','YeS'); + ts_lexize + ----------- + + (1 row) + + MogDB=# SELECT ts_lexize('public.simple_dict','The'); + ts_lexize + ----------- + {} + (1 row) + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/snowball-dictionary.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/snowball-dictionary.md index 0de4c0fd..ebc3dc5d 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/snowball-dictionary.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/snowball-dictionary.md @@ -1,14 +1,14 @@ ---- -title: Snowball Dictionary -summary: Snowball Dictionary -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Snowball Dictionary - -A **Snowball** dictionary is based on a project by Martin Porter and is used for stem analysis, providing stemming algorithms for many languages. MogDB provides predefined **Snowball** dictionaries of many languages. You can query the [PG_TS_DICT](../../../../reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_DICT.md) system catalog to view the predefined **Snowball** dictionaries and supported stemming algorithms. - -A **Snowball** dictionary recognizes everything, no matter whether it is able to simplify the word. Therefore, it should be placed at the end of the dictionary list. It is useless to place it before any other dictionary because a token will never pass it through to the next dictionary. - -For details about the syntax of **Snowball** dictionaries, see [CREATE TEXT SEARCH DICTIONARY](../../../../reference-guide/sql-syntax/CREATE-TEXT-SEARCH-DICTIONARY.md).
+--- +title: Snowball Dictionary +summary: Snowball Dictionary +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Snowball Dictionary + +A **Snowball** dictionary is based on a project by Martin Porter and is used for stem analysis, providing stemming algorithms for many languages. MogDB provides predefined **Snowball** dictionaries of many languages. You can query the [PG_TS_DICT](../../../../reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_DICT.md) system catalog to view the predefined **Snowball** dictionaries and supported stemming algorithms. + +A **Snowball** dictionary recognizes everything, regardless of whether it is able to simplify the word. Therefore, it should be placed at the end of the dictionary list. It is useless to place it before any other dictionary because a token will never pass it through to the next dictionary. + +For details about the syntax of **Snowball** dictionaries, see [CREATE TEXT SEARCH DICTIONARY](../../../../reference-guide/sql-syntax/CREATE-TEXT-SEARCH-DICTIONARY.md). diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/stop-words.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/stop-words.md index 0d8ec268..d7cfb2e7 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/stop-words.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/stop-words.md @@ -1,33 +1,33 @@ ---- -title: Stop Words -summary: Stop Words -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Stop Words - -Stop words are words that are very common, appear in almost every document, and have no discrimination value. Therefore, they can be ignored in the context of full text searching. Each type of dictionaries treats stop words in different ways. For example, **Ispell** dictionaries first normalize words and then check the list of stop words, while **Snowball** dictionaries first check the list of stop words. - -For example, every English text contains words like **a** and **the**, so it is useless to store them in an index. However, stop words affect the positions in **tsvector**, which in turn affect ranking. - -```sql -mogdb=# SELECT to_tsvector('english','in the list of stop words'); - to_tsvector ----------------------------- - 'list':3 'stop':5 'word':6 -``` - -The missing positions 1, 2, and 4 are because of stop words. Ranks calculated for documents with and without stop words are quite different: - -```sql -mogdb=# SELECT ts_rank_cd (to_tsvector('english','in the list of stop words'), to_tsquery('list & stop')); - ts_rank_cd ------------- - .05 - -mogdb=# SELECT ts_rank_cd (to_tsvector('english','list stop words'), to_tsquery('list & stop')); - ts_rank_cd ------------- - .1 -``` +--- +title: Stop Words +summary: Stop Words +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Stop Words + +Stop words are words that are very common, appear in almost every document, and have no discrimination value. Therefore, they can be ignored in the context of full text searching. Each type of dictionary treats stop words differently. For example, **Ispell** dictionaries first normalize words and then check the list of stop words, while **Snowball** dictionaries first check the list of stop words. + +For example, every English text contains words like **a** and **the**, so it is useless to store them in an index.
However, stop words affect the positions in **tsvector**, which in turn affect ranking. + +```sql +mogdb=# SELECT to_tsvector('english','in the list of stop words'); + to_tsvector +---------------------------- + 'list':3 'stop':5 'word':6 +``` + +Positions 1, 2, and 4 are missing because of stop words. Ranks calculated for documents with and without stop words are quite different: + +```sql +mogdb=# SELECT ts_rank_cd (to_tsvector('english','in the list of stop words'), to_tsquery('list & stop')); + ts_rank_cd +------------ + .05 + +mogdb=# SELECT ts_rank_cd (to_tsvector('english','list stop words'), to_tsquery('list & stop')); + ts_rank_cd +------------ + .1 +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/thesaurus-dictionary.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/thesaurus-dictionary.md index 7645ba30..5ea911a1 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/thesaurus-dictionary.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/dictionaries/thesaurus-dictionary.md @@ -1,92 +1,92 @@ ---- -title: Thesaurus Dictionary -summary: Thesaurus Dictionary -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Thesaurus Dictionary - -A **Thesaurus** dictionary (sometimes abbreviated as TZ) is a collection of relationships between words and phrases, such as broader terms (BT), narrower terms (NT), preferred terms, non-preferred terms, and related terms. Based on definitions in the dictionary file, a TZ replaces all non-preferred terms by one preferred term and, optionally, preserves the original terms for indexing as well. A TZ is an extension of a **Synonym** dictionary with added phrase support. - -## Precautions - -- A TZ has the capability to recognize phrases and therefore it must remember its state and interact with the parser to determine whether to handle the next token or stop accumulation. A TZ must be configured carefully. For example, if a TZ is configured to handle only **asciiword** tokens, a TZ definition like **one 7** will not work because the token type **uint** is not assigned to the TZ. -- TZs are used during indexing, so any change in the TZ's parameters requires reindexing. For most other dictionary types, small changes such as adding or removing stop words does not force reindexing. - -## Procedure - -1. Create a TZ named **thesaurus_astro**. - - **thesaurus_astro** is a simple astronomical TZ that defines two astronomical word combinations (word+synonym). - - ```sql - supernovae stars : sn - crab nebulae : crab - ``` - - Run the following statement to create the TZ: - - ```sql - MogDB=# CREATE TEXT SEARCH DICTIONARY thesaurus_astro ( - TEMPLATE = thesaurus, - DictFile = thesaurus_astro, - Dictionary = pg_catalog.english_stem, - FILEPATH = 'file:///home/dicts/' - ); - ``` - - The full name of the TZ file is **thesaurus_astro.ths**, and the TZ is stored in the **/home/dicts/** directory of the current database primary node. **pg_catalog.english_stem** is the subdictionary (a **Snowball** English stemmer) used for input normalization. The subdictionary has its own configuration (for example, stop words), which is not shown here. For details about the syntax and parameters for creating an **Ispell** dictionary, see [CREATE TEXT SEARCH DICTIONARY](../../../../reference-guide/sql-syntax/CREATE-TEXT-SEARCH-DICTIONARY.md). - -2. 
Bind the TZ to the desired token types in the text search configuration. - - ```sql - MogDB=# ALTER TEXT SEARCH CONFIGURATION russian - ALTER MAPPING FOR asciiword, asciihword, hword_asciipart - WITH thesaurus_astro, english_stem; - ``` - -3. Use the TZ. - - - Test the TZ. - - The **ts_lexize** function is not very useful for testing the TZ because the function processes its input as a single token. Instead, you can use the **plainto_tsquery**, **to_tsvector**, or **to_tsquery** function which will break their input strings into multiple tokens. - - ```sql - MogDB=# SELECT plainto_tsquery('russian','supernova star'); - plainto_tsquery - ----------------- - 'sn' - (1 row) - - MogDB=# SELECT to_tsvector('russian','supernova star'); - to_tsvector - ------------- - 'sn':1 - (1 row) - - MogDB=# SELECT to_tsquery('russian','''supernova star'''); - to_tsquery - ------------ - 'sn' - (1 row) - - ``` - - **supernova star** matches **supernovae stars** in **thesaurus_astro** because the Snowball **english_stem** stemmer is specified in the **thesaurus_astro** definition. The stemmer removed **e** and **s**. - - - To index the original phrase, include it in the right-hand part of the definition. - - ```sql - supernovae stars : sn supernovae stars - - MogDB=# ALTER TEXT SEARCH DICTIONARY thesaurus_astro ( - DictFile = thesaurus_astro, - FILEPATH = 'file:///home/dicts/'); - - MogDB=# SELECT plainto_tsquery('russian','supernova star'); - plainto_tsquery - ----------------------------- - 'sn' & 'supernova' & 'star' - (1 row) - ``` +--- +title: Thesaurus Dictionary +summary: Thesaurus Dictionary +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Thesaurus Dictionary + +A **Thesaurus** dictionary (sometimes abbreviated as TZ) is a collection of relationships between words and phrases, such as broader terms (BT), narrower terms (NT), preferred terms, non-preferred terms, and related terms. Based on definitions in the dictionary file, a TZ replaces all non-preferred terms by one preferred term and, optionally, preserves the original terms for indexing as well. A TZ is an extension of a **Synonym** dictionary with added phrase support. + +## Precautions + +- A TZ has the capability to recognize phrases and therefore it must remember its state and interact with the parser to determine whether to handle the next token or stop accumulation. A TZ must be configured carefully. For example, if a TZ is configured to handle only **asciiword** tokens, a TZ definition like **one 7** will not work because the token type **uint** is not assigned to the TZ. +- TZs are used during indexing, so any change in the TZ's parameters requires reindexing. For most other dictionary types, small changes such as adding or removing stop words does not force reindexing. + +## Procedure + +1. Create a TZ named **thesaurus_astro**. + + **thesaurus_astro** is a simple astronomical TZ that defines two astronomical word combinations (word+synonym). + + ```sql + supernovae stars : sn + crab nebulae : crab + ``` + + Run the following statement to create the TZ: + + ```sql + MogDB=# CREATE TEXT SEARCH DICTIONARY thesaurus_astro ( + TEMPLATE = thesaurus, + DictFile = thesaurus_astro, + Dictionary = pg_catalog.english_stem, + FILEPATH = 'file:///home/dicts/' + ); + ``` + + The full name of the TZ file is **thesaurus_astro.ths**, and the TZ is stored in the **/home/dicts/** directory of the current database primary node. **pg_catalog.english_stem** is the subdictionary (a **Snowball** English stemmer) used for input normalization. 
The subdictionary has its own configuration (for example, stop words), which is not shown here. For details about the syntax and parameters for creating a **Thesaurus** dictionary, see [CREATE TEXT SEARCH DICTIONARY](../../../../reference-guide/sql-syntax/CREATE-TEXT-SEARCH-DICTIONARY.md). + +2. Bind the TZ to the desired token types in the text search configuration. + + ```sql + MogDB=# ALTER TEXT SEARCH CONFIGURATION russian + ALTER MAPPING FOR asciiword, asciihword, hword_asciipart + WITH thesaurus_astro, english_stem; + ``` + +3. Use the TZ. + + - Test the TZ. + + The **ts_lexize** function is not very useful for testing the TZ because the function processes its input as a single token. Instead, you can use the **plainto_tsquery**, **to_tsvector**, or **to_tsquery** functions, which break their input strings into multiple tokens. + + ```sql + MogDB=# SELECT plainto_tsquery('russian','supernova star'); + plainto_tsquery + ----------------- + 'sn' + (1 row) + + MogDB=# SELECT to_tsvector('russian','supernova star'); + to_tsvector + ------------- + 'sn':1 + (1 row) + + MogDB=# SELECT to_tsquery('russian','''supernova star'''); + to_tsquery + ------------ + 'sn' + (1 row) + + ``` + + **supernova star** matches **supernovae stars** in **thesaurus_astro** because the Snowball **english_stem** stemmer is specified in the **thesaurus_astro** definition. The stemmer removed **e** and **s**. + + - To index the original phrase, include it in the right-hand part of the definition. + + ```sql + supernovae stars : sn supernovae stars + + MogDB=# ALTER TEXT SEARCH DICTIONARY thesaurus_astro ( + DictFile = thesaurus_astro, + FILEPATH = 'file:///home/dicts/'); + + MogDB=# SELECT plainto_tsquery('russian','supernova star'); + plainto_tsquery + ----------------------------- + 'sn' & 'supernova' & 'star' + (1 row) + ```
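+ +Because TZs are used during indexing (see Precautions above), any index built with a configuration that uses **thesaurus_astro** must be rebuilt after such a change. A minimal sketch, with a hypothetical index name: + +```sql +-- A hedged sketch: pgweb_body_idx is a hypothetical index built with the russian configuration. +MogDB=# REINDEX INDEX pgweb_body_idx; +```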
**[Testing and Debugging Text Search](./testing-and-debugging-text-search/testing-and-debugging-text-search.md)** - **[Limitations](limitations.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/basic-text-matching.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/basic-text-matching.md index be176f3e..c793274c 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/basic-text-matching.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/basic-text-matching.md @@ -1,56 +1,56 @@ ---- -title: Basic Text Matching -summary: Basic Text Matching -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Basic Text Matching - -Full text search in MogDB is based on the match operator **@@**, which returns **true** if a **tsvector** (document) matches a **tsquery** (query). It does not matter which data type is written first: - -```sql -MogDB=# SELECT 'a fat cat sat on a mat and ate a fat rat'::tsvector @@ 'cat & rat'::tsquery AS RESULT; - result ----------- - t -(1 row) -MogDB=# SELECT 'fat & cow'::tsquery @@ 'a fat cat sat on a mat and ate a fat rat'::tsvector AS RESULT; - result ----------- - f -(1 row) -``` - -As the above example suggests, a **tsquery** is not raw text, any more than a **tsvector** is. A tsquery contains search terms, which must be already-normalized lexemes, and may combine multiple terms using **AND**, **OR**, and **NOT** operators. For details, see **Text Search Types**. There are functions **to_tsquery** and **plainto_tsquery** that are helpful in converting user-written text into a proper tsquery, for example by normalizing words appearing in the text. Similarly, **to_tsvector** is used to parse and normalize a document string. So in practice a text search match would look more like this: - -```sql -MogDB=# SELECT to_tsvector('fat cats ate fat rats') @@ to_tsquery('fat & rat') AS RESULT; -result ----------- - t -(1 row) -``` - -Observe that this match would not succeed if written as follows: - -```sql -MogDB=# SELECT 'fat cats ate fat rats'::tsvector @@ to_tsquery('fat & rat')AS RESULT; -result ----------- - f -(1 row) -``` - -In the preceding match, no normalization of the word **rats** will occur. Therefore, **rats** does not match **rat**. - -The **@@** operator also supports text input, allowing explicit conversion of a text string to **tsvector** or **tsquery** to be skipped in simple cases. The variants available are: - -``` -tsvector @@ tsquery -tsquery @@ tsvector -text @@ tsquery -text @@ text -``` - -We already saw the first two of these. The form **text @@ tsquery** is equivalent to **to_tsvector(text) @@ tsquery**. The form **text @@ text** is equivalent to **to_tsvector(text) @@ plainto_tsquery(text)**. +--- +title: Basic Text Matching +summary: Basic Text Matching +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Basic Text Matching + +Full text search in MogDB is based on the match operator **@@**, which returns **true** if a **tsvector** (document) matches a **tsquery** (query). 
It does not matter which data type is written first: + +```sql +MogDB=# SELECT 'a fat cat sat on a mat and ate a fat rat'::tsvector @@ 'cat & rat'::tsquery AS RESULT; + result +---------- + t +(1 row) +MogDB=# SELECT 'fat & cow'::tsquery @@ 'a fat cat sat on a mat and ate a fat rat'::tsvector AS RESULT; + result +---------- + f +(1 row) +``` + +As the above example suggests, a **tsquery** is not raw text, any more than a **tsvector** is. A tsquery contains search terms, which must be already-normalized lexemes, and may combine multiple terms using **AND**, **OR**, and **NOT** operators. For details, see **Text Search Types**. There are functions **to_tsquery** and **plainto_tsquery** that are helpful in converting user-written text into a proper tsquery, for example by normalizing words appearing in the text. Similarly, **to_tsvector** is used to parse and normalize a document string. So in practice a text search match would look more like this: + +```sql +MogDB=# SELECT to_tsvector('fat cats ate fat rats') @@ to_tsquery('fat & rat') AS RESULT; +result +---------- + t +(1 row) +``` + +Observe that this match would not succeed if written as follows: + +```sql +MogDB=# SELECT 'fat cats ate fat rats'::tsvector @@ to_tsquery('fat & rat') AS RESULT; +result +---------- + f +(1 row) +``` + +In the preceding match, no normalization of the word **rats** will occur. Therefore, **rats** does not match **rat**. + +The **@@** operator also supports text input, allowing explicit conversion of a text string to **tsvector** or **tsquery** to be skipped in simple cases. The variants available are: + +``` +tsvector @@ tsquery +tsquery @@ tsvector +text @@ tsquery +text @@ text +``` + +We already saw the first two of these. The form **text @@ tsquery** is equivalent to **to_tsvector(text) @@ tsquery**. The form **text @@ text** is equivalent to **to_tsvector(text) @@ plainto_tsquery(text)**. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/configurations.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/configurations.md index e5585ec1..3f903bf4 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/configurations.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/configurations.md @@ -1,21 +1,21 @@ ---- -title: Configurations -summary: Configurations -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Configurations - -Full text search functionality includes the ability to do many more things: skip indexing certain words (stop words), process synonyms, and use sophisticated parsing, for example, parse based on more than just white space. This functionality is controlled by text search configurations. MogDB comes with predefined configurations for many languages, and you can easily create your own configurations. (The **\\dF** command of **gsql** shows all available configurations.) - -During installation an appropriate configuration is selected and **default_text_search_config** is set accordingly in **postgresql.conf**. If you are using the same text search configuration for MogDB, you can use the value in **postgresql.conf**. To use different configurations throughout MogDB but the same configuration within any one database, use **ALTER DATABASE …** **SET**. Otherwise, you can set **default_text_search_config** in each session.
- -Each text search function that depends on a configuration has an optional argument, so that the configuration to use can be specified explicitly. **default_text_search_config** is used only when this argument is omitted. - -To make it easier to build custom text search configurations, a configuration is built up from simpler database objects. MogDB's text search facility provides the following types of configuration-related database objects: - -- Text search parsers break documents into tokens and classify each token (for example, as words or numbers). -- Text search dictionaries convert tokens to normalized form and reject stop words. -- Text search templates provide the functions underlying dictionaries. (A dictionary simply specifies a template and a set of parameters for the template.) -- Text search configurations select a parser and a set of dictionaries to use to normalize the tokens produced by the parser. +--- +title: Configurations +summary: Configurations +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Configurations + +Full text search functionality includes the ability to do many more things: skip indexing certain words (stop words), process synonyms, and use sophisticated parsing, for example, parse based on more than just white space. This functionality is controlled by text search configurations. MogDB comes with predefined configurations for many languages, and you can easily create your own configurations. (The **\\dF** command of **gsql** shows all available configurations.) + +During installation an appropriate configuration is selected and **default_text_search_config** is set accordingly in **postgresql.conf**. If you use the same text search configuration throughout MogDB, you can simply use the value in **postgresql.conf**. To use different configurations throughout MogDB but the same configuration within any one database, use **ALTER DATABASE …** **SET**. Otherwise, you can set **default_text_search_config** in each session. + +Each text search function that depends on a configuration has an optional argument, so that the configuration to use can be specified explicitly. **default_text_search_config** is used only when this argument is omitted. + +To make it easier to build custom text search configurations, a configuration is built up from simpler database objects. MogDB's text search facility provides the following types of configuration-related database objects: + +- Text search parsers break documents into tokens and classify each token (for example, as words or numbers). +- Text search dictionaries convert tokens to normalized form and reject stop words. +- Text search templates provide the functions underlying dictionaries. (A dictionary simply specifies a template and a set of parameters for the template.) +- Text search configurations select a parser and a set of dictionaries to use to normalize the tokens produced by the parser.
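+ +For example, a minimal sketch of setting the configuration for the current session only: + +```sql +-- Text search functions called without an explicit configuration argument use this value. +MogDB=# SET default_text_search_config = 'pg_catalog.english'; +MogDB=# SHOW default_text_search_config; +```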
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/full-text-search-introduction.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/full-text-search-introduction.md index 31d8ebe9..46ef5ddc 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/full-text-search-introduction.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/full-text-search-introduction.md @@ -1,13 +1,13 @@ ---- -title: Introduction -summary: Introduction -author: zhang cuiping -date: 2023-04-07 ---- - -# Introduction - -- **[Full-Text Retrieval](full-text-retrieval.md)** -- **[What Is a Document](what-is-a-document.md)** -- **[Basic Text Matching](basic-text-matching.md)** +--- +title: Introduction +summary: Introduction +author: zhang cuiping +date: 2023-04-07 +--- + +# Introduction + +- **[Full-Text Retrieval](full-text-retrieval.md)** +- **[What Is a Document](what-is-a-document.md)** +- **[Basic Text Matching](basic-text-matching.md)** - **[Configurations](configurations.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/what-is-a-document.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/what-is-a-document.md index b6a0c10a..b8130296 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/what-is-a-document.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/introduction/what-is-a-document.md @@ -1,33 +1,33 @@ ---- -title: What Is a Document? -summary: What Is a Document? -author: Zhang Cuiping -date: 2021-05-17 ---- - -# What Is a Document? - -A document is the unit of searching in a full text search system; for example, a magazine article or email message. The text search engine must be able to parse documents and store associations of lexemes (keywords) with their parent document. Later, these associations are used to search for documents that contain query words. - -For searches within MogDB, a document is normally a textual column within a row of a database table, or possibly a combination (concatenation) of such columns, perhaps stored in several tables or obtained dynamically. In other words, a document can be constructed from different parts for indexing and it might not be stored anywhere as a whole. Example: - -```sql -mogdb=# SELECT d_dow || '-' || d_dom || '-' || d_fy_week_seq AS identify_serials FROM tpcds.date_dim WHERE d_fy_week_seq = 1; -identify_serials ------------------- - 5-6-1 - 0-8-1 - 2-3-1 - 3-4-1 - 4-5-1 - 1-2-1 - 6-7-1 -(7 rows) -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> Actually, in these example queries, **coalesce** should be used to prevent a single **NULL** attribute from causing a **NULL** result for the whole document. - -Another possibility is to store the documents as simple text files in the file system. In this case, the database can be used to store the full text index and to execute searches, and some unique identifier can be used to retrieve the document from the file system. However, retrieving files from outside the database requires system administrator permissions or special function support, so this is usually less convenient than keeping all the data inside the database. 
Also, keeping everything inside the database allows easy access to document metadata to assist in indexing and display. - -For text search purposes, each document must be reduced to the preprocessed **tsvector** format. Searching and relevance-based ranking are performed entirely on the **tsvector** representation of a document. The original text is retrieved only when the document has been selected for display to a user. We therefore often speak of the **tsvector** as being the document, but it is only a compact representation of the full document. +--- +title: What Is a Document? +summary: What Is a Document? +author: Zhang Cuiping +date: 2021-05-17 +--- + +# What Is a Document? + +A document is the unit of searching in a full text search system; for example, a magazine article or email message. The text search engine must be able to parse documents and store associations of lexemes (keywords) with their parent document. Later, these associations are used to search for documents that contain query words. + +For searches within MogDB, a document is normally a textual column within a row of a database table, or possibly a combination (concatenation) of such columns, perhaps stored in several tables or obtained dynamically. In other words, a document can be constructed from different parts for indexing and it might not be stored anywhere as a whole. Example: + +```sql +mogdb=# SELECT d_dow || '-' || d_dom || '-' || d_fy_week_seq AS identify_serials FROM tpcds.date_dim WHERE d_fy_week_seq = 1; +identify_serials +------------------ + 5-6-1 + 0-8-1 + 2-3-1 + 3-4-1 + 4-5-1 + 1-2-1 + 6-7-1 +(7 rows) +``` + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> Actually, in these example queries, **coalesce** should be used to prevent a single **NULL** attribute from causing a **NULL** result for the whole document. + +Another possibility is to store the documents as simple text files in the file system. In this case, the database can be used to store the full text index and to execute searches, and some unique identifier can be used to retrieve the document from the file system. However, retrieving files from outside the database requires system administrator permissions or special function support, so this is usually less convenient than keeping all the data inside the database. Also, keeping everything inside the database allows easy access to document metadata to assist in indexing and display. + +For text search purposes, each document must be reduced to the preprocessed **tsvector** format. Searching and relevance-based ranking are performed entirely on the **tsvector** representation of a document. The original text is retrieved only when the document has been selected for display to a user. We therefore often speak of the **tsvector** as being the document, but it is only a compact representation of the full document. 
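+ +As a sketch of building such a composite document, with a hypothetical **messages** table and **title**/**body** columns, **coalesce** keeps a single **NULL** column from nullifying the whole document, as the NOTICE above advises: + +```sql +-- A hedged sketch: concatenate columns defensively before indexing. +mogdb=# SELECT to_tsvector('english', coalesce(title, '') || ' ' || coalesce(body, '')) FROM messages; +```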
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/limitations.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/limitations.md index 9f0a1e5f..8548659a 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/limitations.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/limitations.md @@ -1,16 +1,16 @@ ---- -title: Limitations -summary: Limitations -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Limitations - -The current limitations of MogDB's text search features are: - -- The length of each lexeme must be less than 2 KB. -- The length of a **tsvector** (lexemes + positions) must be less than 1 megabyte. -- Position values in **tsvector** must be greater than 0 and no more than 16383. -- No more than 256 positions per lexeme. Excessive positions, if any, will be discarded. -- The number of nodes (lexemes + operators) in a tsquery must be less than 32768. +--- +title: Limitations +summary: Limitations +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Limitations + +The current limitations of MogDB's text search features are: + +- The length of each lexeme must be less than 2 KB. +- The length of a **tsvector** (lexemes + positions) must be less than 1 megabyte. +- Position values in **tsvector** must be greater than 0 and no more than 16383. +- No more than 256 positions per lexeme. Excessive positions, if any, will be discarded. +- The number of nodes (lexemes + operators) in a tsquery must be less than 32768. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/parser.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/parser.md index 9a39ddeb..a4c0fd1b 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/parser.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/parser.md @@ -1,108 +1,108 @@ ---- -title: Parser -summary: Parser -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Parser - -Text search parsers are responsible for splitting raw document text into tokens and identifying each token's type, where the type set is defined by the parser itself. Note that a parser does not modify the text; it simply identifies plausible word boundaries. Because of this limit, there is less need for application-specific custom parsers than there is for custom dictionaries. - -Currently, MogDB provides the following built-in parsers: pg_catalog.default for English configuration, and pg_catalog.ngram and pg_catalog.pound for full text search in texts containing Chinese, or both Chinese and English. - -The built-in parser is named **pg_catalog.default**. It recognizes 23 token types, shown in [Table 1](#defaultparser). 
- -**Table 1** Token types of the default parser - -| Alias | Description | Example | -| :--------------------------------------- | :-------------- | :------------------------------------------------------- | -| asciiword | Word, all ASCII letters | elephant | -| word | Word, all letters | mañana | -| numword | Word, letters and digits | beta1 | -| asciihword | Hyphenated word, all ASCII | up-to-date | -| hword | Hyphenated word, all letters | lógico-matemática | -| numhword | Hyphenated word, letters and digits | mogdb-beta1 | -| hword_asciipart | Hyphenated word part, all ASCII | mogdb in the context mogdb-beta1 | -| hword_part | Hyphenated word part, all letters | lógico or matemática in the context lógico-matemática | -| hword_numpart | Hyphenated word part, letters and digits | beta1 in the context mogdb-beta1 | -| email | Email address | foo@example.com | -| protocol | Protocol head | `http://` | -| url | URL | example.com/stuff/index.html | -| host | Host | example.com | -| url_path | URL path | /stuff/index.html, in the context of a URL | -| file | File or path name | /usr/local/foo.txt, if not within a URL | -| sfloat | Scientific notation | -1.23E+56 | -| float | Decimal notation | -1.234 | -| int | Signed integer | -1234 | -| uint | Unsigned integer | 1234 | -| version | Version number | 8.3.0 | -| tag | XML tag | <a href="dictionaries.html"> | -| entity | XML entity | & | -| blank | Space symbols | (any whitespace or punctuation not otherwise recognized) | - -Note: The parser's notion of a "letter" is determined by the database's locale setting, specifically **lc_ctype**. Words containing only the basic ASCII letters are reported as a separate token type, since it is sometimes useful to distinguish them. In most European languages, token types word and asciiword should be treated alike. - -**email** does not support all valid email characters as defined by RFC 5322. Specifically, the only non-alphanumeric characters supported for email user names are period, dash, and underscore. - -It is possible for the parser to identify overlapping tokens in the same piece of text. For example, a hyphenated word will be reported both as the entire word and as each component. - -```sql -mogdb=# SELECT alias, description, token FROM ts_debug('english','foo-bar-beta1'); - alias | description | token ------------------+------------------------------------------+--------------- - numhword | Hyphenated word, letters and digits | foo-bar-beta1 - hword_asciipart | Hyphenated word part, all ASCII | foo - blank | Space symbols | - - hword_asciipart | Hyphenated word part, all ASCII | bar - blank | Space symbols | - - hword_numpart | Hyphenated word part, letters and digits | beta1 -``` - -This behavior is desirable since it allows searches to work for both the whole compound word and for components. Here is another instructive example: - -```sql -mogdb=# SELECT alias, description, token FROM ts_debug('english','http://example.com/stuff/index.html'); - alias | description | token -----------+---------------+------------------------------ - protocol | Protocol head | http:// - url | URL | example.com/stuff/index.html - host | Host | example.com - url_path | URL path | /stuff/index.html -``` - -N-gram is a mechanical word segmentation method, and applies to no semantic Chinese segmentation scenarios. The N-gram segmentation method ensures the completeness of the segmentation. However, to cover all the possibilities, it adds unnecessary words to the index, resulting in a large number of index items. 
N-gram supports Chinese coding, including GBK and UTF-8, and has six built-in token types, as shown in [Table 2](#tokentypes). - -**Table 2** Token types - -| Alias | Description | -| :---------- | :-------------- | -| zh_words | chinese words | -| en_word | english word | -| numeric | numeric data | -| alnum | alnum string | -| grapsymbol | graphic symbol | -| multisymbol | multiple symbol | - -Pound segments words in a fixed format. It is used to segment to-be-parsed nonsense Chinese and English words that are separated by fixed separators. It supports Chinese encoding (including GBK and UTF8) and English encoding (including ASCII). Six built-in token types are available, as listed in [Table 3](#tokentypes3). Five types of delimiters are supported, as shown in [Table 4](#separatortypes), and the default delimiter is \#. The maximum length of a token is 256 characters. - -**Table 3** Token types - -| Alias | Description | -| :---------- | :-------------- | -| zh_words | chinese words | -| en_word | english word | -| numeric | numeric data | -| alnum | alnum string | -| grapsymbol | graphic symbol | -| multisymbol | multiple symbol | - -**Table 4** Separator types - -| Separator | Description | -| :-------- | :---------------- | -| @ | Special character | -| # | Special character | -| $ | Special character | -| % | Special character | -| / | Special character | +--- +title: Parser +summary: Parser +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Parser + +Text search parsers are responsible for splitting raw document text into tokens and identifying each token's type, where the type set is defined by the parser itself. Note that a parser does not modify the text; it simply identifies plausible word boundaries. Because of this limit, there is less need for application-specific custom parsers than there is for custom dictionaries. + +Currently, MogDB provides the following built-in parsers: pg_catalog.default for English configuration, and pg_catalog.ngram and pg_catalog.pound for full text search in texts containing Chinese, or both Chinese and English. + +The built-in parser is named **pg_catalog.default**. It recognizes 23 token types, shown in [Table 1](#defaultparser). 
+ +**Table 1** Token types of the default parser + +| Alias | Description | Example | +| :--------------------------------------- | :-------------- | :------------------------------------------------------- | +| asciiword | Word, all ASCII letters | elephant | +| word | Word, all letters | mañana | +| numword | Word, letters and digits | beta1 | +| asciihword | Hyphenated word, all ASCII | up-to-date | +| hword | Hyphenated word, all letters | lógico-matemática | +| numhword | Hyphenated word, letters and digits | mogdb-beta1 | +| hword_asciipart | Hyphenated word part, all ASCII | mogdb in the context mogdb-beta1 | +| hword_part | Hyphenated word part, all letters | lógico or matemática in the context lógico-matemática | +| hword_numpart | Hyphenated word part, letters and digits | beta1 in the context mogdb-beta1 | +| email | Email address | foo@example.com | +| protocol | Protocol head | `http://` | +| url | URL | example.com/stuff/index.html | +| host | Host | example.com | +| url_path | URL path | /stuff/index.html, in the context of a URL | +| file | File or path name | /usr/local/foo.txt, if not within a URL | +| sfloat | Scientific notation | -1.23E+56 | +| float | Decimal notation | -1.234 | +| int | Signed integer | -1234 | +| uint | Unsigned integer | 1234 | +| version | Version number | 8.3.0 | +| tag | XML tag | <a href="dictionaries.html"> | +| entity | XML entity | & | +| blank | Space symbols | (any whitespace or punctuation not otherwise recognized) | + +Note: The parser's notion of a "letter" is determined by the database's locale setting, specifically **lc_ctype**. Words containing only the basic ASCII letters are reported as a separate token type, since it is sometimes useful to distinguish them. In most European languages, token types word and asciiword should be treated alike. + +**email** does not support all valid email characters as defined by RFC 5322. Specifically, the only non-alphanumeric characters supported for email user names are period, dash, and underscore. + +It is possible for the parser to identify overlapping tokens in the same piece of text. For example, a hyphenated word will be reported both as the entire word and as each component. + +```sql +mogdb=# SELECT alias, description, token FROM ts_debug('english','foo-bar-beta1'); + alias | description | token +-----------------+------------------------------------------+--------------- + numhword | Hyphenated word, letters and digits | foo-bar-beta1 + hword_asciipart | Hyphenated word part, all ASCII | foo + blank | Space symbols | - + hword_asciipart | Hyphenated word part, all ASCII | bar + blank | Space symbols | - + hword_numpart | Hyphenated word part, letters and digits | beta1 +``` + +This behavior is desirable since it allows searches to work for both the whole compound word and for components. Here is another instructive example: + +```sql +mogdb=# SELECT alias, description, token FROM ts_debug('english','http://example.com/stuff/index.html'); + alias | description | token +----------+---------------+------------------------------ + protocol | Protocol head | http:// + url | URL | example.com/stuff/index.html + host | Host | example.com + url_path | URL path | /stuff/index.html +``` + +N-gram is a mechanical word segmentation method that applies to Chinese segmentation scenarios requiring no semantic analysis. The N-gram method ensures the completeness of segmentation. However, to cover all possibilities, it adds unnecessary words to the index, resulting in a large number of index entries.
N-gram supports Chinese encodings, including GBK and UTF-8, and has six built-in token types, as shown in [Table 2](#tokentypes). + +**Table 2** Token types + +| Alias | Description | +| :---------- | :-------------- | +| zh_words | chinese words | +| en_word | english word | +| numeric | numeric data | +| alnum | alnum string | +| grapsymbol | graphic symbol | +| multisymbol | multiple symbol | + +Pound segments words in a fixed format. It is used to split non-semantic Chinese and English text in which the words to be parsed are separated by fixed delimiters. It supports Chinese encodings (GBK and UTF-8) and English encoding (ASCII). Six built-in token types are available, as listed in [Table 3](#tokentypes3). Five types of delimiters are supported, as shown in [Table 4](#separatortypes), and the default delimiter is \#. The maximum length of a token is 256 characters. + +**Table 3** Token types + +| Alias | Description | +| :---------- | :-------------- | +| zh_words | chinese words | +| en_word | english word | +| numeric | numeric data | +| alnum | alnum string | +| grapsymbol | graphic symbol | +| multisymbol | multiple symbol | + +**Table 4** Separator types + +| Separator | Description | +| :-------- | :---------------- | +| @ | Special character | +| # | Special character | +| $ | Special character | +| % | Special character | +| / | Special character | diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/constraints-on-index-use.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/constraints-on-index-use.md index bda94447..87a29b2c 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/constraints-on-index-use.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/constraints-on-index-use.md @@ -1,44 +1,44 @@ ---- -title: Constraints on Index Use -summary: Constraints on Index Use -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Constraints on Index Use - -The following is an example of index use: - -```sql -mogdb=# create table table1 (c_int int,c_bigint bigint,c_varchar varchar,c_text text) with(orientation=row); - -mogdb=# create text search configuration ts_conf_1(parser=POUND); -mogdb=# create text search configuration ts_conf_2(parser=POUND) with(split_flag='%'); - -mogdb=# set default_text_search_config='ts_conf_1'; -mogdb=# create index idx1 on table1 using gin(to_tsvector(c_text)); - -mogdb=# set default_text_search_config='ts_conf_2'; -mogdb=# create index idx2 on table1 using gin(to_tsvector(c_text)); - -mogdb=# select c_varchar,to_tsvector(c_varchar) from table1 where to_tsvector(c_text) @@ plainto_tsquery('¥#@……&**') and to_tsvector(c_text) @@ plainto_tsquery('某公司 ') and c_varchar is not null order by 1 desc limit 3; -``` - -In this example, **table1** has two GIN indexes created on the same column **c_text**, **idx1** and **idx2**, but these two indexes are created under different settings of **default_text_search_config**. Differences between this example and the scenario where one table has common indexes created on the same column are as follows: - -- GIN indexes use different parsers (that is, different delimiters). In this case, the index data of **idx1** is different from that of **idx2**. -- In the specified scenario, the index data of multiple common indexes created on the same column is the same.
- -As a result, using **idx1** and **idx2** for the same query returns different results. - -## Constraints - -Still use the above example. When: - -- Multiple GIN indexes are created on the same column of the same table. - -- The GIN indexes use different parsers (that is, different delimiters). - -- The column is used in a query, and an index scan is used in the execution plan. - - To avoid different query results caused by different GIN indexes, ensure that only one GIN index is available on a column of the physical table. +--- +title: Constraints on Index Use +summary: Constraints on Index Use +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Constraints on Index Use + +The following is an example of index use: + +```sql +mogdb=# create table table1 (c_int int,c_bigint bigint,c_varchar varchar,c_text text) with(orientation=row); + +mogdb=# create text search configuration ts_conf_1(parser=POUND); +mogdb=# create text search configuration ts_conf_2(parser=POUND) with(split_flag='%'); + +mogdb=# set default_text_search_config='ts_conf_1'; +mogdb=# create index idx1 on table1 using gin(to_tsvector(c_text)); + +mogdb=# set default_text_search_config='ts_conf_2'; +mogdb=# create index idx2 on table1 using gin(to_tsvector(c_text)); + +mogdb=# select c_varchar,to_tsvector(c_varchar) from table1 where to_tsvector(c_text) @@ plainto_tsquery('¥#@……&**') and to_tsvector(c_text) @@ plainto_tsquery('某公司 ') and c_varchar is not null order by 1 desc limit 3; +``` + +In this example, **table1** has two GIN indexes created on the same column **c_text**, **idx1** and **idx2**, but these two indexes are created under different settings of **default_text_search_config**. Differences between this example and the scenario where one table has common indexes created on the same column are as follows: + +- GIN indexes use different parsers (that is, different delimiters). In this case, the index data of **idx1** is different from that of **idx2**. +- In the specified scenario, the index data of multiple common indexes created on the same column is the same. + +As a result, using **idx1** and **idx2** for the same query returns different results. + +## Constraints + +Consider the preceding example again. When all of the following conditions are met: + +- Multiple GIN indexes are created on the same column of the same table. + +- The GIN indexes use different parsers (that is, different delimiters). + +- The column is used in a query, and an index scan is used in the execution plan. + + To avoid different query results caused by different GIN indexes, ensure that only one GIN index is available on a column of the physical table. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/creating-an-index.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/creating-an-index.md index 7fc4a4d4..16f3059d 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/creating-an-index.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/creating-an-index.md @@ -1,65 +1,65 @@ ---- -title: Creating an Index -summary: Creating an Index -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Creating an Index - -You can create a **GIN** index to speed up text searches: - -```sql -mogdb=# CREATE INDEX pgweb_idx_1 ON tsearch.pgweb USING gin(to_tsvector('english', body)); -``` - -The **to_tsvector** function comes in to two versions: the 1-argument version and the 2-argument version.
When the 1-argument version is used, the system uses the configuration specified by **default_text_search_config** by default. - -Notice that the 2-argument version of **to_tsvector** is used for index creation. Only text search functions that specify a configuration name can be used in expression indexes. This is because the index contents must be unaffected by **default_text_search_config**, whose value can be changed at any time. If they were affected, the index contents might be inconsistent, because different entries could contain **tsvectors** that were created with different text search configurations, and there would be no way to guess which was which. It would be impossible to dump and restore such an index correctly. - -Because the two-argument version of **to_tsvector** was used in the index above, only a query reference that uses the 2-argument version of **to_tsvector** with the same configuration name will use that index. That is, **WHERE to_tsvector('english', body) @@ 'a & b'** can use the index, but **WHERE to_tsvector(body) @@ 'a & b'** cannot. This ensures that an index will be used only with the same configuration used to create the index entries. - -It is possible to set up more complex expression indexes wherein the configuration name is specified by another column. For example: - -```sql -mogdb=# CREATE INDEX pgweb_idx_2 ON tsearch.pgweb USING gin(to_tsvector('ngram', body)); -``` - -where **body** is a column in the **pgweb** table. This allows mixed configurations in the same index while recording which configuration was used for each index entry. This would be useful, for example, if the document collection contained documents in different languages. Again, queries that are meant to use the index must be phrased to match, for example, **WHERE to_tsvector(config_name, body) @@ 'a & b'** must match **to_tsvector** in the index. - -Indexes can even concatenate columns: - -```sql -mogdb=# CREATE INDEX pgweb_idx_3 ON tsearch.pgweb USING gin(to_tsvector('english', title || ' ' || body)); -``` - -Another approach is to create a separate **tsvector** column to hold the output of **to_tsvector**. This example is a concatenation of **title** and **body**, using **coalesce** to ensure that one column will still be indexed when the other is **NULL**: - -```sql -mogdb=# ALTER TABLE tsearch.pgweb ADD COLUMN textsearchable_index_col tsvector; -mogdb=# UPDATE tsearch.pgweb SET textsearchable_index_col = to_tsvector('english', coalesce(title,'') || ' ' || coalesce(body,'')); -``` - -Then, create a GIN index to speed up the search: - -```sql -mogdb=# CREATE INDEX textsearch_idx_4 ON tsearch.pgweb USING gin(textsearchable_index_col); -``` - -Now you are ready to perform a fast full text search: - -```sql -mogdb=# SELECT title -FROM tsearch.pgweb -WHERE textsearchable_index_col @@ to_tsquery('north & america') -ORDER BY last_mod_date DESC -LIMIT 10; - - title --------- - Canada - Mexico -(2 rows) -``` - -One advantage of the separate-column approach over an expression index is that it is unnecessary to explicitly specify the text search configuration in queries in order to use the index. As shown in the preceding example, the query can depend on **default_text_search_config**. Another advantage is that searches will be faster, since it will not be necessary to redo the **to_tsvector** calls to verify index matches. The expression-index approach is simpler to set up, however, and it requires less disk space since the **tsvector** representation is not stored explicitly. 
+--- +title: Creating an Index +summary: Creating an Index +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Creating an Index + +You can create a **GIN** index to speed up text searches: + +```sql +mogdb=# CREATE INDEX pgweb_idx_1 ON tsearch.pgweb USING gin(to_tsvector('english', body)); +``` + +The **to_tsvector** function comes in two versions: the 1-argument version and the 2-argument version. When the 1-argument version is used, the system uses the configuration specified by **default_text_search_config** by default. + +Notice that the 2-argument version of **to_tsvector** is used for index creation. Only text search functions that specify a configuration name can be used in expression indexes. This is because the index contents must be unaffected by **default_text_search_config**, whose value can be changed at any time. If they were affected, the index contents might be inconsistent, because different entries could contain **tsvectors** that were created with different text search configurations, and there would be no way to guess which was which. It would be impossible to dump and restore such an index correctly. + +Because the two-argument version of **to_tsvector** was used in the index above, only a query reference that uses the 2-argument version of **to_tsvector** with the same configuration name will use that index. That is, **WHERE to_tsvector('english', body) @@ 'a & b'** can use the index, but **WHERE to_tsvector(body) @@ 'a & b'** cannot. This ensures that an index will be used only with the same configuration used to create the index entries. + +An index can also be created with a different configuration, such as **ngram**: + +```sql +mogdb=# CREATE INDEX pgweb_idx_2 ON tsearch.pgweb USING gin(to_tsvector('ngram', body)); +``` + +where **body** is a column in the **pgweb** table. It is also possible to set up more complex expression indexes wherein the configuration name is specified by another column, for example **to_tsvector(config_name, body)**, where **config_name** is a column in the **pgweb** table. This allows mixed configurations in the same index while recording which configuration was used for each index entry. This would be useful, for example, if the document collection contained documents in different languages. Again, queries that are meant to use the index must be phrased to match, for example, **WHERE to_tsvector(config_name, body) @@ 'a & b'** must match the **to_tsvector** expression in the index. + +Indexes can even concatenate columns: + +```sql +mogdb=# CREATE INDEX pgweb_idx_3 ON tsearch.pgweb USING gin(to_tsvector('english', title || ' ' || body)); +``` + +Another approach is to create a separate **tsvector** column to hold the output of **to_tsvector**.
This example is a concatenation of **title** and **body**, using **coalesce** to ensure that one column will still be indexed when the other is **NULL**: + +```sql +mogdb=# ALTER TABLE tsearch.pgweb ADD COLUMN textsearchable_index_col tsvector; +mogdb=# UPDATE tsearch.pgweb SET textsearchable_index_col = to_tsvector('english', coalesce(title,'') || ' ' || coalesce(body,'')); +``` + +Then, create a GIN index to speed up the search: + +```sql +mogdb=# CREATE INDEX textsearch_idx_4 ON tsearch.pgweb USING gin(textsearchable_index_col); +``` + +Now you are ready to perform a fast full text search: + +```sql +mogdb=# SELECT title +FROM tsearch.pgweb +WHERE textsearchable_index_col @@ to_tsquery('north & america') +ORDER BY last_mod_date DESC +LIMIT 10; + + title +-------- + Canada + Mexico +(2 rows) +``` + +One advantage of the separate-column approach over an expression index is that it is unnecessary to explicitly specify the text search configuration in queries in order to use the index. As shown in the preceding example, the query can depend on **default_text_search_config**. Another advantage is that searches will be faster, since it will not be necessary to redo the **to_tsvector** calls to verify index matches. The expression-index approach is simpler to set up, however, and it requires less disk space since the **tsvector** representation is not stored explicitly. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/searching-a-table.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/searching-a-table.md index fe28de60..db18e579 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/searching-a-table.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/searching-a-table.md @@ -1,89 +1,89 @@ ---- -title: Searching a Table -summary: Searching a Table -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Searching a Table - -It is possible to do a full text search without an index. - -- A simple query to print each row that contains the word **america** in its **body** column is as follows: - - ```sql - MogDB=# DROP SCHEMA IF EXISTS tsearch CASCADE; - - MogDB=# CREATE SCHEMA tsearch; - - MogDB=# CREATE TABLE tsearch.pgweb(id int, body text, title text, last_mod_date date); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(1, 'China, officially the People''s Republic of China (PRC), located in Asia, is the world''s most populous state.', 'China', '2010-1-1'); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(2, 'America is a rock band, formed in England in 1970 by multi-instrumentalists Dewey Bunnell, Dan Peek, and Gerry Beckley.', 'America', '2010-1-1'); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(3, 'England is a country that is part of the United Kingdom. 
It shares land borders with Scotland to the north and Wales to the west.', 'England', '2010-1-1'); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(4, 'Australia, officially the Commonwealth of Australia, is a country comprising the mainland of the Australian continent, the island of Tasmania, and numerous smaller islands.', 'Australia', '2010-1-1'); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(6, 'Japan is an island country in East Asia.', 'Japan', '2010-1-1'); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(7, 'Germany, officially the Federal Republic of Germany, is a sovereign state and federal parliamentary republic in central-western Europe.', 'Germany', '2010-1-1'); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(8, 'France, is a sovereign state comprising territory in western Europe and several overseas regions and territories.', 'France', '2010-1-1'); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(9, 'Italy officially the Italian Republic, is a unitary parliamentary republic in Europe.', 'Italy', '2010-1-1'); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(10, 'India, officially the Republic of India, is a country in South Asia.', 'India', '2010-1-1'); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(11, 'Brazil, officially the Federative Republic of Brazil, is the largest country in both South America and Latin America.', 'Brazil', '2010-1-1'); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(12, 'Canada is a country in the northern half of North America.', 'Canada', '2010-1-1'); - - MogDB=# INSERT INTO tsearch.pgweb VALUES(13, 'Mexico, officially the United Mexican States, is a federal republic in the southern part of North America.', 'Mexico', '2010-1-1'); - - MogDB=# SELECT id, body, title FROM tsearch.pgweb WHERE to_tsvector('english', body) @@ to_tsquery('english', 'america'); - id | body | title - ----+-------------------------------------------------------------------------------------------------------------------------+--------- - 2 | America is a rock band, formed in England in 1970 by multi-instrumentalists Dewey Bunnell, Dan Peek, and Gerry Beckley. | America - 12 | Canada is a country in the northern half of North America. | Canada - 13 | Mexico, officially the United Mexican States, is a federal republic in the southern part of North America. | Mexico - 11 | Brazil, officially the Federative Republic of Brazil, is the largest country in both South America and Latin America. | Brazil - (4 rows) - ``` - - This will also find related words, such as **America**, since all these are reduced to the same normalized lexeme. - - The query above specifies that the **english** configuration is to be used to parse and normalize the strings. Alternatively we could omit the configuration parameters, and use the configuration set by **default_text_search_config**. - - ```sql - MogDB=# SHOW default_text_search_config; - default_text_search_config - ---------------------------- - pg_catalog.english - (1 row) - - MogDB=# SELECT id, body, title FROM tsearch.pgweb WHERE to_tsvector(body) @@ to_tsquery('america'); - id | body | title - ----+-------------------------------------------------------------------------------------------------------------------------+--------- - 11 | Brazil, officially the Federative Republic of Brazil, is the largest country in both South America and Latin America. | Brazil - 2 | America is a rock band, formed in England in 1970 by multi-instrumentalists Dewey Bunnell, Dan Peek, and Gerry Beckley. | America - 12 | Canada is a country in the northern half of North America. 
| Canada - 13 | Mexico, officially the United Mexican States, is a federal republic in the southern part of North America. | Mexico - (4 rows) - ``` - -- A more complex example to select the ten most recent documents that contain **north** and **america** in the **title** or **body** column is as follows: - - ```sql - MogDB=# SELECT title FROM tsearch.pgweb WHERE to_tsvector(title || ' ' || body) @@ to_tsquery('north & america') ORDER BY last_mod_date DESC LIMIT 10; - title - -------- - Mexico - Canada - (2 rows) - ``` - - For clarity we omitted the **coalesce** function calls which would be needed to find rows that contain **NULL** in one of the two columns. - - The preceding examples show queries without using indexes. Most applications will find this approach too slow. Therefore, practical use of text searching usually requires creating an index, except perhaps for occasional ad-hoc searches. +--- +title: Searching a Table +summary: Searching a Table +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Searching a Table + +It is possible to do a full text search without an index. + +- A simple query to print each row that contains the word **america** in its **body** column is as follows: + + ```sql + MogDB=# DROP SCHEMA IF EXISTS tsearch CASCADE; + + MogDB=# CREATE SCHEMA tsearch; + + MogDB=# CREATE TABLE tsearch.pgweb(id int, body text, title text, last_mod_date date); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(1, 'China, officially the People''s Republic of China (PRC), located in Asia, is the world''s most populous state.', 'China', '2010-1-1'); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(2, 'America is a rock band, formed in England in 1970 by multi-instrumentalists Dewey Bunnell, Dan Peek, and Gerry Beckley.', 'America', '2010-1-1'); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(3, 'England is a country that is part of the United Kingdom. 
It shares land borders with Scotland to the north and Wales to the west.', 'England', '2010-1-1'); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(4, 'Australia, officially the Commonwealth of Australia, is a country comprising the mainland of the Australian continent, the island of Tasmania, and numerous smaller islands.', 'Australia', '2010-1-1'); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(6, 'Japan is an island country in East Asia.', 'Japan', '2010-1-1'); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(7, 'Germany, officially the Federal Republic of Germany, is a sovereign state and federal parliamentary republic in central-western Europe.', 'Germany', '2010-1-1'); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(8, 'France, is a sovereign state comprising territory in western Europe and several overseas regions and territories.', 'France', '2010-1-1'); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(9, 'Italy officially the Italian Republic, is a unitary parliamentary republic in Europe.', 'Italy', '2010-1-1'); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(10, 'India, officially the Republic of India, is a country in South Asia.', 'India', '2010-1-1'); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(11, 'Brazil, officially the Federative Republic of Brazil, is the largest country in both South America and Latin America.', 'Brazil', '2010-1-1'); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(12, 'Canada is a country in the northern half of North America.', 'Canada', '2010-1-1'); + + MogDB=# INSERT INTO tsearch.pgweb VALUES(13, 'Mexico, officially the United Mexican States, is a federal republic in the southern part of North America.', 'Mexico', '2010-1-1'); + + MogDB=# SELECT id, body, title FROM tsearch.pgweb WHERE to_tsvector('english', body) @@ to_tsquery('english', 'america'); + id | body | title + ----+-------------------------------------------------------------------------------------------------------------------------+--------- + 2 | America is a rock band, formed in England in 1970 by multi-instrumentalists Dewey Bunnell, Dan Peek, and Gerry Beckley. | America + 12 | Canada is a country in the northern half of North America. | Canada + 13 | Mexico, officially the United Mexican States, is a federal republic in the southern part of North America. | Mexico + 11 | Brazil, officially the Federative Republic of Brazil, is the largest country in both South America and Latin America. | Brazil + (4 rows) + ``` + + This will also find related words, such as **America**, since all these are reduced to the same normalized lexeme. + + The query above specifies that the **english** configuration is to be used to parse and normalize the strings. Alternatively we could omit the configuration parameters, and use the configuration set by **default_text_search_config**. + + ```sql + MogDB=# SHOW default_text_search_config; + default_text_search_config + ---------------------------- + pg_catalog.english + (1 row) + + MogDB=# SELECT id, body, title FROM tsearch.pgweb WHERE to_tsvector(body) @@ to_tsquery('america'); + id | body | title + ----+-------------------------------------------------------------------------------------------------------------------------+--------- + 11 | Brazil, officially the Federative Republic of Brazil, is the largest country in both South America and Latin America. | Brazil + 2 | America is a rock band, formed in England in 1970 by multi-instrumentalists Dewey Bunnell, Dan Peek, and Gerry Beckley. | America + 12 | Canada is a country in the northern half of North America. 
| Canada + 13 | Mexico, officially the United Mexican States, is a federal republic in the southern part of North America. | Mexico + (4 rows) + ``` + +- A more complex example to select the ten most recent documents that contain **north** and **america** in the **title** or **body** column is as follows: + + ```sql + MogDB=# SELECT title FROM tsearch.pgweb WHERE to_tsvector(title || ' ' || body) @@ to_tsquery('north & america') ORDER BY last_mod_date DESC LIMIT 10; + title + -------- + Mexico + Canada + (2 rows) + ``` + + For clarity we omitted the **coalesce** function calls which would be needed to find rows that contain **NULL** in one of the two columns. + + The preceding examples show queries without using indexes. Most applications will find this approach too slow. Therefore, practical use of text searching usually requires creating an index, except perhaps for occasional ad-hoc searches. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/tables-and-indexes.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/tables-and-indexes.md index 8be14bfe..ed3f2413 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/tables-and-indexes.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/tables-and-indexes/tables-and-indexes.md @@ -1,12 +1,12 @@ ---- -title: Tables and Indexes -summary: Tables and Indexes -author: zhang cuiping -date: 2023-04-07 ---- - -# Tables and Indexes - -- **[Searching a Table](searching-a-table.md)** -- **[Creating an Index](creating-an-index.md)** +--- +title: Tables and Indexes +summary: Tables and Indexes +author: zhang cuiping +date: 2023-04-07 +--- + +# Tables and Indexes + +- **[Searching a Table](searching-a-table.md)** +- **[Creating an Index](creating-an-index.md)** - **[Constraints on Index Use](constraints-on-index-use.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-configuration.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-configuration.md index 8ec7ee1b..bb4fafbb 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-configuration.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-configuration.md @@ -1,65 +1,65 @@ ---- -title: Testing a Configuration -summary: Testing a Configuration -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Testing a Configuration - -The **ts_debug** function allows easy testing of a text search configuration. - -```sql -ts_debug([ config regconfig, ] document text, - OUT alias text, - OUT description text, - OUT token text, - OUT dictionaries regdictionary[], - OUT dictionary regdictionary, - OUT lexemes text[]) - returns setof record -``` - -**ts_debug** displays information about every token of document as produced by the parser and processed by the configured dictionaries. It uses the configuration specified by **config**, or **default_text_search_config** if that argument is omitted. - -**ts_debug** returns one row for each token identified in the text by the parser. 
The columns returned are: - -- **alias text**: short name of the token type -- **description text**: description of the token type -- **token text**: text of the token -- **dictionaries regdictionary[]**: dictionaries selected by the configuration for this token type -- **dictionary regdictionary**: the dictionary that recognized the token, or NULL if none did -- **lexemes text[]**: the lexeme(s) produced by the dictionary that recognized the token, or NULL if none did; an empty array ({}) means the token was recognized as a stop word - -Here is a simple example: - -```sql -mogdb=# SELECT * FROM ts_debug('english','a fat cat sat on a mat - it ate a fat rats'); - alias | description | token | dictionaries | dictionary | lexemes ------------+-----------------+-------+----------------+--------------+--------- - asciiword | Word, all ASCII | a | {english_stem} | english_stem | {} - blank | Space symbols | | {} | | - asciiword | Word, all ASCII | fat | {english_stem} | english_stem | {fat} - blank | Space symbols | | {} | | - asciiword | Word, all ASCII | cat | {english_stem} | english_stem | {cat} - blank | Space symbols | | {} | | - asciiword | Word, all ASCII | sat | {english_stem} | english_stem | {sat} - blank | Space symbols | | {} | | - asciiword | Word, all ASCII | on | {english_stem} | english_stem | {} - blank | Space symbols | | {} | | - asciiword | Word, all ASCII | a | {english_stem} | english_stem | {} - blank | Space symbols | | {} | | - asciiword | Word, all ASCII | mat | {english_stem} | english_stem | {mat} - blank | Space symbols | | {} | | - blank | Space symbols | - | {} | | - asciiword | Word, all ASCII | it | {english_stem} | english_stem | {} - blank | Space symbols | | {} | | - asciiword | Word, all ASCII | ate | {english_stem} | english_stem | {ate} - blank | Space symbols | | {} | | - asciiword | Word, all ASCII | a | {english_stem} | english_stem | {} - blank | Space symbols | | {} | | - asciiword | Word, all ASCII | fat | {english_stem} | english_stem | {fat} - blank | Space symbols | | {} | | - asciiword | Word, all ASCII | rats | {english_stem} | english_stem | {rat} -(24 rows) -``` +--- +title: Testing a Configuration +summary: Testing a Configuration +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Testing a Configuration + +The **ts_debug** function allows easy testing of a text search configuration. + +```sql +ts_debug([ config regconfig, ] document text, + OUT alias text, + OUT description text, + OUT token text, + OUT dictionaries regdictionary[], + OUT dictionary regdictionary, + OUT lexemes text[]) + returns setof record +``` + +**ts_debug** displays information about every token of document as produced by the parser and processed by the configured dictionaries. It uses the configuration specified by **config**, or **default_text_search_config** if that argument is omitted. + +**ts_debug** returns one row for each token identified in the text by the parser. 
The columns returned are: + +- **alias text**: short name of the token type +- **description text**: description of the token type +- **token text**: text of the token +- **dictionaries regdictionary[]**: dictionaries selected by the configuration for this token type +- **dictionary regdictionary**: the dictionary that recognized the token, or NULL if none did +- **lexemes text[]**: the lexeme(s) produced by the dictionary that recognized the token, or NULL if none did; an empty array ({}) means the token was recognized as a stop word + +Here is a simple example: + +```sql +mogdb=# SELECT * FROM ts_debug('english','a fat cat sat on a mat - it ate a fat rats'); + alias | description | token | dictionaries | dictionary | lexemes +-----------+-----------------+-------+----------------+--------------+--------- + asciiword | Word, all ASCII | a | {english_stem} | english_stem | {} + blank | Space symbols | | {} | | + asciiword | Word, all ASCII | fat | {english_stem} | english_stem | {fat} + blank | Space symbols | | {} | | + asciiword | Word, all ASCII | cat | {english_stem} | english_stem | {cat} + blank | Space symbols | | {} | | + asciiword | Word, all ASCII | sat | {english_stem} | english_stem | {sat} + blank | Space symbols | | {} | | + asciiword | Word, all ASCII | on | {english_stem} | english_stem | {} + blank | Space symbols | | {} | | + asciiword | Word, all ASCII | a | {english_stem} | english_stem | {} + blank | Space symbols | | {} | | + asciiword | Word, all ASCII | mat | {english_stem} | english_stem | {mat} + blank | Space symbols | | {} | | + blank | Space symbols | - | {} | | + asciiword | Word, all ASCII | it | {english_stem} | english_stem | {} + blank | Space symbols | | {} | | + asciiword | Word, all ASCII | ate | {english_stem} | english_stem | {ate} + blank | Space symbols | | {} | | + asciiword | Word, all ASCII | a | {english_stem} | english_stem | {} + blank | Space symbols | | {} | | + asciiword | Word, all ASCII | fat | {english_stem} | english_stem | {fat} + blank | Space symbols | | {} | | + asciiword | Word, all ASCII | rats | {english_stem} | english_stem | {rat} +(24 rows) +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-dictionary.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-dictionary.md index cff76d01..08f63f3a 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-dictionary.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-dictionary.md @@ -1,29 +1,29 @@ ---- -title: Testing a Dictionary -summary: Testing a Dictionary -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Testing a Dictionary - -The **ts_lexize** function facilitates dictionary testing. - -**ts_lexize(dict regdictionary, token text) returns text[]** **ts_lexize** returns an array of lexemes if the input **token** is known to the dictionary, or an empty array if the token is known to the dictionary but it is a stop word, or **NULL** if it is an unknown word. 
- -Example: - -```sql -mogdb=# SELECT ts_lexize('english_stem', 'stars'); - ts_lexize ------------ - {star} - -mogdb=# SELECT ts_lexize('english_stem', 'a'); - ts_lexize ------------ - {} -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> The **ts_lexize** function expects a single **token**, not text. +--- +title: Testing a Dictionary +summary: Testing a Dictionary +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Testing a Dictionary + +The **ts_lexize** function facilitates dictionary testing. + +**ts_lexize(dict regdictionary, token text) returns text[]** + +**ts_lexize** returns an array of lexemes if the input **token** is known to the dictionary, an empty array if the token is known to the dictionary but is a stop word, or **NULL** if it is an unknown word. + +Example: + +```sql +mogdb=# SELECT ts_lexize('english_stem', 'stars'); + ts_lexize +----------- + {star} + +mogdb=# SELECT ts_lexize('english_stem', 'a'); + ts_lexize +----------- + {} +``` + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** +> The **ts_lexize** function expects a single **token**, not text. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-parser.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-parser.md index c880441f..6c615bcf 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-parser.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-parser.md @@ -1,69 +1,69 @@ ---- -title: Testing a Parser -summary: Testing a Parser -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Testing a Parser - -The **ts_parse** function allows direct testing of a text search parser. - -``` -ts_parse(parser_name text, document text, - OUT tokid integer, OUT token text) returns setof record -``` - -**ts_parse** parses the given **document** and returns a series of records, one for each token produced by parsing. Each record includes a **tokid** showing the assigned token type and a **token** which is the text of the token. For example: - -```sql -mogdb=# SELECT * FROM ts_parse('default', '123 - a number'); - tokid | token --------+-------- - 22 | 123 - 12 | - 12 | - - 1 | a - 12 | - 1 | number -(6 rows) -``` - -The **ts_token_type** function returns the token type and description of the specified parser. - -``` -ts_token_type(parser_name text, OUT tokid integer, - OUT alias text, OUT description text) returns setof record -``` - -**ts_token_type** returns a table which describes each type of token the specified parser can recognize. For each token type, the table gives the integer **tokid** that the parser uses to label a token of that type, the **alias** that names the token type in configuration commands, and a short description.
For example: - -```sql -mogdb=# SELECT * FROM ts_token_type('default'); - tokid | alias | description --------+-----------------+------------------------------------------ - 1 | asciiword | Word, all ASCII - 2 | word | Word, all letters - 3 | numword | Word, letters and digits - 4 | email | Email address - 5 | url | URL - 6 | host | Host - 7 | sfloat | Scientific notation - 8 | version | Version number - 9 | hword_numpart | Hyphenated word part, letters and digits - 10 | hword_part | Hyphenated word part, all letters - 11 | hword_asciipart | Hyphenated word part, all ASCII - 12 | blank | Space symbols - 13 | tag | XML tag - 14 | protocol | Protocol head - 15 | numhword | Hyphenated word, letters and digits - 16 | asciihword | Hyphenated word, all ASCII - 17 | hword | Hyphenated word, all letters - 18 | url_path | URL path - 19 | file | File or path name - 20 | float | Decimal notation - 21 | int | Signed integer - 22 | uint | Unsigned integer - 23 | entity | XML entity -(23 rows) -``` +--- +title: Testing a Parser +summary: Testing a Parser +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Testing a Parser + +The **ts_parse** function allows direct testing of a text search parser. + +``` +ts_parse(parser_name text, document text, + OUT tokid integer, OUT token text) returns setof record +``` + +**ts_parse** parses the given **document** and returns a series of records, one for each token produced by parsing. Each record includes a **tokid** showing the assigned token type and a **token** which is the text of the token. For example: + +```sql +mogdb=# SELECT * FROM ts_parse('default', '123 - a number'); + tokid | token +-------+-------- + 22 | 123 + 12 | + 12 | - + 1 | a + 12 | + 1 | number +(6 rows) +``` + +The **ts_token_type** function returns the token type and description of the specified parser. + +``` +ts_token_type(parser_name text, OUT tokid integer, + OUT alias text, OUT description text) returns setof record +``` + +**ts_token_type** returns a table which describes each type of token the specified parser can recognize. For each token type, the table gives the integer **tokid** that the parser uses to label a token of that type, the **alias** that names the token type in configuration commands, and a short description. 
For example: + +```sql +mogdb=# SELECT * FROM ts_token_type('default'); + tokid | alias | description +-------+-----------------+------------------------------------------ + 1 | asciiword | Word, all ASCII + 2 | word | Word, all letters + 3 | numword | Word, letters and digits + 4 | email | Email address + 5 | url | URL + 6 | host | Host + 7 | sfloat | Scientific notation + 8 | version | Version number + 9 | hword_numpart | Hyphenated word part, letters and digits + 10 | hword_part | Hyphenated word part, all letters + 11 | hword_asciipart | Hyphenated word part, all ASCII + 12 | blank | Space symbols + 13 | tag | XML tag + 14 | protocol | Protocol head + 15 | numhword | Hyphenated word, letters and digits + 16 | asciihword | Hyphenated word, all ASCII + 17 | hword | Hyphenated word, all letters + 18 | url_path | URL path + 19 | file | File or path name + 20 | float | Decimal notation + 21 | int | Signed integer + 22 | uint | Unsigned integer + 23 | entity | XML entity +(23 rows) +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-and-debugging-text-search.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-and-debugging-text-search.md index fb6360b4..e26b2433 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-and-debugging-text-search.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-and-debugging-text-search.md @@ -1,12 +1,12 @@ ---- -title: Testing and Debugging Text Search -summary: Testing and Debugging Text Search -author: zhang cuiping -date: 2023-04-07 ---- - -# Testing and Debugging Text Search - -- **[Testing a Configuration](testing-a-configuration.md)** -- **[Testing a Parser](testing-a-parser.md)** +--- +title: Testing and Debugging Text Search +summary: Testing and Debugging Text Search +author: zhang cuiping +date: 2023-04-07 +--- + +# Testing and Debugging Text Search + +- **[Testing a Configuration](testing-a-configuration.md)** +- **[Testing a Parser](testing-a-parser.md)** - **[Testing a Dictionary](testing-a-dictionary.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/mogdb-sql.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/mogdb-sql.md index b1a21be0..8eb92c5b 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/mogdb-sql.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/mogdb-sql.md @@ -1,37 +1,37 @@ ---- -title: MogDB SQL -summary: MogDB SQL -author: Zhang Cuiping -date: 2021-05-17 ---- - -# MogDB SQL - -## What Is SQL? - -SQL is a standard computer language used to control the access to databases and manage data in databases. - -SQL provides different statements to enable you to: - -- Query data. -- Insert, update, and delete rows. -- Create, replace, modify, and delete objects. -- Control the access to a database and its objects. -- Maintain the consistency and integrity of a database. - -SQL consists of commands and functions that are used to manage databases and database objects. SQL can also forcibly implement the rules for data types, expressions, and texts. Therefore, **SQL Reference** describes data types, expressions, functions, and operators in addition to SQL syntax. 
- -## Development of SQL Standards - -The development history of SQL standards is as follows: - -- 1986: ANSI X3.135-1986, ISO/IEC 9075:1986, SQL-86 -- 1989: ANSI X3.135-1989, ISO/IEC 9075:1989, SQL-89 -- 1992: ANSI X3.135-1992, ISO/IEC 9075:1992, SQL-92 (SQL2) -- 1999: ISO/IEC 9075:1999, SQL:1999 (SQL3) -- 2003: ISO/IEC 9075:2003, SQL:2003 (SQL4) -- 2011: ISO/IEC 9075:200N, SQL:2011 (SQL5) - -## SQL Standards Supported by MogDB - -MogDB supports major SQL2, SQL3, and SQL4 features by default. +--- +title: MogDB SQL +summary: MogDB SQL +author: Zhang Cuiping +date: 2021-05-17 +--- + +# MogDB SQL + +## What Is SQL? + +SQL is a standard computer language used to control access to databases and manage data in databases. + +SQL provides different statements to enable you to: + +- Query data. +- Insert, update, and delete rows. +- Create, replace, modify, and delete objects. +- Control access to a database and its objects. +- Maintain the consistency and integrity of a database. + +SQL consists of commands and functions that are used to manage databases and database objects. SQL can also enforce rules for data types, expressions, and text. Therefore, **SQL Reference** describes data types, expressions, functions, and operators in addition to SQL syntax. + +## Development of SQL Standards + +The development history of SQL standards is as follows: + +- 1986: ANSI X3.135-1986, ISO/IEC 9075:1986, SQL-86 +- 1989: ANSI X3.135-1989, ISO/IEC 9075:1989, SQL-89 +- 1992: ANSI X3.135-1992, ISO/IEC 9075:1992, SQL-92 (SQL2) +- 1999: ISO/IEC 9075:1999, SQL:1999 (SQL3) +- 2003: ISO/IEC 9075:2003, SQL:2003 (SQL4) +- 2011: ISO/IEC 9075:2011, SQL:2011 (SQL5) + +## SQL Standards Supported by MogDB + +MogDB supports major SQL2, SQL3, and SQL4 features by default. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/ordinary-table.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/ordinary-table.md index 6c198eab..c7af29ea 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/ordinary-table.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/ordinary-table.md @@ -1,52 +1,52 @@ ---- -title: Ordinary Table -summary: Ordinary Table -author: zhang cuiping -date: 2023-04-07 ---- - -# Ordinary Table - -Create an empty table in the current database. The table will be owned by the creator. The same table can be stored in different databases. You can execute the CREATE TABLE statement to create a table. - -## Syntax - -``` -CREATE TABLE table_name - (column_name data_type [, ... ]); -``` - -## Parameter Description - -- **table_name** - - Specifies the name of the table to be created. - -- **column_name** - - Specifies the name of the column to be created in the new table. - -- **data_type** - - Specifies the data type of the column. - -## Examples - -Run the following commands to create a table named **customer_t1**. The table columns are **c_customer_sk**, **c_customer_id**, **c_first_name**, and **c_last_name**. The data types of the table columns are integer, char (5), char (6), and char (8), respectively. - -```sql -MogDB=# CREATE TABLE customer_t1 -( - c_customer_sk integer, - c_customer_id char(5), - c_first_name char(6), - c_last_name char(8), - Amount integer -); -``` - -If the following information is displayed, the table has been created: - -``` -CREATE TABLE +--- +title: Ordinary Table +summary: Ordinary Table +author: zhang cuiping +date: 2023-04-07 +--- + +# Ordinary Table + +Create an empty table in the current database.
The table will be owned by the creator. Tables with the same name can exist in different databases. You can execute the CREATE TABLE statement to create a table. + +## Syntax + +``` +CREATE TABLE table_name + (column_name data_type [, ... ]); +``` + +## Parameter Description + +- **table_name** + + Specifies the name of the table to be created. + +- **column_name** + + Specifies the name of the column to be created in the new table. + +- **data_type** + + Specifies the data type of the column. + +## Examples + +Run the following commands to create a table named **customer_t1**. The table columns are **c_customer_sk**, **c_customer_id**, **c_first_name**, **c_last_name**, and **Amount**. The data types of the table columns are integer, char(5), char(6), char(8), and integer, respectively. + +```sql +MogDB=# CREATE TABLE customer_t1 +( + c_customer_sk integer, + c_customer_id char(5), + c_first_name char(6), + c_last_name char(8), + Amount integer +); +``` + +If the following information is displayed, the table has been created: + +``` +CREATE TABLE ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-anonymous-block.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-anonymous-block.md index 065c487b..d11b69d9 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-anonymous-block.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-anonymous-block.md @@ -1,76 +1,76 @@ ---- -title: Anonymous Blocks -summary: Anonymous Blocks -author: zhang cuiping -date: 2023-04-07 ---- - -# Anonymous Blocks - -An anonymous block is one of the character blocks of a stored procedure and has no name. It is generally used for scripts or activities that are not executed frequently. - -## Syntax - -[Figure 1](#anonymous) shows the syntax diagram for an anonymous block. - -**Figure 1** anonymous_block::= -![anonymous_block](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/anonymous_block.png) - -Details about the syntax diagram are as follows: - -- The execution section of an anonymous block starts with a BEGIN statement, has a break with an END statement, and ends with a semicolon (;). Type a slash (/) and press **Enter** to execute the statement. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** The terminator “/” must be written in an independent row. - -- The declaration section includes the variable definition, type, and cursor definition. -- A simplest anonymous block does not execute any commands. At least one statement, even a NULL statement, must be presented in any implementation blocks. - -## Parameter Description - -- **DECLARE** - - Specifies an optional keyword used to begin a DECLARE statement. This keyword can be used to declare a data type, variable, or cursor. The use of this keyword depends on the context in which the block is located. - -- **declaration_statements** - - Specifies the declaration of a data type, variable, cursor, exception, or procedure whose scope is limited to the block. Each declaration must be terminated with a semicolon (;). - -- **BEGIN** - - Specifies the mandatory keyword for introducing an executable section. The section can contain one or more SQL or PL/SQL statements. A BEGIN-END block can contain nested BEGIN-END blocks. - -- **execution_statements** - - Specifies PL/SQL or SQL statements. Each statement must be terminated with a semicolon (;).
-
-- **END**
-
-  Specifies the required keyword for ending a block.
-
-## Examples
-
-```sql
--- Create a null statement block.
-MogDB=# BEGIN
-    NULL;
-END;
-/
-
--- Create a demonstration table.
-MogDB=# CREATE TABLE table1(id1 INT, id2 INT, id3 INT);
-CREATE TABLE
-
--- Use an anonymous block to insert data.
-MogDB=# BEGIN
-    insert into table1 values(1,2,3);
-    END;
-    /
-ANONYMOUS BLOCK EXECUTE
-
--- Query the inserted data.
-MogDB=# select * from table1;
- id1 | id2 | id3
------+-----+-----
-   1 |   2 |   3
-(1 rows)
+---
+title: Anonymous Blocks
+summary: Anonymous Blocks
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Anonymous Blocks
+
+An anonymous block is a block of procedural code, similar to the body of a stored procedure, that has no name. It is generally used for scripts or activities that are not executed frequently.
+
+## Syntax
+
+[Figure 1](#anonymous) shows the syntax diagram for an anonymous block.
+
+**Figure 1** anonymous_block::=
+![anonymous_block](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/anonymous_block.png)
+
+Details about the syntax diagram are as follows:
+
+- The execution section of an anonymous block starts with a BEGIN statement and ends with an END statement followed by a semicolon (;). Type a slash (/) and press **Enter** to execute the block.
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** The terminator “/” must be written in an independent row.
+
+- The declaration section includes variable, type, and cursor definitions.
+- The simplest anonymous block does not execute any commands; however, at least one statement, even a NULL statement, must be present in the execution section.
+
+## Parameter Description
+
+- **DECLARE**
+
+  Specifies an optional keyword used to begin a DECLARE statement. This keyword can be used to declare a data type, variable, or cursor. The use of this keyword depends on the context in which the block is located.
+
+- **declaration_statements**
+
+  Specifies the declaration of a data type, variable, cursor, exception, or procedure whose scope is limited to the block. Each declaration must be terminated with a semicolon (;).
+
+- **BEGIN**
+
+  Specifies the mandatory keyword for introducing an executable section. The section can contain one or more SQL or PL/SQL statements. A BEGIN-END block can contain nested BEGIN-END blocks.
+
+- **execution_statements**
+
+  Specifies PL/SQL or SQL statements. Each statement must be terminated with a semicolon (;).
+
+- **END**
+
+  Specifies the required keyword for ending a block.
+
+## Examples
+
+```sql
+-- Create a null statement block.
+MogDB=# BEGIN
+    NULL;
+END;
+/
+
+-- Create a demonstration table.
+MogDB=# CREATE TABLE table1(id1 INT, id2 INT, id3 INT);
+CREATE TABLE
+
+-- Use an anonymous block to insert data.
+MogDB=# BEGIN
+    insert into table1 values(1,2,3);
+    END;
+    /
+ANONYMOUS BLOCK EXECUTE
+
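+-- The declaration section described above can also be exercised. The block
+-- below is an illustrative sketch: v_total is a placeholder variable name,
+-- and the block reads table1 without modifying it.
+MogDB=# DECLARE
+    v_total INT;
+BEGIN
+    SELECT count(*) INTO v_total FROM table1;
+END;
+/
+ANONYMOUS BLOCK EXECUTE
+
+-- Query the inserted data.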
+MogDB=# select * from table1; + id1 | id2 | id3 +-----+-----+----- + 1 | 2 | 3 +(1 rows) ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-contraints.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-contraints.md index 2f87a7fb..6e689d66 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-contraints.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-contraints.md @@ -1,152 +1,152 @@ ---- -title: Constraints -summary: Constraints -author: zhang cuiping -date: 2023-04-07 ---- - -# Constraints - -Constraint clauses specify constraints that new or updated rows must satisfy for an INSERT or UPDATE operation to succeed. If there is any data behavior that violates the constraints, the behavior is terminated by the constraints. - -Constraints can be specified when a table is created (by executing the CREATE TABLE statement) or after a table is created (by executing the ALTER TABLE statement). - -Constraints can be column-level or table-level. Column-level constraints apply only to columns, and table-level constraints apply to the entire table. - -The common constraints of MogDB are as follows: - -- NOT NULL: specifies that a column cannot store **NULL** values. -- UNIQUE: ensures that the value of a column is unique. -- PRIMARY KEY: functions as the combination of NOT NULL and UNIQUE and ensures that a column (or the combination of two or more columns) has a unique identifier to help quickly locate a specific record in a table. -- FOREIGN KEY: ensures the referential integrity for data in one table to match values in another table. -- CHECK: ensures that values in a column meet specified conditions. - -## NOT NULL - -If no constraint is specified during table creation, the default value is **NULL**, indicating that **NULL** values can be inserted into columns. If you do not want a column to be set to **NULL**, you need to define the NOT NULL constraint on the column to specify that **NULL** values are not allowed in the column. When you insert data, if the column contains **NULL**, an error is reported and the data fails to be inserted. - -**NULL** does not mean that there is no data. It indicates unknown data. - -For example, create the **staff** table that contains five columns. The **NAME** and **ID** columns cannot be set to **NULL**. - -```sql -MogDB=# CREATE TABLE staff( - ID INT NOT NULL, - NAME char(8) NOT NULL, - AGE INT , - ADDRESS CHAR(50), - SALARY REAL -); -``` - -Insert data into the **staff** table. When a **NULL** value is inserted into the **ID** column, the database returns an error. - -```sql -MogDB=# INSERT INTO staff VALUES (1,'lily',28); -INSERT 0 1 -MogDB=# INSERT INTO staff (NAME,AGE) VALUES ('JUCE',28); -ERROR: null value in column "id" violates not-null constraint -DETAIL: Failing row contains (null, JUCE , 28, null, null). -``` - -## UNIQUE - -The UNIQUE constraint specifies that a group of one or more columns of a table can contain only unique values. - -For the UNIQUE constraint, **NULL** is not considered equal. - -For example, create the **staff1** table that contains five columns, where **AGE** is set to **UNIQUE**. Therefore, you cannot add two records with the same age. - -```sql -MogDB=# CREATE TABLE staff1( - ID INT NOT NULL, - NAME char(8) NOT NULL, - AGE INT NOT NULL UNIQUE , - ADDRESS CHAR(50), - SALARY REAL -); -``` - -Insert data into the **staff1** table. 
When two identical data records are inserted into the **AGE** column, the database returns an error.
-
-```sql
-MogDB=# INSERT INTO staff1 VALUES (1,'lily',28);
-INSERT 0 1
-MogDB=# INSERT INTO staff1 VALUES (2, 'JUCE',28);
-ERROR: duplicate key value violates unique constraint "staff1_age_key"
-DETAIL: Key (age)=(28) already exists.
-```
-
-## PRIMARY KEY
-
-PRIMARY KEY is the unique identifier of each record in a data table. It specifies that a column or multiple columns in a table can contain only unique (non-duplicate) and non-**NULL** values.
-
-PRIMARY KEY is the combination of NOT NULL and UNIQUE. Only one primary key can be specified for a table.
-
-For example, create the **staff2** table where **ID** indicates the primary key.
-
-```sql
-MogDB=# CREATE TABLE staff2(
-    ID INT PRIMARY KEY ,
-    NAME TEXT NOT NULL,
-    AGE INT NOT NULL,
-    ADDRESS CHAR(50),
-    SALARY REAL
-);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "staff2_pkey" for table "staff2"
-CREATE TABLE
-```
-
-## FOREIGN KEY
-
-The FOREIGN KEY constraint specifies that the value of a column (or a group of columns) must match the value in a row of another table. Generally, the FOREIGN KEY constraint in one table points to the UNIQUE KEY constraint in another table. That is, the referential integrity between two related tables is maintained.
-
-For example, create the **staff3** table that contains five columns.
-
-```sql
-MogDB=# CREATE TABLE staff3(
-    ID INT PRIMARY KEY NOT NULL,
-    NAME TEXT NOT NULL,
-    AGE INT NOT NULL,
-    ADDRESS CHAR(50),
-    SALARY REAL
-);
-```
-
-Create the **DEPARTMENT** table and add three columns. The **EMP_ID** column indicates the foreign key and it is similar to the **ID** column of the **staff3** table.
-
-```sql
-MogDB=# CREATE TABLE DEPARTMENT(
-    ID INT PRIMARY KEY NOT NULL,
-    DEPT CHAR(50) NOT NULL,
-    EMP_ID INT references staff3(ID)
-);
-```
-
-## CHECK
-
-The CHECK constraint specifies an expression producing a Boolean result where the INSERT or UPDATE operation of new or updated rows can succeed only when the expression result is **TRUE** or **UNKNOWN**; otherwise, an error is thrown and the database is not altered.
-
-A CHECK constraint specified as a column constraint should reference only the column's value, while an expression in a table constraint can reference multiple columns. **<>NULL** and **!=NULL** are invalid in an expression. Change them to **IS NOT NULL**.
-
-For example, create the **staff4** table and add a CHECK constraint to the **SALARY** column to ensure that the inserted value is greater than **0**.
-
-```sql
-MogDB=# CREATE TABLE staff4(
-    ID INT PRIMARY KEY NOT NULL,
-    NAME TEXT NOT NULL,
-    AGE INT NOT NULL,
-    ADDRESS CHAR(50),
-    SALARY REAL CHECK(SALARY > 0)
-);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "staff4_pkey" for table "staff4"
-CREATE TABLE
-```
-
-Insert data into the **staff4** table. When the inserted value of the **SALARY** column is not greater than **0**, the database reports an error.
-
-```sql
-MogDB=# INSERT INTO staff4(ID,NAME,AGE,SALARY) VALUES (2, 'JUCE',16,0);
-ERROR: new row for relation "staff4" violates check constraint "staff4_salary_check"
-DETAIL: N/A
+---
+title: Constraints
+summary: Constraints
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Constraints
+
+Constraint clauses specify constraints that new or updated rows must satisfy for an INSERT or UPDATE operation to succeed. Any operation that violates a constraint is rejected.
+
+Constraints can be specified when a table is created (by executing the CREATE TABLE statement) or after a table is created (by executing the ALTER TABLE statement).
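+
+For example, a CHECK constraint can be added to an existing table with ALTER TABLE. The following is an illustrative sketch; the table, constraint, and column names are placeholders:
+
+```sql
+ALTER TABLE table_name ADD CONSTRAINT constraint_name CHECK (column_name > 0);
+```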
+
+Constraints can be column-level or table-level. Column-level constraints apply only to columns, and table-level constraints apply to the entire table.
+
+The common constraints of MogDB are as follows:
+
+- NOT NULL: specifies that a column cannot store **NULL** values.
+- UNIQUE: ensures that the value of a column is unique.
+- PRIMARY KEY: functions as the combination of NOT NULL and UNIQUE and ensures that a column (or the combination of two or more columns) has a unique identifier to help quickly locate a specific record in a table.
+- FOREIGN KEY: ensures the referential integrity for data in one table to match values in another table.
+- CHECK: ensures that values in a column meet specified conditions.
+
+## NOT NULL
+
+If no constraint is specified during table creation, the default value is **NULL**, indicating that **NULL** values can be inserted into columns. If you do not want a column to be set to **NULL**, you need to define the NOT NULL constraint on the column to specify that **NULL** values are not allowed in the column. When you insert data, if the column contains **NULL**, an error is reported and the data fails to be inserted.
+
+**NULL** does not mean that there is no data. It indicates unknown data.
+
+For example, create the **staff** table that contains five columns. The **NAME** and **ID** columns cannot be set to **NULL**.
+
+```sql
+MogDB=# CREATE TABLE staff(
+    ID INT NOT NULL,
+    NAME char(8) NOT NULL,
+    AGE INT ,
+    ADDRESS CHAR(50),
+    SALARY REAL
+);
+```
+
+Insert data into the **staff** table. When a **NULL** value is inserted into the **ID** column, the database returns an error.
+
+```sql
+MogDB=# INSERT INTO staff VALUES (1,'lily',28);
+INSERT 0 1
+MogDB=# INSERT INTO staff (NAME,AGE) VALUES ('JUCE',28);
+ERROR: null value in column "id" violates not-null constraint
+DETAIL: Failing row contains (null, JUCE    , 28, null, null).
+```
+
+## UNIQUE
+
+The UNIQUE constraint specifies that a group of one or more columns of a table can contain only unique values.
+
+For the UNIQUE constraint, **NULL** values are not considered equal to each other.
+
+For example, create the **staff1** table that contains five columns, where **AGE** is set to **UNIQUE**. Therefore, you cannot add two records with the same age.
+
+```sql
+MogDB=# CREATE TABLE staff1(
+    ID INT NOT NULL,
+    NAME char(8) NOT NULL,
+    AGE INT NOT NULL UNIQUE ,
+    ADDRESS CHAR(50),
+    SALARY REAL
+);
+```
+
+Insert data into the **staff1** table. When two records with the same **AGE** value are inserted, the database returns an error.
+
+```sql
+MogDB=# INSERT INTO staff1 VALUES (1,'lily',28);
+INSERT 0 1
+MogDB=# INSERT INTO staff1 VALUES (2, 'JUCE',28);
+ERROR: duplicate key value violates unique constraint "staff1_age_key"
+DETAIL: Key (age)=(28) already exists.
+```
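+
+Because **NULL** values are not considered equal, a column with a unique constraint accepts more than one **NULL**. The following sketch illustrates this with a hypothetical table; **staff1b** and **CODE** are placeholder names:
+
+```sql
+MogDB=# CREATE TABLE staff1b(ID INT, CODE INT UNIQUE);
+MogDB=# INSERT INTO staff1b VALUES (1, NULL);
+MogDB=# INSERT INTO staff1b VALUES (2, NULL);  -- succeeds: two NULLs do not conflict
+```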
+
+## PRIMARY KEY
+
+PRIMARY KEY is the unique identifier of each record in a data table. It specifies that a column or multiple columns in a table can contain only unique (non-duplicate) and non-**NULL** values.
+
+PRIMARY KEY is the combination of NOT NULL and UNIQUE. Only one primary key can be specified for a table.
+
+For example, create the **staff2** table where **ID** indicates the primary key.
+
+```sql
+MogDB=# CREATE TABLE staff2(
+    ID INT PRIMARY KEY ,
+    NAME TEXT NOT NULL,
+    AGE INT NOT NULL,
+    ADDRESS CHAR(50),
+    SALARY REAL
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "staff2_pkey" for table "staff2"
+CREATE TABLE
+```
+
+## FOREIGN KEY
+
+The FOREIGN KEY constraint specifies that the value of a column (or a group of columns) must match the value in a row of another table. Generally, a FOREIGN KEY constraint in one table references a PRIMARY KEY or UNIQUE constraint in another table. That is, the referential integrity between two related tables is maintained.
+
+For example, create the **staff3** table that contains five columns.
+
+```sql
+MogDB=# CREATE TABLE staff3(
+    ID INT PRIMARY KEY NOT NULL,
+    NAME TEXT NOT NULL,
+    AGE INT NOT NULL,
+    ADDRESS CHAR(50),
+    SALARY REAL
+);
+```
+
+Create the **DEPARTMENT** table and add three columns. The **EMP_ID** column is the foreign key; it references the **ID** column of the **staff3** table.
+
+```sql
+MogDB=# CREATE TABLE DEPARTMENT(
+    ID INT PRIMARY KEY NOT NULL,
+    DEPT CHAR(50) NOT NULL,
+    EMP_ID INT references staff3(ID)
+);
+```
+
+## CHECK
+
+The CHECK constraint specifies an expression producing a Boolean result where the INSERT or UPDATE operation of new or updated rows can succeed only when the expression result is **TRUE** or **UNKNOWN**; otherwise, an error is thrown and the database is not altered.
+
+A CHECK constraint specified as a column constraint should reference only the column's value, while an expression in a table constraint can reference multiple columns. **<>NULL** and **!=NULL** are invalid in an expression. Change them to **IS NOT NULL**.
+
+For example, create the **staff4** table and add a CHECK constraint to the **SALARY** column to ensure that the inserted value is greater than **0**.
+
+```sql
+MogDB=# CREATE TABLE staff4(
+    ID INT PRIMARY KEY NOT NULL,
+    NAME TEXT NOT NULL,
+    AGE INT NOT NULL,
+    ADDRESS CHAR(50),
+    SALARY REAL CHECK(SALARY > 0)
+);
+NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "staff4_pkey" for table "staff4"
+CREATE TABLE
+```
+
+Insert data into the **staff4** table. When the inserted value of the **SALARY** column is not greater than **0**, the database reports an error.
+
+```sql
+MogDB=# INSERT INTO staff4(ID,NAME,AGE,SALARY) VALUES (2, 'JUCE',16,0);
+ERROR: new row for relation "staff4" violates check constraint "staff4_salary_check"
+DETAIL: N/A
+```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-index.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-index.md
index 6f7f06bf..f7e9deab 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-index.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-index.md
@@ -1,159 +1,159 @@
----
-title: Indexes
-summary: Indexes
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Indexes
-
-An index is a pointer to data in a table. The index in a database is very similar to the index directory in a book.
-
-Indexes are primarily used to enhance database performance (though inappropriate use can result in database performance deterioration). You are advised to create indexes on:
-
-- Columns that are often queried
-- Join conditions. For a query on joined columns, you are advised to create a composite index on the columns. For example, select * from t1 join t2 on t1. a=t2. a and t1. b=t2.b.
You can create a composite index on the a and b columns of table t1. -- Columns having filter criteria (especially scope criteria) of a **WHERE** clause -- Columns that are frequently used after **ORDER BY**, **GROUP BY**, and **DISTINCT** - -## Syntax - -- Single-column index - - A single-column index is created based on only one column in a table. - - ```scss - CREATE INDEX [ [schema_name.]index_name ] ON table_name (column_name); - ``` - -- Composite index - - A composite index is created based on multiple columns in a table. - - ```scss - CREATE INDEX [ [schema_name.]index_name ] ON table_name (column1_name,column2_name,...); - ``` - -- Unique index - - Duplicate values cannot be inserted into a column with a unique index. - - ```scss - CREATE UNIQUE INDEX [ [schema_name.]index_name ] ON table_name (column_name); - ``` - -- Local index - - A Local index is created on a subset of a table. The subset is defined by a condition expression. - - ```scss - CREATE INDEX [ [schema_name.]index_name ] ON table_name (expression); - ``` - -- Partial index - - A partial index contains entries for only a portion of a table, usually a portion that is more useful for indexing than the rest of the table. - - ```scss - CREATE INDEX [ [schema_name.]index_name ] ON table_name (column_name) - [ WHERE predicate ] - ``` - -- Index deletion - - ```sql - DROP INDEX index_name; - ``` - -## Parameter Description - -- **UNIQUE** - - Creates a unique index. In this way, the system checks whether new values are unique in the index column. Attempts to insert or update data which would result in duplicate entries will generate an error. - - Currently, only the B-tree index supports unique indexes. - -- **schema_name** - - Specifies the schema name. - - Value range: an existing schema name - -- **index_name** - - Specifies the name of the index to be created. The schema of the index is the same as that of the table. - - Value range: a string. - -- **table_name** - - Specifies the name of the table for which an index is to be created (optionally schema-qualified). - - Value range: an existing table name - -- **column_name** - - Specifies the name of the column for which an index is to be created. - - Multiple columns can be specified if the index method supports multi-column indexes. A global index supports a maximum of 31 columns, and other indexes support a maximum of 32 columns. - -- **expression** - - Specifies an expression index constraint based on one or more columns of the table. It must be written in parentheses. However, the parentheses can be omitted if the expression has the form of a function call. - - The expression index can be used to obtain fast access to data based on some transformation of the basic data. For example, an index computed on **upper(col)** would allow the **WHERE upper(col) = 'JIM'** clause to use an index. - - If an expression contains the **IS NULL** clause, the index for this expression is invalid. In this case, you are advised to create a partial index. - -- **WHERE predicate** - - Creates a partial index. A partial index contains entries for only a portion of a table, usually a portion that is more useful for indexing than the rest of the table. For example, if you have a table that contains both billed and unbilled orders where the unbilled orders take up a small portion of the total table and yet that is an often used portion, you can improve performance by creating an index on just that portion. 
In addition, the **WHERE** clause with a UNIQUE constraint can be used to enforce uniqueness over a subset of a table.
-
-  Value range: The predicate expression can only refer to columns of the underlying table, but it can use all columns, not just the ones being indexed. Currently, subqueries and aggregate expressions are forbidden in the **WHERE** clause.
-
-## Examples
-
-Create the **tpcds.ship_mode_t1** table.
-
-```sql
-MogDB=# CREATE SCHEMA tpcds;
-MogDB=# CREATE TABLE tpcds.ship_mode_t1
-(
-    SM_SHIP_MODE_SK           INTEGER               NOT NULL,
-    SM_SHIP_MODE_ID           CHAR(16)              NOT NULL,
-    SM_TYPE                   CHAR(30)                      ,
-    SM_CODE                   CHAR(10)                      ,
-    SM_CARRIER                CHAR(20)                      ,
-    SM_CONTRACT               CHAR(20)
-) ;
-```
-
-Create a single-column index on the **SM_SHIP_MODE_ID** column in the **tpcds.ship_mode_t1** table.
-
-```makefile
-MogDB=# CREATE UNIQUE INDEX ds_ship_mode_t1_index0 ON tpcds.ship_mode_t1(SM_SHIP_MODE_ID);
-```
-
-Create a common unique index on the **SM_SHIP_MODE_SK** column in the **tpcds.ship_mode_t1** table.
-
-```makefile
-MogDB=# CREATE UNIQUE INDEX ds_ship_mode_t1_index1 ON tpcds.ship_mode_t1(SM_SHIP_MODE_SK);
-```
-
-Create an expression index on the **SM_CODE** column in the **tpcds.ship_mode_t1** table.
-
-```makefile
-MogDB=# CREATE INDEX ds_ship_mode_t1_index2 ON tpcds.ship_mode_t1(SUBSTR(SM_CODE,1 ,4));
-```
-
-Create a partial index on the **SM_SHIP_MODE_SK** column where **SM_SHIP_MODE_SK** is greater than **10** in the **tpcds.ship_mode_t1** table.
-
-```sql
-MogDB=# CREATE UNIQUE INDEX ds_ship_mode_t1_index3 ON tpcds.ship_mode_t1(SM_SHIP_MODE_SK) WHERE SM_SHIP_MODE_SK>10;
-```
-
-Delete the created index.
-
-```makefile
-MogDB=# DROP INDEX tpcds.ds_ship_mode_t1_index2;
+---
+title: Indexes
+summary: Indexes
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Indexes
+
+An index is a pointer to data in a table. The index in a database is very similar to the index directory in a book.
+
+Indexes are primarily used to enhance database performance (though inappropriate use can result in database performance deterioration). You are advised to create indexes on:
+
+- Columns that are often queried
+- Join conditions. For a query on joined columns, you are advised to create a composite index on the columns. For example, for select * from t1 join t2 on t1.a=t2.a and t1.b=t2.b, you can create a composite index on the a and b columns of table t1.
+- Columns having filter criteria (especially scope criteria) of a **WHERE** clause
+- Columns that are frequently used after **ORDER BY**, **GROUP BY**, and **DISTINCT**
+
+## Syntax
+
+- Single-column index
+
+  A single-column index is created based on only one column in a table.
+
+  ```sql
+  CREATE INDEX [ [schema_name.]index_name ] ON table_name (column_name);
+  ```
+
+- Composite index
+
+  A composite index is created based on multiple columns in a table.
+
+  ```sql
+  CREATE INDEX [ [schema_name.]index_name ] ON table_name (column1_name,column2_name,...);
+  ```
+
+- Unique index
+
+  Duplicate values cannot be inserted into a column with a unique index.
+
+  ```sql
+  CREATE UNIQUE INDEX [ [schema_name.]index_name ] ON table_name (column_name);
+  ```
+
+- Expression index
+
+  An expression index is created based on an expression computed from one or more columns of the table.
+
+  ```sql
+  CREATE INDEX [ [schema_name.]index_name ] ON table_name (expression);
+  ```
+
+- Partial index
+
+  A partial index contains entries for only a portion of a table, usually a portion that is more useful for indexing than the rest of the table.
+
+  ```sql
+  CREATE INDEX [ [schema_name.]index_name ] ON table_name (column_name)
+  [ WHERE predicate ]
+  ```
+
+- Index deletion
+
+  ```sql
+  DROP INDEX index_name;
+  ```
+
+## Parameter Description
+
+- **UNIQUE**
+
+  Creates a unique index. In this way, the system checks whether new values are unique in the index column. Attempts to insert or update data which would result in duplicate entries will generate an error.
+
+  Currently, only the B-tree index supports unique indexes.
+
+- **schema_name**
+
+  Specifies the schema name.
+
+  Value range: an existing schema name
+
+- **index_name**
+
+  Specifies the name of the index to be created. The schema of the index is the same as that of the table.
+
+  Value range: a string
+
+- **table_name**
+
+  Specifies the name of the table for which an index is to be created (optionally schema-qualified).
+
+  Value range: an existing table name
+
+- **column_name**
+
+  Specifies the name of the column for which an index is to be created.
+
+  Multiple columns can be specified if the index method supports multi-column indexes. A global index supports a maximum of 31 columns, and other indexes support a maximum of 32 columns.
+
+- **expression**
+
+  Specifies an index expression based on one or more columns of the table. It must be written in parentheses. However, the parentheses can be omitted if the expression has the form of a function call.
+
+  The expression index can be used to obtain fast access to data based on some transformation of the basic data. For example, an index computed on **upper(col)** would allow the **WHERE upper(col) = 'JIM'** clause to use an index.
+
+  If an expression contains the **IS NULL** clause, the index for this expression is invalid. In this case, you are advised to create a partial index.
+
+- **WHERE predicate**
+
+  Creates a partial index. A partial index contains entries for only a portion of a table, usually a portion that is more useful for indexing than the rest of the table. For example, if you have a table that contains both billed and unbilled orders where the unbilled orders take up a small portion of the total table and yet that is an often used portion, you can improve performance by creating an index on just that portion. In addition, the **WHERE** clause with a UNIQUE constraint can be used to enforce uniqueness over a subset of a table.
+
+  Value range: The predicate expression can only refer to columns of the underlying table, but it can use all columns, not just the ones being indexed. Currently, subqueries and aggregate expressions are forbidden in the **WHERE** clause.
+
+## Examples
+
+Create the **tpcds.ship_mode_t1** table.
+
+```sql
+MogDB=# CREATE SCHEMA tpcds;
+MogDB=# CREATE TABLE tpcds.ship_mode_t1
+(
+    SM_SHIP_MODE_SK           INTEGER               NOT NULL,
+    SM_SHIP_MODE_ID           CHAR(16)              NOT NULL,
+    SM_TYPE                   CHAR(30)                      ,
+    SM_CODE                   CHAR(10)                      ,
+    SM_CARRIER                CHAR(20)                      ,
+    SM_CONTRACT               CHAR(20)
+) ;
+```
+
+Create a single-column index on the **SM_SHIP_MODE_ID** column in the **tpcds.ship_mode_t1** table.
+
+```sql
+MogDB=# CREATE UNIQUE INDEX ds_ship_mode_t1_index0 ON tpcds.ship_mode_t1(SM_SHIP_MODE_ID);
+```
+
+Create a common unique index on the **SM_SHIP_MODE_SK** column in the **tpcds.ship_mode_t1** table.
+
+```sql
+MogDB=# CREATE UNIQUE INDEX ds_ship_mode_t1_index1 ON tpcds.ship_mode_t1(SM_SHIP_MODE_SK);
+```
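+
+A composite index on multiple columns can be created in the same way. The following is an illustrative sketch; the index name **ds_ship_mode_t1_index4** is a placeholder:
+
+```sql
+MogDB=# CREATE INDEX ds_ship_mode_t1_index4 ON tpcds.ship_mode_t1(SM_TYPE, SM_CODE);
+```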
+
+Create an expression index on the **SM_CODE** column in the **tpcds.ship_mode_t1** table.
+
+```sql
+MogDB=# CREATE INDEX ds_ship_mode_t1_index2 ON tpcds.ship_mode_t1(SUBSTR(SM_CODE,1,4));
+```
+
+Create a partial index on the **SM_SHIP_MODE_SK** column where **SM_SHIP_MODE_SK** is greater than **10** in the **tpcds.ship_mode_t1** table.
+
+```sql
+MogDB=# CREATE UNIQUE INDEX ds_ship_mode_t1_index3 ON tpcds.ship_mode_t1(SM_SHIP_MODE_SK) WHERE SM_SHIP_MODE_SK>10;
+```
+
+Delete the created index.
+
+```sql
+MogDB=# DROP INDEX tpcds.ds_ship_mode_t1_index2;
+```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-llvm.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-llvm.md
index d5d7e55b..63e3b41a 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-llvm.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-llvm.md
@@ -1,90 +1,90 @@
----
-title: LLVM
-summary: LLVM
-author: zhang cuiping
-date: 2023-04-07
----
-
-# LLVM
-
-Based on the query execution plan tree, with the library functions provided by the Low Level Virtual Machine (LLVM), MogDB moves the process of determining the actual execution path from the executor phase to the execution initialization phase. In this way, problems such as function calling, logic condition branch determination, and a large amount of data read that are related to the original query execution are avoided, to improve the query performance.
-
-LLVM dynamic compilation can be used to generate customized machine code for each query to replace original common functions. Query performance is improved by reducing redundant judgment conditions and virtual function calls, and by making local data more accurate during actual queries.
-
-LLVM needs to consume extra time to pre-generate intermediate representation (IR) and compile it into codes. Therefore, if the data volume is small or if a query itself consumes less time, the performance deteriorates.
-
-## Application Scenarios
-
-- Expressions supporting LLVM
-
-  The query statements that contain the following expressions support LLVM optimization:
-
-  1. Case…when…
-  2. IN
-  3. Bool
-     - And
-     - Or
-     - Not
-  4. BooleanTest
-     - **IS_NOT_UNKNOWN**: corresponds to SQL statement IS NOT UNKNOWN.
-     - **IS_UNKNOWN**: corresponds to SQL statement IS UNKNOWN.
-     - **IS_TRUE**: corresponds to SQL statement IS TRUE.
-     - **IS_NOT_TRUE**: corresponds to SQL statement IS NOT TRUE.
-     - **IS_FALSE**: corresponds to SQL statement IS FALSE.
-     - **IS_NOT_FALSE**: corresponds to SQL statement IS NOT FALSE.
-  5. NullTest
-     - IS_NOT_NULL
-     - IS_NULL
-  6. Operator
-  7. Function
-     - lpad
-     - substring
-     - btrim
-     - rtrim
-     - length
-  8. Nullif
-
-  Supported data types for expression computing are bool, tinyint, smallint, int, bigint, float4, float8, numeric, date, time, timetz, timestamp, timestamptz, interval, bpchar, varchar, text, and oid.
-
-  Consider using LLVM only if expressions are used in the following content in a vectorized executor: **filter** in the **Scan** node; **complicate hash condition**, **hash join filter**, and **hash join target** in the **Hash Join** node; **filter** and **join filter** in the **Nested Loop** node; **merge join filter** and **merge join target** in the **Merge Join** node; and **filter** in the **Group** node.
-
-- Operators supporting LLVM
-
-  1. Join: HashJoin
-  2. Agg: HashAgg
-  3. Sort
-
-  Where HashJoin supports only Hash Inner Join, and the corresponding hash cond supports comparisons between int4, bigint, and bpchar.
HashAgg supports sum and avg operations of bigint and numeric data types. Group By statements supports int4, bigint, bpchar, text, varchar, timestamp, and count(*) aggregation operation. Sort supports only comparisons between int4, bigint, numeric, bpchar, text, and varchar data types. Except the preceding operations, LLVM cannot be used. You can use the explain performance tool to check whether LLVM can be used. - -## Non-applicable Scenarios - -- LLVM does not apply to tables that have small amount of data. -- Query jobs with a non-vectorized execution path cannot be generated. - -## Other Factors Affecting LLVM Performance - -The LLVM optimization effect depends on not only operations and computing in the database, but also the selected hardware environment. - -- Number of C functions called by expressions - - CodeGen does not implement full-expression calculation, that is, some expressions use CodeGen while others invoke original C code for calculation. In an entire calculation process, if the later calculation method plays a dominate role, using LLVM may deteriorate the performance. By setting **log_min_message** to **DEBUG1**, you can view expressions that directly invoke C code. - -- Memory resources - - One of the key LLVM features is to ensure the locality of data, that is, data should be stored in registers as much as possible. Data loading should be reduced at the same time. Therefore, when using LLVM, value of **work_mem** must be set as large as required to ensure that code is implemented in the memory. Otherwise, performance deteriorates. - -- Cost estimation - - LLVM realizes a simple cost estimation model. You can determine whether to use LLVM for the current node based on the tables involved in the node computing. If the optimizer underestimates the actual number of rows involved, gains cannot be achieved as expected. And vice versa. - -## Suggestions for Using LLVM - -Currently, LLVM is enabled by default in the database kernel, and users can configure it as required. The overall suggestions are as follows: - -1. Set **work_mem** to an appropriate value as large as possible. If much data is flushed to disks, you are advised to disable LLVM by setting **enable_codegen** to **off**. - -2. Set **codegen_cost_threshold** to an appropriate value (the default value is **10000**). Ensure that LLVM is not used when the data volume is small. After **codegen_cost_threshold** is set, the database performance may deteriorate due to the use of LLVM. In this case, you are advised to increase the parameter value. - -3. If a large number of C functions are called, you are advised not to use the LLVM function. - +--- +title: LLVM +summary: LLVM +author: zhang cuiping +date: 2023-04-07 +--- + +# LLVM + +Based on the query execution plan tree, with the library functions provided by the Low Level Virtual Machine (LLVM), MogDB moves the process of determining the actual execution path from the executor phase to the execution initialization phase. In this way, problems such as function calling, logic condition branch determination, and a large amount of data read that are related to the original query execution are avoided, to improve the query performance. + +LLVM dynamic compilation can be used to generate customized machine code for each query to replace original common functions. Query performance is improved by reducing redundant judgment conditions and virtual function calls, and by making local data more accurate during actual queries. 
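+
+A quick way to observe whether dynamic compilation takes effect is to force it on for a test query and inspect the plan. The following is an illustrative sketch; **t1** and **id** are placeholder names, and the **enable_codegen** and **codegen_cost_threshold** parameters as well as the **explain performance** tool are described later in this section.
+
+```sql
+SET enable_codegen = on;          -- allow LLVM dynamic compilation
+SET codegen_cost_threshold = 0;   -- for testing only: do not skip codegen for cheap plans
+EXPLAIN PERFORMANCE SELECT count(*) FROM t1 WHERE id > 100;
+-- If codegen is used, the plan output indicates the LLVM-optimized parts.
+```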
+
+LLVM needs to consume extra time to pre-generate intermediate representation (IR) and compile it into code. Therefore, if the data volume is small or if a query itself consumes little time, the performance deteriorates.
+
+## Application Scenarios
+
+- Expressions supporting LLVM
+
+  The query statements that contain the following expressions support LLVM optimization:
+
+  1. Case…when…
+  2. IN
+  3. Bool
+     - And
+     - Or
+     - Not
+  4. BooleanTest
+     - **IS_NOT_UNKNOWN**: corresponds to SQL statement IS NOT UNKNOWN.
+     - **IS_UNKNOWN**: corresponds to SQL statement IS UNKNOWN.
+     - **IS_TRUE**: corresponds to SQL statement IS TRUE.
+     - **IS_NOT_TRUE**: corresponds to SQL statement IS NOT TRUE.
+     - **IS_FALSE**: corresponds to SQL statement IS FALSE.
+     - **IS_NOT_FALSE**: corresponds to SQL statement IS NOT FALSE.
+  5. NullTest
+     - IS_NOT_NULL
+     - IS_NULL
+  6. Operator
+  7. Function
+     - lpad
+     - substring
+     - btrim
+     - rtrim
+     - length
+  8. Nullif
+
+  Supported data types for expression computing are bool, tinyint, smallint, int, bigint, float4, float8, numeric, date, time, timetz, timestamp, timestamptz, interval, bpchar, varchar, text, and oid.
+
+  Consider using LLVM only if expressions are used in the following content in a vectorized executor: **filter** in the **Scan** node; **complicate hash condition**, **hash join filter**, and **hash join target** in the **Hash Join** node; **filter** and **join filter** in the **Nested Loop** node; **merge join filter** and **merge join target** in the **Merge Join** node; and **filter** in the **Group** node.
+
+- Operators supporting LLVM
+
+  1. Join: HashJoin
+  2. Agg: HashAgg
+  3. Sort
+
+  HashJoin supports only Hash Inner Join, and the corresponding hash cond supports comparisons between int4, bigint, and bpchar. HashAgg supports sum and avg operations of the bigint and numeric data types. The GROUP BY clause supports int4, bigint, bpchar, text, varchar, and timestamp, as well as the count(*) aggregation operation. Sort supports only comparisons between the int4, bigint, numeric, bpchar, text, and varchar data types. LLVM cannot be used for operations other than the preceding ones. You can use the explain performance tool to check whether LLVM can be used.
+
+## Non-applicable Scenarios
+
+- LLVM does not apply to tables that have a small amount of data.
+- Queries for which a vectorized execution path cannot be generated do not support LLVM.
+
+## Other Factors Affecting LLVM Performance
+
+The LLVM optimization effect depends on not only operations and computing in the database, but also the selected hardware environment.
+
+- Number of C functions called by expressions
+
+  CodeGen does not implement full-expression calculation, that is, some expressions use CodeGen while others invoke original C code for calculation. In an entire calculation process, if the latter calculation method plays a dominant role, using LLVM may deteriorate the performance. By setting **log_min_message** to **DEBUG1**, you can view expressions that directly invoke C code.
+
+- Memory resources
+
+  One of the key LLVM features is to ensure the locality of data, that is, data should be stored in registers as much as possible. Data loading should be reduced at the same time. Therefore, when using LLVM, the value of **work_mem** must be set as large as required to ensure that computation stays in memory. Otherwise, performance deteriorates.
+
+- Cost estimation
+
+  LLVM implements a simple cost estimation model. You can determine whether to use LLVM for the current node based on the tables involved in the node computing. 
If the optimizer underestimates the actual number of rows involved, gains cannot be achieved as expected. And vice versa. + +## Suggestions for Using LLVM + +Currently, LLVM is enabled by default in the database kernel, and users can configure it as required. The overall suggestions are as follows: + +1. Set **work_mem** to an appropriate value as large as possible. If much data is flushed to disks, you are advised to disable LLVM by setting **enable_codegen** to **off**. + +2. Set **codegen_cost_threshold** to an appropriate value (the default value is **10000**). Ensure that LLVM is not used when the data volume is small. After **codegen_cost_threshold** is set, the database performance may deteriorate due to the use of LLVM. In this case, you are advised to increase the parameter value. + +3. If a large number of C functions are called, you are advised not to use the LLVM function. + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If resources are robust, the larger the data volume is, the better the performance improvement is. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-lock.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-lock.md index 13653e25..7939a75c 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-lock.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-lock.md @@ -1,76 +1,76 @@ ---- -title: Locks -summary: Locks -author: zhang cuiping -date: 2023-04-07 ---- - -# Locks - -To ensure database data consistency, you can execute the LOCK TABLE statement to prevent other users from modifying tables. - -For example, an application needs to ensure that data in a table is not modified during transaction running. For this purpose, table usage can be locked. This prevents data from being concurrently modified. - -The LOCK TABLE statement is useless outside a transaction block, because the lock would remain held only to the completion of the statement. - -## Syntax - -```css -LOCK [ TABLE ] name IN lock_mode MODE -``` - -## Parameter Description - -- **name** - - Specifies the name of the table to be locked. - -- **lock_mode** - - Specifies the lock mode. The basic modes are as follows: - - - **ACCESS EXCLUSIVE** - - Guarantees that the holder is the only transaction accessing the table in any way. It is the default lock mode. - - - **ACCESS SHARE** - - Indicates the lock mode for reading tables without modifying them. - -## Examples - -Obtains an **ACCESS EXCLUSIVE** lock on a table when going to perform a delete operation. - -```sql --- Create a sample table. -MogDB=# CREATE TABLE graderecord - ( - number INTEGER, - name CHAR(20), - class CHAR(20), - grade INTEGER - ); --- Insert data. -MogDB=# insert into graderecord values('210101','Alan','21.01',92); -insert into graderecord values('210102','Ben','21.01',62); -insert into graderecord values('210103','Brain','21.01',26); -insert into graderecord values('210204','Carl','21.02',77); -insert into graderecord values('210205','David','21.02',47); -insert into graderecord values('210206','Eric','21.02',97); -insert into graderecord values('210307','Frank','21.03',90); -insert into graderecord values('210308','Gavin','21.03',100); -insert into graderecord values('210309','Henry','21.03',67); -insert into graderecord values('210410','Jack','21.04',75); -insert into graderecord values('210311','Jerry','21.04',60); - --- Start the process. 
-MogDB=# START TRANSACTION; - --- Provide the example table. -MogDB=# LOCK TABLE graderecord IN ACCESS EXCLUSIVE MODE; - --- Delete the example table. -MogDB=# DELETE FROM graderecord WHERE name ='Alan'; - -MogDB=# COMMIT; +--- +title: Locks +summary: Locks +author: zhang cuiping +date: 2023-04-07 +--- + +# Locks + +To ensure database data consistency, you can execute the LOCK TABLE statement to prevent other users from modifying tables. + +For example, an application needs to ensure that data in a table is not modified during transaction running. For this purpose, table usage can be locked. This prevents data from being concurrently modified. + +The LOCK TABLE statement is useless outside a transaction block, because the lock would remain held only to the completion of the statement. + +## Syntax + +```css +LOCK [ TABLE ] name IN lock_mode MODE +``` + +## Parameter Description + +- **name** + + Specifies the name of the table to be locked. + +- **lock_mode** + + Specifies the lock mode. The basic modes are as follows: + + - **ACCESS EXCLUSIVE** + + Guarantees that the holder is the only transaction accessing the table in any way. It is the default lock mode. + + - **ACCESS SHARE** + + Indicates the lock mode for reading tables without modifying them. + +## Examples + +Obtains an **ACCESS EXCLUSIVE** lock on a table when going to perform a delete operation. + +```sql +-- Create a sample table. +MogDB=# CREATE TABLE graderecord + ( + number INTEGER, + name CHAR(20), + class CHAR(20), + grade INTEGER + ); +-- Insert data. +MogDB=# insert into graderecord values('210101','Alan','21.01',92); +insert into graderecord values('210102','Ben','21.01',62); +insert into graderecord values('210103','Brain','21.01',26); +insert into graderecord values('210204','Carl','21.02',77); +insert into graderecord values('210205','David','21.02',47); +insert into graderecord values('210206','Eric','21.02',97); +insert into graderecord values('210307','Frank','21.03',90); +insert into graderecord values('210308','Gavin','21.03',100); +insert into graderecord values('210309','Henry','21.03',67); +insert into graderecord values('210410','Jack','21.04',75); +insert into graderecord values('210311','Jerry','21.04',60); + +-- Start the process. +MogDB=# START TRANSACTION; + +-- Provide the example table. +MogDB=# LOCK TABLE graderecord IN ACCESS EXCLUSIVE MODE; + +-- Delete the example table. +MogDB=# DELETE FROM graderecord WHERE name ='Alan'; + +MogDB=# COMMIT; ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-trigger.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-trigger.md index 28628f08..f81c79d6 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-trigger.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference-trigger.md @@ -1,158 +1,158 @@ ---- -title: Triggers -summary: Triggers -author: zhang cuiping -date: 2023-04-07 ---- - -# Triggers - -A trigger automatically executes functions when an event occurs in a specified database. - -## Syntax - -- Create a trigger. - - ```less - CREATE TRIGGER trigger_name { BEFORE | AFTER | INSTEAD OF } { event [ OR ... ] } - ON table_name - [ FOR [ EACH ] { ROW | STATEMENT } ] - [ WHEN ( condition ) ] - EXECUTE PROCEDURE function_name ( arguments ); - ``` - -- Modify a trigger. - - ```sql - ALTER TRIGGER trigger_name ON table_name RENAME TO new_trigger_name; - ``` - -- Delete a trigger. 
- - ```sql - DROP TRIGGER trigger_name ON table_name [ CASCADE | RESTRICT ]; - ``` - -## Parameter Description - -- **trigger_name** - - Specifies the trigger name. - -- **BEFORE** - - Specifies that a trigger function is executed before the trigger event. - -- **AFTER** - - Specifies that a trigger function is executed after the trigger event. - -- **INSTEAD OF** - - Specifies that a trigger function directly replaces the trigger event. - -- **event** - - Specifies the event that will fire the trigger. Values are **INSERT**, **UPDATE**, **DELETE**, and **TRUNCATE**. Multiple events can be specified using **OR**. - -- **table_name** - - Specifies the name of the table corresponding to the trigger. - -- **FOR EACH ROW | FOR EACH STATEMENT** - - Specifies the frequency of firing the trigger. - - - **FOR EACH ROW** indicates that the trigger should be fired once for every row affected by the trigger event. - - **FOR EACH STATEMENT** indicates that the trigger should be fired just once per SQL statement. - - If neither is specified, the default value is **FOR EACH STATEMENT**. Constraint triggers can only be marked as **FOR EACH ROW**. - -- **function_name** - - Specifies a user-defined function, which must be declared as taking no parameters and returning data of trigger type. This function is executed when a trigger fires. - -- **arguments** - - Specifies an optional comma-separated list of parameters to be provided for the function when the trigger is executed. - -- **new_trigger_name** - - Specifies the new trigger name. - -## Examples - -```sql --- Create a source table and a target table. -MogDB=# CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 INT); -MogDB=# CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT, id3 INT); - --- Create a trigger function. -MogDB=# CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS - $$ - DECLARE - BEGIN - INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3); - RETURN NEW; - END - $$ LANGUAGE PLPGSQL; - -MogDB=# CREATE OR REPLACE FUNCTION tri_update_func() RETURNS TRIGGER AS - $$ - DECLARE - BEGIN - UPDATE test_trigger_des_tbl SET id3 = NEW.id3 WHERE id1=OLD.id1; - RETURN OLD; - END - $$ LANGUAGE PLPGSQL; - -MogDB=# CREATE OR REPLACE FUNCTION TRI_DELETE_FUNC() RETURNS TRIGGER AS - $$ - DECLARE - BEGIN - DELETE FROM test_trigger_des_tbl WHERE id1=OLD.id1; - RETURN OLD; - END - $$ LANGUAGE PLPGSQL; - --- Create an INSERT trigger. -MogDB=# CREATE TRIGGER insert_trigger - BEFORE INSERT ON test_trigger_src_tbl - FOR EACH ROW - EXECUTE PROCEDURE tri_insert_func(); - --- Create an UPDATE trigger. -MogDB=# CREATE TRIGGER update_trigger - AFTER UPDATE ON test_trigger_src_tbl - FOR EACH ROW - EXECUTE PROCEDURE tri_update_func(); - --- Create a DELETE trigger. -MogDB=# CREATE TRIGGER delete_trigger - BEFORE DELETE ON test_trigger_src_tbl - FOR EACH ROW - EXECUTE PROCEDURE tri_delete_func(); - --- Execute the INSERT event and check the trigger results. -MogDB=# INSERT INTO test_trigger_src_tbl VALUES(100,200,300); -MogDB=# SELECT * FROM test_trigger_src_tbl; -MogDB=# SELECT * FROM test_trigger_des_tbl; // Check whether the trigger operation takes effect. - --- Execute the UPDATE event and check the trigger results. -MogDB=# UPDATE test_trigger_src_tbl SET id3=400 WHERE id1=100; -MogDB=# SELECT * FROM test_trigger_src_tbl; -MogDB=# SELECT * FROM test_trigger_des_tbl; // Check whether the trigger operation takes effect. - --- Execute the DELETE event and check the trigger results. 
-MogDB=# DELETE FROM test_trigger_src_tbl WHERE id1=100; -MogDB=# SELECT * FROM test_trigger_src_tbl; -MogDB=# SELECT * FROM test_trigger_des_tbl; // Check whether the trigger operation takes effect. - --- Modify a trigger. -MogDB=# ALTER TRIGGER delete_trigger ON test_trigger_src_tbl RENAME TO delete_trigger_renamed; - --- Delete a trigger. -MogDB=# DROP TRIGGER insert_trigger ON test_trigger_src_tbl; -MogDB=# DROP TRIGGER update_trigger ON test_trigger_src_tbl; -MogDB=# DROP TRIGGER delete_trigger_renamed ON test_trigger_src_tbl; +--- +title: Triggers +summary: Triggers +author: zhang cuiping +date: 2023-04-07 +--- + +# Triggers + +A trigger automatically executes functions when an event occurs in a specified database. + +## Syntax + +- Create a trigger. + + ```less + CREATE TRIGGER trigger_name { BEFORE | AFTER | INSTEAD OF } { event [ OR ... ] } + ON table_name + [ FOR [ EACH ] { ROW | STATEMENT } ] + [ WHEN ( condition ) ] + EXECUTE PROCEDURE function_name ( arguments ); + ``` + +- Modify a trigger. + + ```sql + ALTER TRIGGER trigger_name ON table_name RENAME TO new_trigger_name; + ``` + +- Delete a trigger. + + ```sql + DROP TRIGGER trigger_name ON table_name [ CASCADE | RESTRICT ]; + ``` + +## Parameter Description + +- **trigger_name** + + Specifies the trigger name. + +- **BEFORE** + + Specifies that a trigger function is executed before the trigger event. + +- **AFTER** + + Specifies that a trigger function is executed after the trigger event. + +- **INSTEAD OF** + + Specifies that a trigger function directly replaces the trigger event. + +- **event** + + Specifies the event that will fire the trigger. Values are **INSERT**, **UPDATE**, **DELETE**, and **TRUNCATE**. Multiple events can be specified using **OR**. + +- **table_name** + + Specifies the name of the table corresponding to the trigger. + +- **FOR EACH ROW | FOR EACH STATEMENT** + + Specifies the frequency of firing the trigger. + + - **FOR EACH ROW** indicates that the trigger should be fired once for every row affected by the trigger event. + - **FOR EACH STATEMENT** indicates that the trigger should be fired just once per SQL statement. + + If neither is specified, the default value is **FOR EACH STATEMENT**. Constraint triggers can only be marked as **FOR EACH ROW**. + +- **function_name** + + Specifies a user-defined function, which must be declared as taking no parameters and returning data of trigger type. This function is executed when a trigger fires. + +- **arguments** + + Specifies an optional comma-separated list of parameters to be provided for the function when the trigger is executed. + +- **new_trigger_name** + + Specifies the new trigger name. + +## Examples + +```sql +-- Create a source table and a target table. +MogDB=# CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 INT); +MogDB=# CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT, id3 INT); + +-- Create a trigger function. 
+MogDB=# CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS + $$ + DECLARE + BEGIN + INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3); + RETURN NEW; + END + $$ LANGUAGE PLPGSQL; + +MogDB=# CREATE OR REPLACE FUNCTION tri_update_func() RETURNS TRIGGER AS + $$ + DECLARE + BEGIN + UPDATE test_trigger_des_tbl SET id3 = NEW.id3 WHERE id1=OLD.id1; + RETURN OLD; + END + $$ LANGUAGE PLPGSQL; + +MogDB=# CREATE OR REPLACE FUNCTION TRI_DELETE_FUNC() RETURNS TRIGGER AS + $$ + DECLARE + BEGIN + DELETE FROM test_trigger_des_tbl WHERE id1=OLD.id1; + RETURN OLD; + END + $$ LANGUAGE PLPGSQL; + +-- Create an INSERT trigger. +MogDB=# CREATE TRIGGER insert_trigger + BEFORE INSERT ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_insert_func(); + +-- Create an UPDATE trigger. +MogDB=# CREATE TRIGGER update_trigger + AFTER UPDATE ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_update_func(); + +-- Create a DELETE trigger. +MogDB=# CREATE TRIGGER delete_trigger + BEFORE DELETE ON test_trigger_src_tbl + FOR EACH ROW + EXECUTE PROCEDURE tri_delete_func(); + +-- Execute the INSERT event and check the trigger results. +MogDB=# INSERT INTO test_trigger_src_tbl VALUES(100,200,300); +MogDB=# SELECT * FROM test_trigger_src_tbl; +MogDB=# SELECT * FROM test_trigger_des_tbl; // Check whether the trigger operation takes effect. + +-- Execute the UPDATE event and check the trigger results. +MogDB=# UPDATE test_trigger_src_tbl SET id3=400 WHERE id1=100; +MogDB=# SELECT * FROM test_trigger_src_tbl; +MogDB=# SELECT * FROM test_trigger_des_tbl; // Check whether the trigger operation takes effect. + +-- Execute the DELETE event and check the trigger results. +MogDB=# DELETE FROM test_trigger_src_tbl WHERE id1=100; +MogDB=# SELECT * FROM test_trigger_src_tbl; +MogDB=# SELECT * FROM test_trigger_des_tbl; // Check whether the trigger operation takes effect. + +-- Modify a trigger. +MogDB=# ALTER TRIGGER delete_trigger ON test_trigger_src_tbl RENAME TO delete_trigger_renamed; + +-- Delete a trigger. 
+MogDB=# DROP TRIGGER insert_trigger ON test_trigger_src_tbl; +MogDB=# DROP TRIGGER update_trigger ON test_trigger_src_tbl; +MogDB=# DROP TRIGGER delete_trigger_renamed ON test_trigger_src_tbl; ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference.md index 047bbe2d..451c7b8b 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/sql-reference.md @@ -14,7 +14,6 @@ date: 2023-04-07 - **[Type Conversion](./type-conversion/type-conversion.md)** - **[Full Text Search](./full-text-search/full-text-search.md)** - **[System Operation](system-operation.md)** -- **[Controlling Transactions](controlling-transactions.md)** - **[DDL Syntax Overview](ddl-syntax-overview.md)** - **[DML Syntax Overview](dml-syntax-overview.md)** - **[DCL Syntax Overview](dcl-syntax-overview.md)** @@ -31,4 +30,4 @@ date: 2023-04-07 - **[Anonymous Blocks](sql-reference-anonymous-block.md)** - **[Triggers](sql-reference-trigger.md)** - **[INSERT_RIGHT_REF_DEFAULT_VALUE](type-base-value.md)** -- **[Appendix](appendix/appendix.md)** \ No newline at end of file +- **[Appendix](appendix/sql-reference-appendix.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/system-operation.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/system-operation.md index e7fe463b..661dc7fa 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/system-operation.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/system-operation.md @@ -1,38 +1,38 @@ ---- -title: System Operation -summary: System Operation -author: Zhang Cuiping -date: 2021-05-17 ---- - -# System Operation - -MogDB text runs SQL statements to perform different system operations, such as setting variables, displaying the execution plan, and collecting garbage data. - -## Setting Variables - -For details about how to set various parameters for a session or transaction, see [SET](../../reference-guide/sql-syntax/SET.md). - -## Displaying the Execution Plan - -For details about how to display the execution plan that MogDB makes for SQL statements, see [EXPLAIN](../../reference-guide/sql-syntax/EXPLAIN.md). - -## Specifying a Checkpoint in Transaction Logs - -By default, WALs periodically specify checkpoints in a transaction log. **CHECKPOINT** forces an immediate checkpoint when the related command is issued, without waiting for a regular checkpoint scheduled by the system. See [CHECKPOINT](../../reference-guide/sql-syntax/CHECKPOINT.md). - -## Collecting Unnecessary Data - -For details about how to collect garbage data and analyze a database as required, See [VACUUM](../../reference-guide/sql-syntax/VACUUM.md). - -## Collecting Statistics - -For details about how to collect statistics on tables in databases, See [ANALYZE | ANALYSE](../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md). - -## Setting the Constraint Check Mode for the Current Transaction - -For details about how to set the constraint check mode for the current transaction, See [SET CONSTRAINTS](../../reference-guide/sql-syntax/SET-CONSTRAINTS.md). - -## Shutting Down The Current Database Node - -For details about shutting down the current database node, see [SHUTDOWN](../../reference-guide/sql-syntax/SHUTDOWN.md). 
+---
+title: System Operation
+summary: System Operation
+author: Zhang Cuiping
+date: 2021-05-17
+---
+
+# System Operation
+
+MogDB runs SQL statements to perform different system operations, such as setting variables, displaying the execution plan, and collecting garbage data.
+
+## Setting Variables
+
+For details about how to set various parameters for a session or transaction, see [SET](../../reference-guide/sql-syntax/SET.md).
+
+## Displaying the Execution Plan
+
+For details about how to display the execution plan that MogDB makes for SQL statements, see [EXPLAIN](../../reference-guide/sql-syntax/EXPLAIN.md).
+
+## Specifying a Checkpoint in Transaction Logs
+
+By default, WALs periodically specify checkpoints in a transaction log. **CHECKPOINT** forces an immediate checkpoint when the related command is issued, without waiting for a regular checkpoint scheduled by the system. See [CHECKPOINT](../../reference-guide/sql-syntax/CHECKPOINT.md).
+
+## Collecting Unnecessary Data
+
+For details about how to collect garbage data and analyze a database as required, see [VACUUM](../../reference-guide/sql-syntax/VACUUM.md).
+
+## Collecting Statistics
+
+For details about how to collect statistics on tables in databases, see [ANALYZE | ANALYSE](../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md).
+
+## Setting the Constraint Check Mode for the Current Transaction
+
+For details about how to set the constraint check mode for the current transaction, see [SET CONSTRAINTS](../../reference-guide/sql-syntax/SET-CONSTRAINTS.md).
+
+## Shutting Down the Current Database Node
+
+For details about shutting down the current database node, see [SHUTDOWN](../../reference-guide/sql-syntax/SHUTDOWN.md).
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/sql-reference-transaction.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/sql-reference-transaction.md
index e3fa4706..775e6d59 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/sql-reference-transaction.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/sql-reference-transaction.md
@@ -1,11 +1,12 @@
----
-title: Transaction
-summary: Transaction
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Transaction
-
-- **[Transaction Management](transaction-management.md)**
-- **[Transaction Control](transaction-control.md)**
\ No newline at end of file
+---
+title: Transaction
+summary: Transaction
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Transaction
+
+- **[Transaction Management](transaction-management.md)**
+- **[Transaction Control](transaction-control.md)**
+- **[SELECT Auto-Commit Transactions](transaction-auto-commit.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/transaction-auto-commit.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/transaction-auto-commit.md
new file mode 100644
index 00000000..d22f9753
--- /dev/null
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/transaction-auto-commit.md
@@ -0,0 +1,146 @@
+---
+title: SELECT Auto-Commit Transactions
+summary: SELECT Auto-Commit Transactions
+author: 郭欢 郑小进 赵金
+date: 2024-03-26
+---
+
+# SELECT Auto-Commit Transactions
+
+## Overview
+
+There are differences in transaction status control between Oracle and MogDB. Oracle does not initiate transactions for read-only commands and implicitly starts transactions for write commands, which are ended explicitly. MogDB, by contrast, starts and ends a transaction, implicitly or explicitly, for any read or write command that requires transaction semantics.
Oracle does not start a transaction for read-only commands; for write commands, it implicitly starts a transaction, which must be ended explicitly. MogDB controls transaction status by implicitly or explicitly starting and ending a transaction for any read or write command that requires transaction semantics.
+
+Currently, the driver sends an explicit Begin to the kernel in non-auto-commit mode, and a transaction requires an explicit Commit from the user to end. This can lead to a problem: if a table has been accessed by a read-only connection, that connection keeps a transaction open and holds resources that are not released, thereby blocking TRUNCATE operations on the table.
+
+This feature implements a mechanism compatible with Oracle's transaction status: the driver does not send Begin in non-auto-commit mode. For read-only commands, the kernel commits automatically; for write commands, users need to commit explicitly.
+
+## Usage Instructions
+
+The GUC parameter [behavior_compat_options](../../guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md#behavior_compat_options) has a new configuration item `compat_oracle_txn_control`.
+
+- When the driver is in non-auto-commit mode (autocommit = off) and the kernel is configured with `behavior_compat_options = 'compat_oracle_txn_control';`, the auto-commit transaction feature for SELECT is enabled. As of JDBC 5.0.0.8 and Psycopg2 5.0.0.5, the `compat_oracle_txn_control` option cannot be modified via `SET behavior_compat_options`.
+
+- When the driver is in auto-commit mode (autocommit = on), the auto-commit transaction feature for SELECT is not enabled even if the kernel is configured with `behavior_compat_options = 'compat_oracle_txn_control';`.
+
+  **Note**: In auto-commit mode (autocommit = on), when the driver version is JDBC 5.0.0.6/5.0.0.7, Psycopg2 5.0.0.4, or ODBC 5.0.0.2, enabling this feature causes the driver's auto-commit mode to become ineffective.
+
+- After JDBC setSavepoint is used, SELECT does not auto-commit.
+
+## Usage Limitations
+
+1. This feature is only applicable in A compatibility mode.
+2. If the driver issues multiple SQL statements in one command and the first statement is not START TRANSACTION/BEGIN, all the statements are committed together automatically. For example, if `set xxx; update xxxx;` are issued together, the UPDATE is committed automatically after execution instead of being committed explicitly as expected. Therefore, issue statements one at a time so that statements that need an explicit commit are not auto-committed.
+3. For functions/stored procedures that involve cursor OUT parameters or return cursor types, an auto-commit occurs after the function/stored procedure is executed, and subsequent fetch operations report an error because the portal is not found.
+
+## Auto-commit Transaction Behavior in Different Scenarios
+
+The sketch after this list illustrates the most common cases.
+
+1. Individual read commands auto-commit (excluding SELECT FOR UPDATE/SHARE/KEY SHARE/NO KEY UPDATE).
+2. Read commands within a transaction block do not auto-commit and require an explicit commit.
+3. Read commands within functions/stored procedures do not auto-commit; if the function contains only read commands, it auto-commits upon completion.
+4. Individual write commands (INSERT/UPDATE/DELETE/MERGE) require an explicit commit.
+5. Write commands within a transaction block do not auto-commit and require an explicit commit.
+6. Write commands within functions/stored procedures do not auto-commit and require an explicit commit.
+7. Individual DDL statements auto-commit.
+8. DDL statements within a transaction block do not auto-commit and require an explicit commit.
+9. DDL within functions/stored procedures does not auto-commit; if the DDL contains no write behavior, it auto-commits upon completion of the function.
+10. DDL within functions/stored procedures does not auto-commit; if the DDL contains write behavior, an explicit commit is required after the function completes.
+11. If a function/stored procedure contains sub-transactions, then regardless of whether the sub-transactions commit or roll back, the outcome depends on whether the main transaction has write behavior: if it does, an explicit commit is required; if not, it auto-commits.
+12. For special commands such as EXPLAIN, anonymous blocks, and EXECUTE, an explicit commit is required if they contain write behavior.
+13. For individual lock commands and lock commands within functions/stored procedures, the kernel explicitly executes them in a transaction block, so an explicit commit is required.
+14. For individual DECLARE CURSOR definitions, the kernel explicitly executes them in a transaction block, so an explicit commit is required. Cursor definitions within functions/stored procedures are not actively executed in a transaction block, and the function auto-commits upon completion.
+15. A `select into` statement first creates a table and then inserts data, so it is essentially a DDL statement and auto-commits (as of version 5.0.8).
+16. So as not to affect database initialization, this Oracle transaction compatibility feature is disabled by default; it can be enabled during the database startup process without affecting normal startup.
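+
+The following is a minimal sketch of the most common cases, assuming an A-compatibility database configured with `behavior_compat_options = 'compat_oracle_txn_control'` and a driver connection running in non-auto-commit mode (autocommit = off); the table name `t_demo` is illustrative:
+
+```sql
+-- Individual DDL: auto-commits (case 7).
+CREATE TABLE t_demo(id int);
+
+-- Individual read: auto-commits, so no transaction is left open (case 1).
+SELECT * FROM t_demo;
+
+-- Individual write: requires an explicit commit (case 4).
+INSERT INTO t_demo VALUES (1);
+COMMIT;
+
+-- Locking read: excluded from auto-commit, requires an explicit commit (case 1).
+SELECT * FROM t_demo FOR UPDATE;
+COMMIT;
+```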
+ +## Behavior Description of Various SQL in Different Scenarios + +| Database Object | Object Operation | Single Statement | Statements in Transaction Block | Statements Inside Functions/Stored Procedures | +| ---------------------------- | --------------------------------- | ------------------------------------------------------------ | --------------------------------------------------------- | ------------------------------------------------------------ | +| TABLE | SELECT | Auto-commit | Explicit commit | If the function contains only select read commands, it auto-commits upon completion | +| | SELECT FOR UPDATE | Explicit commit | Explicit commit | Explicit commit | +| | SELECT FOR SHARE | Explicit commit | Explicit commit | Explicit commit | +| | SELECT FOR KEY SHARE | Explicit commit | Explicit commit | Explicit commit | +| | SELECT FOR NO KEY UPDATE | Explicit commit | Explicit commit | Explicit commit | +| | INSERT | Explicit commit | Explicit commit | Explicit commit | +| | UPDATE | Explicit commit | Explicit commit | Explicit commit | +| | DELETE | Explicit commit | Explicit commit | Explicit commit | +| | MERGE INTO | Explicit commit | Explicit commit | Explicit commit | +| DECLARE CURSOR | DECLARE CURSOR | Explicit commit | Explicit commit | If the function contains only cursor operations without write commands, it auto-commits upon completion | +| CLOSE CURSOR | CLOSE CURSOR | Explicit commit | Explicit commit | If the function contains only cursor operations without write commands, it auto-commits upon completion | +| MOVE/FETCH | MOVE/FETCH | Explicit commit | Explicit commit | If the function contains only cursor operations without write commands, it auto-commits upon completion | +| TABLESPACE | CREATE TABLESPACE | Auto-commit | Not supported in transaction blocks | Not supported in functions/stored procedures | +| | ALTER TABLESPACE | Auto-commit | Explicit commit | Explicit commit | +| | DROP TABLESPACE | Auto-commit | Not supported in transaction blocks | Not supported in functions/stored procedures | +| DATABASE | CREATE DATABASE | Auto-commit | Not supported in transaction blocks | Not supported in functions/stored procedures | +| | ALTER DATABASE | Auto-commit | Explicit commit | Explicit commit | +| | DROP DATABASE | Auto-commit | Not supported in transaction blocks | Not supported in functions/stored procedures | +| TABLE | CREATE TABLE | Auto-commit | Explicit commit | Explicit commit | +| | ALTER TABLE | Auto-commit | Explicit commit | Explicit commit | +| | TRUNCATE TABLE | Auto-commit | Explicit commit | Explicit commit | +| | DROP TABLE | Auto-commit | Explicit commit | Explicit commit | +| SCHEMA | CREATE SCHEMA | Auto-commit | Explicit commit | Explicit commit | +| | ALTER SCHEMA | Auto-commit | Explicit commit | Explicit commit | +| | DROP SCHEMA | Auto-commit | Explicit commit | Explicit commit | +| VIEW | CREATE VIEW | Auto-commit | Explicit commit | Explicit commit | +| | ALTER VIEW | Auto-commit | Explicit commit | Explicit commit | +| | DROP VIEW | Auto-commit | Explicit commit | Explicit commit | +| PACKAGE | CREATE PACKAGE | Auto-commit | Not supported | Not supported | +| | CREATE PACKAGE BODY | Auto-commit | Not supported | Not supported | +| | ALTER PACKAGE | Auto-commit | Explicit commit | Explicit commit | +| | DROP PACKAGE | Auto-commit | Explicit commit | Explicit commit | +| TRIGGER | CREATE TRIGGER | Auto-commit | Explicit commit | Explicit commit | +| | ALTER TRIGGER | Auto-commit | Explicit commit | Explicit commit | +| | DROP TRIGGER | 
Auto-commit | Explicit commit | Explicit commit | +| FUNCTION | CREATE FUNCTION | Auto-commit | Not supported | Not supported | +| | ALTER FUNCTION | Auto-commit | Explicit commit | Explicit commit | +| | DROP FUNCTION | Auto-commit | Explicit commit | Explicit commit | +| ROLE | CREATE ROLE | Auto-commit | Explicit commit | Explicit commit | +| | ALTER ROLE | Auto-commit | Explicit commit | Explicit commit | +| | DROP ROLE | Auto-commit | Explicit commit | Explicit commit | +| INDEX | CREATE INDEX | Auto-commit | Explicit commit | Explicit commit | +| | ALTER INDEX | Auto-commit | Explicit commit | Explicit commit | +| | REINDEX | Auto-commit | Explicit commit | Explicit commit | +| | DROP INDEX | Auto-commit | Explicit commit | Explicit commit | +| SEQUENCE | CREATE SEQUENCE | Auto-commit | Explicit commit | Not supported in functions/stored procedures | +| | ALTER SEQUENCE | Auto-commit | Explicit commit | Not supported in functions/stored procedures | +| | DROP SEQUENCE | Auto-commit | Explicit commit | Explicit commit | +| USER | CREATE USER | Auto-commit | Explicit commit | Explicit commit | +| | ALTER USER | Auto-commit | Explicit commit | Explicit commit | +| | DROP USER | Auto-commit | Explicit commit | Explicit commit | +| COMMENT | COMMENT | Auto-commit | Explicit commit | Explicit commit | +| VACUUM | VACUUM | Auto-commit | Cannot be placed inside a transaction block | Cannot be placed inside a function | +| EXPLAIN | EXPLAIN | If the EXPLAIN statement has write behavior, e.g., EXPLAIN ANALYZE INSERT INTO tb1 VALUES(1); requires explicit commit, otherwise auto-commit | Explicit commit | If the EXPLAIN statement has write behavior, requires explicit commit, otherwise auto-commit upon function completion | +| SHOW | SHOW | Auto-commit | Explicit commit | Auto-commit upon function completion | +| ALTER SYSTEM SET | ALTER SYSTEM SET | Auto-commit | Statements not supported in transaction blocks | Statements not supported in functions/stored procedures | +| SET | SET | Auto-commit | Explicit commit | Auto-commit upon function completion | +| SHUTDOWN | SHUTDOWN | Auto-commit | Will be executed before commit, after which the current session will close | Will be executed before commit, after which the current session will close | +| ANONYMOUS BLOCK EXECUTE | ANONYMOUS BLOCK EXECUTE | If the anonymous block has write behavior, e.g., begin insert into tb1 values(1); end; / requires explicit commit, otherwise auto-commit | Explicit commit | If the anonymous block has write behavior, requires explicit commit, otherwise auto-commit upon function completion | +| LOCK TABLE | LOCK TABLE | Explicit commit | Explicit commit | Explicit commit | +| SET CONSTRAINTS | SET CONSTRAINTS | Auto-commit | Explicit commit | Auto-commit upon function completion | +| CHECKPOINT | CHECKPOINT | Auto-commit | Explicit commit | Auto-commit upon function completion | +| SHRINK | SHRINK | Auto-commit | Explicit commit | Explicit commit | +| CREATE RULE | CREATE RULE | Auto-commit | Explicit commit | Explicit commit | +| TYPE | CREATE TYPE | Auto-commit | Explicit commit | Explicit commit | +| | ALTER TYPE | Auto-commit | Explicit commit | Explicit commit | +| EVENT TRIGGER | CREATE EVENT TRIGGER | Auto-commit | Explicit commit | Explicit commit | +| | ALTER EVENT TRIGGER | Auto-commit | Explicit commit | Explicit commit | +| | DROP EVENT TRIGGER | Auto-commit | Explicit commit | Explicit commit | +| PURGE | PURGE TABLE | Auto-commit | Explicit commit | Explicit commit | +| | PURGE INDEX | Auto-commit | Explicit 
commit | Explicit commit | +| | PURGE RECYCLEBIN | Auto-commit | Explicit commit | Explicit commit | +| TimeCapsule Table | TimeCapsule Table | Auto-commit | Explicit commit | Explicit commit | +| PREPARE | PREPARE | Auto-commit | Explicit commit | Auto-commit upon function completion | +| EXECUTE | EXECUTE | If the EXECUTE statement has write behavior, e.g., EXECUTE (INSERT INTO tb1 VALUES(1)); requires explicit commit, otherwise auto-commit | Explicit commit | If the EXECUTE statement has write behavior, requires explicit commit, otherwise auto-commit upon function completion | +| COPY | COPY | Auto-commit | Explicit commit | Explicit commit | +| LOAD | LOAD | Auto-commit | Explicit commit | Explicit commit | +| DEALLOCATE | DEALLOCATE | Auto-commit | Explicit commit | Auto-commit upon function completion | +| | GRANT | Auto-commit | Explicit commit | Explicit commit | +| | REVOKE | Auto-commit | Explicit commit | Explicit commit | +| GRANT ROLE | GRANT ROLE | Auto-commit | Explicit commit | Explicit commit | +| REVOKE ROLE | REVOKE ROLE | Auto-commit | Explicit commit | Explicit commit | +| EVENT | CREATE EVENT | Auto-commit | Explicit commit | Explicit commit | +| | ALTER EVENT | Auto-commit | Explicit commit | Explicit commit | +| | DROP EVENT | Auto-commit | Explicit commit | Explicit commit | +| OWNED | DROP OWNED | Auto-commit | Explicit commit | Explicit commit | +| DEFAULT PRIVILEGES | ALTER DEFAULT PRIVILEGES | Auto-commit | Explicit commit | Explicit commit | +| MATERIALIZED VIEW | REFRESH MATERIALIZED VIEW | Auto-commit | Explicit commit | Explicit commit | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/transaction-control.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/transaction-control.md index 2b02f781..5a0a54df 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/transaction-control.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/transaction/transaction-control.md @@ -1,28 +1,28 @@ ---- -title: Controlling Transactions -summary: Controlling Transactions -author: zhang cuiping -date: 2023-04-07 ---- - -# Transaction Control - -A transaction is a user-defined sequence of database operations, which form an integral unit of work. - -## Starting a Transaction - -MogDB starts a transaction using **START TRANSACTION** and **BEGIN**. For details, see [START TRANSACTION](./../../sql-syntax/START-TRANSACTION.md) and [BEGIN](./../../sql-syntax/BEGIN.md). - -## Setting a Transaction - -MogDB sets a transaction using **SET TRANSACTION** or **SET LOCAL TRANSACTION**. For details, see [SET TRANSACTION](./../../sql-syntax/SET-TRANSACTION.md). - -## Committing a Transaction - -MogDB commits all operations of a transaction using **COMMIT** or **END**. For details, see [COMMIT | END](./../../sql-syntax/COMMIT-END.md). - -## Rolling Back a Transaction - -If a fault occurs during a transaction and the transaction cannot proceed, the system performs rollback to cancel all the completed database operations related to the transaction. See [ROLLBACK](./../../sql-syntax/ROLLBACK.md). - +--- +title: Controlling Transactions +summary: Controlling Transactions +author: zhang cuiping +date: 2023-04-07 +--- + +# Transaction Control + +A transaction is a user-defined sequence of database operations, which form an integral unit of work. + +## Starting a Transaction + +MogDB starts a transaction using **START TRANSACTION** and **BEGIN**. 
For details, see [START TRANSACTION](./../../sql-syntax/START-TRANSACTION.md) and [BEGIN](./../../sql-syntax/BEGIN.md). + +## Setting a Transaction + +MogDB sets a transaction using **SET TRANSACTION** or **SET LOCAL TRANSACTION**. For details, see [SET TRANSACTION](./../../sql-syntax/SET-TRANSACTION.md). + +## Committing a Transaction + +MogDB commits all operations of a transaction using **COMMIT** or **END**. For details, see [COMMIT | END](./../../sql-syntax/COMMIT-END.md). + +## Rolling Back a Transaction + +If a fault occurs during a transaction and the transaction cannot proceed, the system performs rollback to cancel all the completed database operations related to the transaction. See [ROLLBACK](./../../sql-syntax/ROLLBACK.md). + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If an execution request (not in a transaction block) received in the database contains multiple statements, the request is packed into a transaction. If one of the statements fails, the entire request will be rolled back. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-base-value.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-base-value.md index 86afa0d0..6ff3345e 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-base-value.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-base-value.md @@ -1,83 +1,83 @@ ---- -title: INSERT_RIGHT_REF_DEFAULT_VALUE -summary: INSERT_RIGHT_REF_DEFAULT_VALUE -author: zhang cuiping -date: 2023-04-07 ---- - -# INSERT_RIGHT_REF_DEFAULT_VALUE - -In B-compatible mode, the INSERT statement supports right-value reference. If a referenced column has a NOT NULL constraint and does not have a default value, its base value is used for calculation. If there is no basic value, the NULL value is used for calculation (array type or user-defined type). The basic values of the supported types are shown in [Table 1](#type). - -**Table 1** Type basic values - -| Type | Basic Value | Description | -| :------------------------ | :----------------------------------- | :----------------------------------------------------------- | -| int | 0 | | -| tinyint | 0 | | -| smallint | 0 | | -| integer | 0 | | -| binary_integer | 0 | | -| bigint | 0 | | -| boolean | f | | -| numeric | 0 | | -| decimal | 0 | | -| dec | 0 | | -| double precision | 0 | | -| float8 | 0 | | -| float | 0 | | -| char(n) | "" | Note: When a character string is involved in calculation, the value type is converted based on built-in rules.
The length of the value stored by a fixed-length character string is the same as the specified length, and blank characters are filled (which may vary according to the storage mode). | -| varchar(n) | "" | | -| varchar2(n) | "" | | -| nchar(n) | "" | Note: When a character string is involved in calculation, the value type is converted based on built-in rules.
The length of the value stored by a fixed-length character string is the same as the specified length, and blank characters are filled (which may vary according to the storage mode). | -| nvarchar2(n) | "" | | -| nvarchar(n) | "" | | -| date | 01-01-1970 | | -| time | 00:00:00 | | -| timestamp | Current timestamp | | -| smalldatetime | Thu Jan 01 00:00:00 1970 | | -| interval year | @ 0 | | -| interval month | @ 0 | | -| interval day | @ 0 | | -| interval hour | @ 0 | | -| interval minute | @ 0 | | -| interval second | @ 0 | | -| interval day to second | @ 0 | | -| interval day to hour | @ 0 | | -| interval day to minute | @ 0 | | -| interval hour to minute | @ 0 | | -| interval hour to second | @ 0 | | -| interval minute to second | @ 0 | | -| reltime | @ 0 | | -| abstime | Wed Dec 31 16:00:00 1969 PST | | -| money | $0.00 | | -| int4range | empty | | -| blob | | Object without data content | -| raw | | Object without data content | -| bytea | \x | | -| point | (0,0) | | -| lseg | [(0,0),(0,0)] | | -| box | (0,0),(0,0) | | -| path | ((0,0)) | | -| polygon | ((0,0)) | | -| circle | <(0,0),0> | | -| cidr | 0.0.0.0/32 | | -| inet | 0.0.0.0 | | -| macaddr | 00:00:00:00:00:00 | | -| BIT | | Object without data content | -| BIT VARYING | | Object without data content | -| UUID | 00000000-0000-0000-0000-000000000000 | | -| json | null | The data content is null. | -| jsonb | null | The data content is null. | -| int8range | empty | | -| numrange | empty | | -| tsrange | empty | | -| tstzrange | empty | | -| daterange | empty | | -| hll | \x | | -| SET | "" | | -| tsvector | | Object without data content | -| tsquery | | Object without data content | -| HASH16 | 0000000000000000 | | -| HASH32 | 00000000000000000000000000000000 | | +--- +title: INSERT_RIGHT_REF_DEFAULT_VALUE +summary: INSERT_RIGHT_REF_DEFAULT_VALUE +author: zhang cuiping +date: 2023-04-07 +--- + +# INSERT_RIGHT_REF_DEFAULT_VALUE + +In B-compatible mode, the INSERT statement supports right-value reference. If a referenced column has a NOT NULL constraint and does not have a default value, its base value is used for calculation. If there is no basic value, the NULL value is used for calculation (array type or user-defined type). The basic values of the supported types are shown in [Table 1](#type). + +**Table 1** Type basic values + +| Type | Basic Value | Description | +| :------------------------ | :----------------------------------- | :----------------------------------------------------------- | +| int | 0 | | +| tinyint | 0 | | +| smallint | 0 | | +| integer | 0 | | +| binary_integer | 0 | | +| bigint | 0 | | +| boolean | f | | +| numeric | 0 | | +| decimal | 0 | | +| dec | 0 | | +| double precision | 0 | | +| float8 | 0 | | +| float | 0 | | +| char(n) | "" | Note: When a character string is involved in calculation, the value type is converted based on built-in rules.
The length of the value stored by a fixed-length character string is the same as the specified length, and blank characters are filled (which may vary according to the storage mode). | +| varchar(n) | "" | | +| varchar2(n) | "" | | +| nchar(n) | "" | Note: When a character string is involved in calculation, the value type is converted based on built-in rules.
The length of the value stored by a fixed-length character string is the same as the specified length, and blank characters are filled (which may vary according to the storage mode). | +| nvarchar2(n) | "" | | +| nvarchar(n) | "" | | +| date | 01-01-1970 | | +| time | 00:00:00 | | +| timestamp | Current timestamp | | +| smalldatetime | Thu Jan 01 00:00:00 1970 | | +| interval year | @ 0 | | +| interval month | @ 0 | | +| interval day | @ 0 | | +| interval hour | @ 0 | | +| interval minute | @ 0 | | +| interval second | @ 0 | | +| interval day to second | @ 0 | | +| interval day to hour | @ 0 | | +| interval day to minute | @ 0 | | +| interval hour to minute | @ 0 | | +| interval hour to second | @ 0 | | +| interval minute to second | @ 0 | | +| reltime | @ 0 | | +| abstime | Wed Dec 31 16:00:00 1969 PST | | +| money | $0.00 | | +| int4range | empty | | +| blob | | Object without data content | +| raw | | Object without data content | +| bytea | \x | | +| point | (0,0) | | +| lseg | [(0,0),(0,0)] | | +| box | (0,0),(0,0) | | +| path | ((0,0)) | | +| polygon | ((0,0)) | | +| circle | <(0,0),0> | | +| cidr | 0.0.0.0/32 | | +| inet | 0.0.0.0 | | +| macaddr | 00:00:00:00:00:00 | | +| BIT | | Object without data content | +| BIT VARYING | | Object without data content | +| UUID | 00000000-0000-0000-0000-000000000000 | | +| json | null | The data content is null. | +| jsonb | null | The data content is null. | +| int8range | empty | | +| numrange | empty | | +| tsrange | empty | | +| tstzrange | empty | | +| daterange | empty | | +| hll | \x | | +| SET | "" | | +| tsvector | | Object without data content | +| tsquery | | Object without data content | +| HASH16 | 0000000000000000 | | +| HASH32 | 00000000000000000000000000000000 | | | enum | The first item | | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/functions.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/functions.md index d3f56229..81758d9d 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/functions.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/functions.md @@ -1,99 +1,99 @@ ---- -title: Functions -summary: Functions -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Functions - -## Function Type Resolution - -1. Select the functions to be considered from the **pg_proc** system catalog. If a non-schema-qualified function name was used, the functions in the current search path are considered. If a qualified function name was given, only functions in the specified schema are considered. - - If the search path finds multiple functions of different argument types, a proper function in the path is considered. - -2. Check for a function accepting exactly the input argument types. If the function exists, use it. Cases involving **unknown** will never find a match at this step. - -3. If no exact match is found, see if the function call appears to be a special type conversion request. - -4. Look for the best match. - - a. Discard candidate functions for which the input types do not match and cannot be converted (using an implicit conversion) to match. **unknown** literals are assumed to be convertible to anything for this purpose. If only one candidate remains, use it; else continue to the next step. - b. Run through all candidates and keep those with the most exact matches on input types. Domains are considered the same as their base type for this purpose. 
Keep all candidates if none has exact matches. If only one candidate remains, use it; else continue to the next step. - c. Run through all candidates and keep those that accept preferred types at the most positions where type conversion will be required. Keep all candidates if none accepts preferred types. If only one candidate remains, use it; else continue to the next step. - d. If any input arguments are **unknown**, check the type categories accepted at those argument positions by the remaining candidates. At each position, select the string category if any candidate accepts that category. (This bias towards string is appropriate since an unknown-type literal looks like a string.) Otherwise, if all the remaining candidates accept the same type category, select that category; otherwise fail because the correct choice cannot be deduced without more clues. Now discard candidates that do not accept the selected type category. Furthermore, if any candidate accepts a preferred type in that category, discard candidates that accept non-preferred types for that argument. Keep all candidates if none survives these tests. If only one candidate remains, use it; else continue to the next step. - e. If there are both **unknown** and known-type arguments, and all the known-type arguments have the same type, assume that the **unknown** arguments are also of that type, and check which candidates can accept that type at the **unknown**-argument positions. If exactly one candidate passes this test, use it. Otherwise, an error occurs. - -## Examples - -Example 1: Use the rounding function argument type resolution as the first example. There is only one **round** function that takes two arguments; it takes a first argument of type **numeric** and a second argument of type **integer**. So the following query automatically converts the first argument of type **integer** to **numeric**: - -```sql -mogdb=# SELECT round(4, 4); - round --------- - 4.0000 -(1 row) -``` - -That query is actually transformed by the parser to: - -```sql -mogdb=# SELECT round(CAST (4 AS numeric), 4); -``` - -Since numeric constants with decimal points are initially assigned the type **numeric**, the following query will require no type conversion and therefore might be slightly more efficient: - -```sql -mogdb=# SELECT round(4.0, 4); -``` - -Example 2: Use the substring function type resolution as the second example. There are several **substr** functions, one of which takes types **text** and **integer**. If called with a string constant of unspecified type, the system chooses the candidate function that accepts an argument of the preferred category **string** (namely of type **text**). - -```sql -mogdb=# SELECT substr('1234', 3); - substr --------- - 34 -(1 row) -``` - -If the string is declared to be of type **varchar**, as might be the case if it comes from a table, then the parser will try to convert it to become **text**: - -```sql -mogdb=# SELECT substr(varchar '1234', 3); - substr --------- - 34 -(1 row) -``` - -This is transformed by the parser to effectively become: - -```sql -mogdb=# SELECT substr(CAST (varchar '1234' AS text), 3); -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> The parser learns from the **pg_cast** catalog that **text** and **varchar** are binary-compatible, meaning that one can be passed to a function that accepts the other without doing any physical conversion. Therefore, no type conversion is really inserted in this case. 
-
-And, if the function is called with an argument of type **integer**, the parser will try to convert that to **text**:
-
-```sql
-mogdb=# SELECT substr(1234, 3);
-substr
---------
- 34
-(1 row)
-```
-
-This is transformed by the parser to effectively become:
-
-```sql
-mogdb=# SELECT substr(CAST (1234 AS text), 3);
- substr
---------
- 34
-(1 row)
-```
+---
+title: Functions
+summary: Functions
+author: Zhang Cuiping
+date: 2021-05-17
+---
+
+# Functions
+
+## Function Type Resolution
+
+1. Select the functions to be considered from the **pg_proc** system catalog. If a non-schema-qualified function name was used, the functions in the current search path are considered. If a qualified function name was given, only functions in the specified schema are considered.
+
+   Functions of different argument types are considered on an equal footing regardless of their position in the search path; if the search path finds multiple functions of identical argument types, only the one appearing earliest in the path is considered.
+
+2. Check for a function accepting exactly the input argument types. If the function exists, use it. Cases involving **unknown** will never find a match at this step.
+
+3. If no exact match is found, see if the function call appears to be a special type conversion request.
+
+4. Look for the best match.
+
+   a. Discard candidate functions for which the input types do not match and cannot be converted (using an implicit conversion) to match. **unknown** literals are assumed to be convertible to anything for this purpose. If only one candidate remains, use it; else continue to the next step.
+   b. Run through all candidates and keep those with the most exact matches on input types. Domains are considered the same as their base type for this purpose. Keep all candidates if none has exact matches. If only one candidate remains, use it; else continue to the next step.
+   c. Run through all candidates and keep those that accept preferred types at the most positions where type conversion will be required. Keep all candidates if none accepts preferred types. If only one candidate remains, use it; else continue to the next step.
+   d. If any input arguments are **unknown**, check the type categories accepted at those argument positions by the remaining candidates. At each position, select the string category if any candidate accepts that category. (This bias towards string is appropriate since an unknown-type literal looks like a string.) Otherwise, if all the remaining candidates accept the same type category, select that category; otherwise fail because the correct choice cannot be deduced without more clues. Now discard candidates that do not accept the selected type category. Furthermore, if any candidate accepts a preferred type in that category, discard candidates that accept non-preferred types for that argument. Keep all candidates if none survives these tests. If only one candidate remains, use it; else continue to the next step.
+   e. If there are both **unknown** and known-type arguments, and all the known-type arguments have the same type, assume that the **unknown** arguments are also of that type, and check which candidates can accept that type at the **unknown**-argument positions. If exactly one candidate passes this test, use it. Otherwise, an error occurs.
+
+## Examples
+
+Example 1: Use the rounding function argument type resolution as the first example. There is only one **round** function that takes two arguments; it takes a first argument of type **numeric** and a second argument of type **integer**.
So the following query automatically converts the first argument of type **integer** to **numeric**: + +```sql +mogdb=# SELECT round(4, 4); + round +-------- + 4.0000 +(1 row) +``` + +That query is actually transformed by the parser to: + +```sql +mogdb=# SELECT round(CAST (4 AS numeric), 4); +``` + +Since numeric constants with decimal points are initially assigned the type **numeric**, the following query will require no type conversion and therefore might be slightly more efficient: + +```sql +mogdb=# SELECT round(4.0, 4); +``` + +Example 2: Use the substring function type resolution as the second example. There are several **substr** functions, one of which takes types **text** and **integer**. If called with a string constant of unspecified type, the system chooses the candidate function that accepts an argument of the preferred category **string** (namely of type **text**). + +```sql +mogdb=# SELECT substr('1234', 3); + substr +-------- + 34 +(1 row) +``` + +If the string is declared to be of type **varchar**, as might be the case if it comes from a table, then the parser will try to convert it to become **text**: + +```sql +mogdb=# SELECT substr(varchar '1234', 3); + substr +-------- + 34 +(1 row) +``` + +This is transformed by the parser to effectively become: + +```sql +mogdb=# SELECT substr(CAST (varchar '1234' AS text), 3); +``` + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** +> The parser learns from the **pg_cast** catalog that **text** and **varchar** are binary-compatible, meaning that one can be passed to a function that accepts the other without doing any physical conversion. Therefore, no type conversion is really inserted in this case. + +And, if the function is called with an argument of type **integer**, the parser will try to convert that to **text**: + +```sql +mogdb=# SELECT substr(1234, 3); +substr +-------- + 34 +(1 row) +``` + +This is transformed by the parser to effectively become: + +```sql +mogdb=# SELECT substr(CAST (1234 AS text), 3); + substr +-------- + 34 +(1 row) +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/type-conversion-overview.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/type-conversion-overview.md index 62d635f1..f1154782 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/type-conversion-overview.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/type-conversion-overview.md @@ -1,52 +1,52 @@ ---- -title: Overview -summary: Overview -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Overview - -## Background - -SQL is a typed language. That is, every data item has an associated data type which determines its behavior and allowed usage. MogDB has an extensible type system that is more general and flexible than other SQL implementations. Hence, most type conversion behaviors in MogDB are governed by general rules. This allows the use of mixed-type expressions. - -The MogDB scanner/parser divides lexical elements into five fundamental categories: integers, floating-point numbers, strings, identifiers, and keywords. Constants of most non-numeric types are first classified as strings. The SQL language definition allows specifying type names with constant strings. 
For example, the query: - -```sql -MogDB=# SELECT text 'Origin' AS "label", point '(0,0)' AS "value"; - label | value ---------+------- - Origin | (0,0) -(1 row) -``` - -has two literal constants, of type **text** and **point**. If a type is not specified for a string literal, then the placeholder type **unknown** is assigned initially. - -There are four fundamental SQL constructs requiring distinct type conversion rules in MogDB parser: - -- Function calls - - Much of the SQL type system is built around a rich set of functions. Functions can have one or more arguments. Since SQL permits function overloading, the function name alone does not uniquely identify the function to be called. The parser must select the right function based on the data types of the supplied arguments. - -- Operators - - SQL allows expressions with prefix and postfix unary (one-argument) operators, as well as binary (two-argument) operators. Like functions, operators can be overloaded, so the same problem of selecting the right operator exists. - -- Value storage - - SQL **INSERT** and **UPDATE** statements place the results of expressions into a table. The expressions in the statement must be matched up with, and perhaps converted to, the types of the target columns. - -- UNION, CASE, and Related Constructs - - Since all query results from a unionized **SELECT** statement must appear in a single set of columns, the types of the results of each **SELECT** clause must be matched up and converted to a uniform set. Similarly, the result expressions of a **CASE** construct must be converted to a common type so that the **CASE** expression as a whole has a known output type. The same holds for **ARRAY** constructs, and for the **GREATEST** and **LEAST** functions. - -The system catalog pg_cast stores information about which conversions, or casts, exist between which data types, and how to perform those conversions. For details, see [PG_CAST](../../../reference-guide/system-catalogs-and-system-views/system-catalogs/PG_CAST.md). - -The return type and conversion behavior of an expression are determined during semantic analysis. Data types are divided into several basic type categories, including Boolean, **numeric**, **string**, **bitstring**, **datetime**, **timespan**, **geometric**, and **network**. Within each category there can be one or more preferred types, which are preferred when there is a choice of possible types. With careful selection of preferred types and available implicit casts, it is possible to ensure that ambiguous expressions (those with multiple candidate parsing solutions) can be resolved in a useful way. - -All type conversion rules are designed based on the following principles: - -- Implicit conversions should never have surprising or unpredictable outcomes. -- There should be no extra overhead in the parser or executor if a query does not need implicit type conversion. That is, if a query is well-formed and the types already match, then the query should execute without spending extra time in the parser and without introducing unnecessary implicit conversion calls in the query. -- Additionally, if a query usually requires an implicit conversion for a function, and if then the user defines a new function with the correct argument types, the parser should use this new function. +--- +title: Overview +summary: Overview +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Overview + +## Background + +SQL is a typed language. 
That is, every data item has an associated data type which determines its behavior and allowed usage. MogDB has an extensible type system that is more general and flexible than other SQL implementations. Hence, most type conversion behaviors in MogDB are governed by general rules. This allows the use of mixed-type expressions. + +The MogDB scanner/parser divides lexical elements into five fundamental categories: integers, floating-point numbers, strings, identifiers, and keywords. Constants of most non-numeric types are first classified as strings. The SQL language definition allows specifying type names with constant strings. For example, the query: + +```sql +MogDB=# SELECT text 'Origin' AS "label", point '(0,0)' AS "value"; + label | value +--------+------- + Origin | (0,0) +(1 row) +``` + +has two literal constants, of type **text** and **point**. If a type is not specified for a string literal, then the placeholder type **unknown** is assigned initially. + +There are four fundamental SQL constructs requiring distinct type conversion rules in MogDB parser: + +- Function calls + + Much of the SQL type system is built around a rich set of functions. Functions can have one or more arguments. Since SQL permits function overloading, the function name alone does not uniquely identify the function to be called. The parser must select the right function based on the data types of the supplied arguments. + +- Operators + + SQL allows expressions with prefix and postfix unary (one-argument) operators, as well as binary (two-argument) operators. Like functions, operators can be overloaded, so the same problem of selecting the right operator exists. + +- Value storage + + SQL **INSERT** and **UPDATE** statements place the results of expressions into a table. The expressions in the statement must be matched up with, and perhaps converted to, the types of the target columns. + +- UNION, CASE, and Related Constructs + + Since all query results from a unionized **SELECT** statement must appear in a single set of columns, the types of the results of each **SELECT** clause must be matched up and converted to a uniform set. Similarly, the result expressions of a **CASE** construct must be converted to a common type so that the **CASE** expression as a whole has a known output type. The same holds for **ARRAY** constructs, and for the **GREATEST** and **LEAST** functions. + +The system catalog pg_cast stores information about which conversions, or casts, exist between which data types, and how to perform those conversions. For details, see [PG_CAST](../../../reference-guide/system-catalogs-and-system-views/system-catalogs/PG_CAST.md). + +The return type and conversion behavior of an expression are determined during semantic analysis. Data types are divided into several basic type categories, including Boolean, **numeric**, **string**, **bitstring**, **datetime**, **timespan**, **geometric**, and **network**. Within each category there can be one or more preferred types, which are preferred when there is a choice of possible types. With careful selection of preferred types and available implicit casts, it is possible to ensure that ambiguous expressions (those with multiple candidate parsing solutions) can be resolved in a useful way. + +All type conversion rules are designed based on the following principles: + +- Implicit conversions should never have surprising or unpredictable outcomes. +- There should be no extra overhead in the parser or executor if a query does not need implicit type conversion. 
That is, if a query is well-formed and the types already match, then the query should execute without spending extra time in the parser and without introducing unnecessary implicit conversion calls in the query.
+- Additionally, if a query usually requires an implicit conversion for a function, and the user then defines a new function with the correct argument types, the parser should use this new function.
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/type-conversion.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/type-conversion.md
index 1ee7eaf1..41109396 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/type-conversion.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/type-conversion.md
@@ -1,14 +1,14 @@
----
-title: Type Conversion
-summary: Type Conversion
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Type Conversion
-
-- **[Overview](type-conversion-overview.md)**
-- **[Operators](operators.md)**
-- **[Functions](functions.md)**
-- **[Value Storage](value-storage.md)**
+---
+title: Type Conversion
+summary: Type Conversion
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# Type Conversion
+
+- **[Overview](type-conversion-overview.md)**
+- **[Operators](operators.md)**
+- **[Functions](functions.md)**
+- **[Value Storage](value-storage.md)**
 - **[UNION, CASE, and Related Constructs](union-case-and-related-constructs.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/union-case-and-related-constructs.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/union-case-and-related-constructs.md
index 804a3117..d979431e 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/union-case-and-related-constructs.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/union-case-and-related-constructs.md
@@ -1,205 +1,205 @@
----
-title: UNION, CASE, and Related Constructs
-summary: UNION, CASE, and Related Constructs
-author: Zhang Cuiping
-date: 2021-05-17
----
-
-# UNION, CASE, and Related Constructs
-
-SQL **UNION** constructs must match up possibly dissimilar types to become a single result set. The resolution algorithm is applied separately to each output column of a union query. The **INTERSECT** and **EXCEPT** construct resolve dissimilar types in the same way as **UNION**. The **CASE**, **ARRAY**, **VALUES**, **GREATEST** and **LEAST** constructs use the identical algorithm to match up their component expressions and select a result data type.
-
-## Type Resolution for UNION, CASE, and Related Constructs
-
-- If all inputs are of the same type, and it is not **unknown**, resolve as that type.
-
-- If all inputs are of type **unknown**, resolve as type **text** (the preferred type of the string category). Otherwise, **unknown** inputs are ignored.
-
-- If the inputs are not all of the same type category, a failure will be resulted. (Type **unknown** is not included in this case.)
-
-- If the inputs are all of the same type category, choose the top preferred type in that category. (Exception: The UNION operation regards the type of the first branch as the selected type.)
-
-    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-    > **typcategory** in the **pg_type** system catalog indicates the data type category.
**typispreferred** indicates whether a type is preferred in **typcategory**. - -- Convert all inputs to the selected type. (Retain the original lengths of strings). Fail if there is not an implicit conversion from a given input to the selected type. - -- If the input contains the json, txid_snapshot, sys_refcursor, or geometry type, **UNION** cannot be performed. - -## Type Resolution for CASE and COALESCE in TD Compatibility Type - -- If all inputs are of the same type, and it is not **unknown**, resolve as that type. -- If all inputs are of type **unknown**, resolve as type **text**. -- If inputs are of the string type (including **unknown** which is resolved as type **text**) and digit type, resolve as the string type. If the inputs are not of the two types, an error will be reported. -- If the inputs are all of the same type category, choose the top preferred type in that category. -- Convert all inputs to the selected type. Fail if there is not an implicit conversion from a given input to the selected type. - -## Type Resolution for CASE in ORA Compatibility Type - -**decode(expr, search1, result1, search2, result2, …, defresult)**, that is, **case expr when search1 then result1 when search2 then result2 else defresult end;**. In ORA compatibility mode,it defines the final return value type of the entire expression as the data type of result1 or a higher-precision data type that has the same type as result1. (For example, numeric and int are both numeric data types, but numeric has higher precision and priority than int.) - -- Set the data type of result1 to the final return value type preferType, which belongs to preferCategory. -- Consider the data types of result2, result3, and defresult in sequence. If the type category is also preferCategory, that is, the type category of result1 is the same as that of result1, check whether the precision (priority) of result1 is higher than that of preferType. If yes, update preferType to a data type with a higher precision. If the data type is not preferCategory, check whether the data type can be implicitly converted to preferType. If not, an error is reported. -- Uses the data type recorded by preferType as the return value type of the expression. The expression result is implicitly converted to this data type. - -Note 1: - -To be compatible with a special case in which the character type of a super-large number is converted to the numeric type, for example, **select decode(1, 2, 2, “53465465676465454657567678676”)**, the large number exceeds the range of bigint and double. Therefore, if the type of result1 is numeric, the type of the return value is set to numeric to be compatible with this special case. - -Note 2: - -Priority of the numeric types: numeric > float8 > float4 > int8 > int4 > int2 > int1 - -Priority of the character types: text > varchar = nvarchar2 > bpchar > char - -Priority of date types: timestamptz > timestamp > smalldatetime > date > abstime > timetz > time - -Priority of date span types: interval > tinterval > reltime. - -Note 3: - -The following figure shows the supported implicit type conversions when the **set sql_beta_feature = 'a_style_coerce';** parameter is enabled in ORA compatibility mode.**\** indicates that conversion is not required, **yes** indicates that conversion is supported, and blank indicates that conversion is not supported. - -## Example - -Example 1: Use type resolution with underspecified types in a union as the first example. 
Here, the unknown-type literal **'b'** will be resolved to type **text**. - -```sql -MogDB=# SELECT text 'a' AS "text" UNION SELECT 'b'; - text ------- - a - b -(2 rows) -``` - -Example 2: Use type resolution in a simple union as the second example. The literal **1.2** is of type **numeric**, and the **integer** value **1** can be cast implicitly to **numeric**, so that type is used. - -```sql -MogDB=# SELECT 1.2 AS "numeric" UNION SELECT 1; - numeric ---------- - 1 - 1.2 -(2 rows) -``` - -Example 3: Use type resolution in a transposed union as the third example. Since type **real** cannot be implicitly cast to **integer**, but **integer** can be implicitly cast to **real**, the union result type is resolved as **real**. - -```sql -MogDB=# SELECT 1 AS "real" UNION SELECT CAST('2.2' AS REAL); - real ------- - 1 - 2.2 -(2 rows) -``` - -Example 4: In the **TD** type, if input parameters for **COALESCE** are of **int** and **varchar** types, resolve as type **varchar**. In the **A** type, an error is reported. - -```sql --- In the A type, create the a_1 database compatible with A. -MogDB=# CREATE DATABASE a_1 dbcompatibility = 'A'; - --- Switch to the a_1 database. -MogDB=# \c a_1 - --- Create the t1 table. -a_1=# CREATE TABLE t1(a int, b varchar(10)); - --- Show the execution plan of a statement for querying the types int and varchar of input parameters for COALESCE. -a_1=# EXPLAIN SELECT coalesce(a, b) FROM t1; -ERROR: COALESCE types integer and character varying cannot be matched -LINE 1: EXPLAIN SELECT coalesce(a, b) FROM t1; - ^ -CONTEXT: referenced column: coalesce - --- Delete the table. -a_1=# DROP TABLE t1; - --- Switch to the MogDB database: -a_1=# \c postgres - --- In the TD type, create the td_1 database compatible with Teradata. -MogDB=# CREATE DATABASE td_1 dbcompatibility = 'C'; - --- Switch to the td_1 database. -MogDB=# \c td_1 - --- Create the t2 table. -td_1=# CREATE TABLE t2(a int, b varchar(10)); - --- Show the execution plan of a statement for querying the types int and varchar of input parameters for COALESCE. -td_1=# EXPLAIN VERBOSE select coalesce(a, b) from t2; - QUERY PLAN ---------------------------------------------------------------------------------------- - Data Node Scan (cost=0.00..0.00 rows=0 width=0) - Output: (COALESCE((t2.a)::character varying, t2.b)) - Node/s: All dbnodes - Remote query: SELECT COALESCE(a::character varying, b) AS "coalesce" FROM public.t2 -(4 rows) - --- Delete the table. -td_1=# DROP TABLE t2; - --- Switch to the MogDB database: -td_1=# \c postgres - --- Delete databases in A and TD types. -MogDB=# DROP DATABASE a_1; -MogDB=# DROP DATABASE td_1; -``` - -Example 5: In ORA mode, set the final return value type of the expression to the data type of result1 or a higher-precision data type that is of the same type as result1. - -```sql --- In the ORA type, create the ora_1 database compatible with ORA. -MogDB=# CREATE DATABASE ora_1 dbcompatibility = 'A'; - --- Switch to the ora_1 database. -MogDB=# \c ora_1 - --- Enable the Decode compatibility parameter. -set sql_beta_feature='a_style_coerce'; - --- Create the t1 table. -ora_1=# CREATE TABLE t1(c_int int, c_float8 float8, c_char char(10), c_text text, c_date date); - --- Insert data. -ora_1=# INSERT INTO t1 VALUES(1, 2, '3', '4', date '12-10-2010'); - --- The type of result1 is char, and the type of defresult is text. The precision of text is higher, and the type of the return value is changed from char to text. 
-ora_1=# SELECT decode(1, 2, c_char, c_text) AS result, pg_typeof(result) FROM t1;
- result | pg_typeof
---------+-----------
- 4 | text
-(1 row)
-
--- The type of result1 is int, which is a numeric type. The type of the return value is set to numeric.
-ora_1=# SELECT decode(1, 2, c_int, c_float8) AS result, pg_typeof(result) FROM t1;
- result | pg_typeof
---------+-----------
- 2 | numeric
-(1 row)
-
--- The implicit conversion from the defresult data type to the result1 data type does not exist. An error is reported.
-ora_1=# SELECT decode(1, 2, c_int, c_date) FROM t1;
-ERROR: CASE types integer and timestamp without time zone cannot be matched
-LINE 1: SELECT decode(1, 2, c_int, c_date) FROM t1;
- ^
-CONTEXT: referenced column: c_date
-
--- Disable the Decode compatibility parameter.
-set sql_beta_feature='none';
-
--- Delete the table.
-ora_1=# DROP TABLE t1;
-DROP TABLE
-
--- Switch to the postgres database:
-ora_1=# \c postgres
-
--- Delete the database in ORA mode.
-MogDB=# DROP DATABASE ora_1;
-DROP DATABASE
+---
+title: UNION, CASE, and Related Constructs
+summary: UNION, CASE, and Related Constructs
+author: Zhang Cuiping
+date: 2021-05-17
+---
+
+# UNION, CASE, and Related Constructs
+
+SQL **UNION** constructs must match up possibly dissimilar types to become a single result set. The resolution algorithm is applied separately to each output column of a union query. The **INTERSECT** and **EXCEPT** constructs resolve dissimilar types in the same way as **UNION**. The **CASE**, **ARRAY**, **VALUES**, **GREATEST** and **LEAST** constructs use the identical algorithm to match up their component expressions and select a result data type.
+
+## Type Resolution for UNION, CASE, and Related Constructs
+
+- If all inputs are of the same type, and it is not **unknown**, resolve as that type.
+
+- If all inputs are of type **unknown**, resolve as type **text** (the preferred type of the string category). Otherwise, **unknown** inputs are ignored.
+
+- If the inputs are not all of the same type category, the resolution fails. (Type **unknown** is not included in this case.)
+
+- If the inputs are all of the same type category, choose the top preferred type in that category. (Exception: The UNION operation regards the type of the first branch as the selected type.)
+
+    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+    > **typcategory** in the **pg_type** system catalog indicates the data type category. **typispreferred** indicates whether a type is preferred in **typcategory**.
+
+- Convert all inputs to the selected type. (Retain the original lengths of strings.) Fail if there is not an implicit conversion from a given input to the selected type.
+
+- If the input contains the json, txid_snapshot, sys_refcursor, or geometry type, **UNION** cannot be performed.
+
+## Type Resolution for CASE and COALESCE in TD Compatibility Type
+
+- If all inputs are of the same type, and it is not **unknown**, resolve as that type.
+- If all inputs are of type **unknown**, resolve as type **text**.
+- If inputs are of the string type (including **unknown**, which is resolved as type **text**) and the digit type, resolve as the string type. If the inputs are not of these two types, an error will be reported.
+- If the inputs are all of the same type category, choose the top preferred type in that category.
+- Convert all inputs to the selected type. Fail if there is not an implicit conversion from a given input to the selected type.
+
+## Type Resolution for CASE in ORA Compatibility Type
+
+**decode(expr, search1, result1, search2, result2, …, defresult)**, that is, **case expr when search1 then result1 when search2 then result2 else defresult end;**. In ORA compatibility mode, it defines the final return value type of the entire expression as the data type of result1 or a higher-precision data type of the same type category as result1. (For example, numeric and int are both numeric data types, but numeric has higher precision and priority than int.)
+
+- Take the data type of result1 as the final return value type preferType, which belongs to the category preferCategory.
+- Consider the data types of result2, result3, and defresult in sequence. If the type category of the considered data type is also preferCategory, that is, it belongs to the same category as result1, check whether its precision (priority) is higher than that of preferType. If yes, update preferType to the higher-precision data type. If the type category is not preferCategory, check whether the data type can be implicitly converted to preferType. If not, an error is reported.
+- Use the data type recorded by preferType as the return value type of the expression. The expression result is implicitly converted to this data type.
+
+Note 1:
+
+To be compatible with a special case in which a character string holding a super-large number is converted to the numeric type, for example, **select decode(1, 2, 2, '53465465676465454657567678676')**, where the large number exceeds the range of both bigint and double: if the type of result1 is numeric, the type of the return value is set to numeric so that this special case works.
+
+Note 2:
+
+Priority of the numeric types: numeric > float8 > float4 > int8 > int4 > int2 > int1
+
+Priority of the character types: text > varchar = nvarchar2 > bpchar > char
+
+Priority of date types: timestamptz > timestamp > smalldatetime > date > abstime > timetz > time
+
+Priority of date span types: interval > tinterval > reltime.
+
+Note 3:
+
+The following figure shows the supported implicit type conversions when **set sql_beta_feature = 'a_style_coerce';** is enabled in ORA compatibility mode. **\** indicates that conversion is not required, **yes** indicates that conversion is supported, and blank indicates that conversion is not supported.
+
+## Example
+
+Example 1: Use type resolution with underspecified types in a union as the first example. Here, the unknown-type literal **'b'** will be resolved to type **text**.
+
+```sql
+MogDB=# SELECT text 'a' AS "text" UNION SELECT 'b';
+ text
+------
+ a
+ b
+(2 rows)
+```
+
+Example 2: Use type resolution in a simple union as the second example. The literal **1.2** is of type **numeric**, and the **integer** value **1** can be cast implicitly to **numeric**, so that type is used.
+
+```sql
+MogDB=# SELECT 1.2 AS "numeric" UNION SELECT 1;
+ numeric
+---------
+ 1
+ 1.2
+(2 rows)
+```
+
+Example 3: Use type resolution in a transposed union as the third example. Since type **real** cannot be implicitly cast to **integer**, but **integer** can be implicitly cast to **real**, the union result type is resolved as **real**.
+
+```sql
+MogDB=# SELECT 1 AS "real" UNION SELECT CAST('2.2' AS REAL);
+ real
+------
+ 1
+ 2.2
+(2 rows)
+```
+
+Example 4: In the **TD** type, if input parameters for **COALESCE** are of **int** and **varchar** types, resolve as type **varchar**. In the **A** type, an error is reported.
+
+```sql
+-- In the A type, create the a_1 database compatible with A.
+MogDB=# CREATE DATABASE a_1 dbcompatibility = 'A';
+
+-- Switch to the a_1 database.
+MogDB=# \c a_1
+
+-- Create the t1 table.
+a_1=# CREATE TABLE t1(a int, b varchar(10));
+
+-- Show the execution plan of a statement for querying the types int and varchar of input parameters for COALESCE.
+a_1=# EXPLAIN SELECT coalesce(a, b) FROM t1;
+ERROR:  COALESCE types integer and character varying cannot be matched
+LINE 1: EXPLAIN SELECT coalesce(a, b) FROM t1;
+                                ^
+CONTEXT:  referenced column: coalesce
+
+-- Delete the table.
+a_1=# DROP TABLE t1;
+
+-- Switch back to the postgres database.
+a_1=# \c postgres
+
+-- In the TD type, create the td_1 database compatible with Teradata.
+MogDB=# CREATE DATABASE td_1 dbcompatibility = 'C';
+
+-- Switch to the td_1 database.
+MogDB=# \c td_1
+
+-- Create the t2 table.
+td_1=# CREATE TABLE t2(a int, b varchar(10));
+
+-- Show the execution plan of a statement for querying the types int and varchar of input parameters for COALESCE.
+td_1=# EXPLAIN VERBOSE select coalesce(a, b) from t2;
+                                       QUERY PLAN
+---------------------------------------------------------------------------------------
+ Data Node Scan  (cost=0.00..0.00 rows=0 width=0)
+   Output: (COALESCE((t2.a)::character varying, t2.b))
+   Node/s: All dbnodes
+   Remote query: SELECT COALESCE(a::character varying, b) AS "coalesce" FROM public.t2
+(4 rows)
+
+-- Delete the table.
+td_1=# DROP TABLE t2;
+
+-- Switch back to the postgres database.
+td_1=# \c postgres
+
+-- Delete databases in A and TD types.
+MogDB=# DROP DATABASE a_1;
+MogDB=# DROP DATABASE td_1;
+```
+
+Example 5: In ORA mode, set the final return value type of the expression to the data type of result1 or a higher-precision data type that is of the same type as result1.
+
+```sql
+-- In the ORA type, create the ora_1 database compatible with ORA.
+MogDB=# DROP DATABASE ora_1; +DROP DATABASE ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/value-storage.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/value-storage.md index 0c6f13c0..38adf438 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/value-storage.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-reference/type-conversion/value-storage.md @@ -1,36 +1,36 @@ ---- -title: Value Storage -summary: Value Storage -author: Zhang Cuiping -date: 2021-05-17 ---- - -# Value Storage - -## Value Storage Type Resolution - -1. Search for an exact match with the target column. -2. Try to convert the expression to the target type. This will succeed if there is a registered cast between the two types. If the expression is an unknown-type literal, the content of the literal string will be fed to the input conversion routine for the target type. -3. Check to see if there is a sizing cast for the target type. A sizing cast is a cast from that type to itself. If one is found in the **pg_cast** catalog, apply it to the expression before storing into the destination column. The implementation function for such a cast always takes an extra parameter of type **integer**. The parameter receives the destination column's **atttypmod** value (typically its declared length, although the interpretation of **atttypmod** varies for different data types), and may take a third Boolean parameter that says whether the cast is explicit or implicit. The cast function is responsible for applying any length-dependent semantics such as size checking or truncation. - -## Example - -Use the **character** storage type conversion as an example. For a target column declared as **character(20)** the following statement shows that the stored value is sized correctly: - -```sql -mogdb=# CREATE TABLE tpcds.value_storage_t1 ( - VS_COL1 CHARACTER(20) -); -mogdb=# INSERT INTO tpcds.value_storage_t1 VALUES('abcdef'); -mogdb=# SELECT VS_COL1, octet_length(VS_COL1) FROM tpcds.value_storage_t1; - vs_col1 | octet_length -----------------------+-------------- - abcdef | 20 -(1 row) -) - -mogdb=# DROP TABLE tpcds.value_storage_t1; -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> What has happened here is that the two unknown literals are resolved to **text** by default, allowing the **||** operator to be resolved as **text** concatenation. Then the **text** result of the operator is converted to **bpchar** ("blank-padded char", the internal name of the **character** data type) to match the target column type. Since the conversion from **text** to **bpchar** is binary-coercible, this conversion does not insert any real function call. Finally, the sizing function **bpchar(bpchar, integer, Boolean)** is found in the system catalog and used for the operator's result and the stored column length. This type-specific function performs the required length check and addition of padding spaces. +--- +title: Value Storage +summary: Value Storage +author: Zhang Cuiping +date: 2021-05-17 +--- + +# Value Storage + +## Value Storage Type Resolution + +1. Search for an exact match with the target column. +2. Try to convert the expression to the target type. This will succeed if there is a registered cast between the two types. If the expression is an unknown-type literal, the content of the literal string will be fed to the input conversion routine for the target type. +3. 
Check to see if there is a sizing cast for the target type. A sizing cast is a cast from that type to itself. If one is found in the **pg_cast** catalog, apply it to the expression before storing into the destination column. The implementation function for such a cast always takes an extra parameter of type **integer**. The parameter receives the destination column's **atttypmod** value (typically its declared length, although the interpretation of **atttypmod** varies for different data types), and may take a third Boolean parameter that says whether the cast is explicit or implicit. The cast function is responsible for applying any length-dependent semantics such as size checking or truncation.
+
+## Example
+
+Use the **character** storage type conversion as an example. For a target column declared as **character(20)**, the following statement shows that the stored value is sized correctly:
+
+```sql
+mogdb=# CREATE TABLE tpcds.value_storage_t1 (
+    VS_COL1 CHARACTER(20)
+);
+mogdb=# INSERT INTO tpcds.value_storage_t1 VALUES('abc' || 'def');
+mogdb=# SELECT VS_COL1, octet_length(VS_COL1) FROM tpcds.value_storage_t1;
+       vs_col1        | octet_length
+----------------------+--------------
+ abcdef               |           20
+(1 row)
+
+mogdb=# DROP TABLE tpcds.value_storage_t1;
+```
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+> What has happened here is that the two unknown literals are resolved to **text** by default, allowing the **||** operator to be resolved as **text** concatenation. Then the **text** result of the operator is converted to **bpchar** ("blank-padded char", the internal name of the **character** data type) to match the target column type. Since the conversion from **text** to **bpchar** is binary-coercible, this conversion does not insert any real function call. Finally, the sizing function **bpchar(bpchar, integer, Boolean)** is found in the system catalog and used for the operator's result and the stored column length. This type-specific function performs the required length check and addition of padding spaces.
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-EVENT-TRIGGER.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-EVENT-TRIGGER.md
index 722b6a37..9ba54d18 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-EVENT-TRIGGER.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-EVENT-TRIGGER.md
@@ -1,56 +1,56 @@
----
-title: ALTER EVENT TRIGGER
-summary: ALTER EVENT TRIGGER
-author: zhang cuiping
-date: 2023-04-07
----
-
-# ALTER EVENT TRIGGER
-
-## Function
-
-ALTER EVENT TRIGGER modifies an event trigger.
-
-## Precautions
-
-Only the system administrator or super user has the permission to modify event triggers.
-
-## Syntax
-
-```ebnf+diagram
-AlterEventTrigger ::= ALTER EVENT TRIGGER name DISABLE
-```
-
-```ebnf+diagram
-AlterEventTrigger ::= ALTER EVENT TRIGGER name ENABLE [ REPLICA | ALWAYS ]
-```
-
-```ebnf+diagram
-AlterEventTrigger ::= ALTER EVENT TRIGGER name OWNER TO { new_owner | CURRENT_USER | SESSION_USER }
-```
-
-```ebnf+diagram
-AlterEventTrigger ::= ALTER EVENT TRIGGER name RENAME TO new_name
-```
-
-## Parameter Description
-
-- **name**
-
-  Specifies the name of the event trigger to be modified.
-
-  Value range: all existing event triggers.
-
-- **new_name**
-
-  Specifies the new name after modification.
-
-  Value range: strings that comply with the identifier naming convention. A value contains a maximum of 63 characters and cannot be the same as other event triggers on the same table.
-
-## Examples
-
-For details, see examples in [CREATE EVENT TRIGGER](CREATE-EVENT-TRIGGER.md).
-
-## Helpful Links
-
+---
+title: ALTER EVENT TRIGGER
+summary: ALTER EVENT TRIGGER
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# ALTER EVENT TRIGGER
+
+## Function
+
+ALTER EVENT TRIGGER modifies an event trigger.
+
+## Precautions
+
+Only the system administrator or super user has the permission to modify event triggers.
+
+## Syntax
+
+```ebnf+diagram
+AlterEventTrigger ::= ALTER EVENT TRIGGER name DISABLE
+```
+
+```ebnf+diagram
+AlterEventTrigger ::= ALTER EVENT TRIGGER name ENABLE [ REPLICA | ALWAYS ]
+```
+
+```ebnf+diagram
+AlterEventTrigger ::= ALTER EVENT TRIGGER name OWNER TO { new_owner | CURRENT_USER | SESSION_USER }
+```
+
+```ebnf+diagram
+AlterEventTrigger ::= ALTER EVENT TRIGGER name RENAME TO new_name
+```
+
+## Parameter Description
+
+- **name**
+
+  Specifies the name of the event trigger to be modified.
+
+  Value range: all existing event triggers.
+
+- **new_name**
+
+  Specifies the new name after modification.
+
+  Value range: strings that comply with the identifier naming convention. A value contains a maximum of 63 characters and cannot duplicate the name of another event trigger.
+
+## Examples
+
+For details, see examples in [CREATE EVENT TRIGGER](CREATE-EVENT-TRIGGER.md).
+
+## Helpful Links
+
 [CREATE EVENT TRIGGER](CREATE-EVENT-TRIGGER.md), [DROP EVENT TRIGGER](DROP-EVENT-TRIGGER.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-FOREIGN-DATA-WRAPPER.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-FOREIGN-DATA-WRAPPER.md
index fb8c5bb3..04c74801 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-FOREIGN-DATA-WRAPPER.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-FOREIGN-DATA-WRAPPER.md
@@ -1,66 +1,66 @@
----
-title: ALTER FOREIGN DATA WRAPPER
-summary: ALTER FOREIGN DATA WRAPPER
-author: zhang cuiping
-date: 2023-04-07
----
-
-# ALTER FOREIGN DATA WRAPPER
-
-## Function Description
-
-Modifies the definition of a foreign data wrapper (FDW).
-
-## Syntax
-
-```ebnf+diagram
-AlterForeignDataWrapper ::= ALTER FOREIGN DATA WRAPPER name
-    [ HANDLER handler_function | NO HANDLER ]
-    [ VALIDATOR validator_function | NO VALIDATOR ]
-    [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [,...] ) ]
-```
-
-## Parameter Description
-
-- **name**
-
-  Specifies the name of an FDW to be modified.
-
-- **HANDLER handler_function**
-
-  Specifies a new handler function for an FDW.
-
-- **NO HANDLER**
-
-  Specifies that an FDW no longer has a handler function.
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
-  >
-  > Foreign tables that use FDWs without handler functions cannot be accessed.
-
-- **VALIDATOR validator_function**
-
-  Specifies a new validator function for an FDW.
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
-  >
-  > After a validator function is modified, options for an FDW, server, and user mapping may become invalid. Before using the FDW, the user should ensure that these options are correct.
-
-- **NO VALIDATOR**
-
-  Specifies that the FDW no longer has a validator function.
-
-- **OPTIONS ( [ ADD | SET | DROP ] option ['value'] [,…] )**
-
-  Specifies options to be modified (added, set, or dropped) for the FDW. If the operation is not explicitly specified, it is assumed that the operation is ADD. The option name must be unique. Use the FDW's validator function (if any) to validate the name and value.
-
-## Examples
-
-```sql
---Create an FDW named dbi.
-MogDB=# CREATE FOREIGN DATA WRAPPER dbi OPTIONS (debug 'true');
---Modify dbi: Add the foo option and delete the debug option.
-MogDB=# ALTER FOREIGN DATA WRAPPER dbi OPTIONS (ADD foo '1', DROP debug);
---Change the dbi validator to myvalidator.
-MogDB=# ALTER FOREIGN DATA WRAPPER dbi VALIDATOR file_fdw_validator;
+---
+title: ALTER FOREIGN DATA WRAPPER
+summary: ALTER FOREIGN DATA WRAPPER
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# ALTER FOREIGN DATA WRAPPER
+
+## Function Description
+
+Modifies the definition of a foreign data wrapper (FDW).
+
+## Syntax
+
+```ebnf+diagram
+AlterForeignDataWrapper ::= ALTER FOREIGN DATA WRAPPER name
+    [ HANDLER handler_function | NO HANDLER ]
+    [ VALIDATOR validator_function | NO VALIDATOR ]
+    [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [,...] ) ]
+```
+
+## Parameter Description
+
+- **name**
+
+  Specifies the name of an FDW to be modified.
+
+- **HANDLER handler_function**
+
+  Specifies a new handler function for an FDW.
+
+- **NO HANDLER**
+
+  Specifies that an FDW no longer has a handler function.
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+  >
+  > Foreign tables that use FDWs without handler functions cannot be accessed.
+
+- **VALIDATOR validator_function**
+
+  Specifies a new validator function for an FDW.
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+  >
+  > After a validator function is modified, options for an FDW, server, and user mapping may become invalid. Before using the FDW, the user should ensure that these options are correct.
+
+- **NO VALIDATOR**
+
+  Specifies that the FDW no longer has a validator function.
+
+- **OPTIONS ( [ ADD | SET | DROP ] option ['value'] [,…] )**
+
+  Specifies options to be modified (added, set, or dropped) for the FDW. If no operation is explicitly specified, ADD is assumed. Option names must be unique; names and values are validated using the FDW's validator function, if any.
+
+## Examples
+
+```sql
+--Create an FDW named dbi.
+MogDB=# CREATE FOREIGN DATA WRAPPER dbi OPTIONS (debug 'true');
+--Modify dbi: Add the foo option and delete the debug option.
+MogDB=# ALTER FOREIGN DATA WRAPPER dbi OPTIONS (ADD foo '1', DROP debug);
+--Change the dbi validator to file_fdw_validator.
+MogDB=# ALTER FOREIGN DATA WRAPPER dbi VALIDATOR file_fdw_validator;
 ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-OPERATOR.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-OPERATOR.md
index eebbf516..51be844c 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-OPERATOR.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/ALTER-OPERATOR.md
@@ -1,59 +1,59 @@
----
-title: ALTER OPERATOR
-summary: ALTER OPERATOR
-author: zhang cuiping
-date: 2023-04-07
----
-
-# ALTER OPERATOR
-
-## Function
-
-ALTER OPERATOR modifies the definition of an operator.
-
-## Precautions
-
-ALTER OPERATOR changes the definition of an operator. Currently, the only function available is to change the owner of the operator.
-
-To use ALTER OPERATOR, you must be the owner of the operator. To modify the owner, you must also be a direct or indirect member of the new owning role, and that member must have CREATE permission on the operator's schema. (These restrictions force the owner to do nothing that cannot be done by deleting and recreating the operator. However, a user with the SYSADMIN permission can modify the ownership of any operator in any way.)
-
-## Syntax
-
-```ebnf+diagram
-AlterOperator ::= ALTER OPERATOR name ( { left_type | NONE } , { right_type | NONE } ) OWNER TO new_owner
-ALTER OPERATOR name ( { left_type | NONE } , { right_type | NONE } ) SET SCHEMA new_schema
-```
-
-## Parameter Description
-
-- **name**
-
-  Name of an existing operator.
-
-- **left_type**
-
-  Data type of the left operand for the operator; if there is no left operand, write NONE.
-
-- **right_type**
-
-  Data type of the right operand for the operator; if there is no right operand, write NONE.
-
-- **new_owner**
-
-  New owner of the operator.
-
-- **new_schema**
-
-  New schema name of the operator.
-
-## Example
-
-Change a user-defined operator for text a @@ b:
-
-```sql
-ALTER OPERATOR @@ (text, text) OWNER TO joe;
-```
-
-## Compatibility
-
+---
+title: ALTER OPERATOR
+summary: ALTER OPERATOR
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# ALTER OPERATOR
+
+## Function
+
+ALTER OPERATOR modifies the definition of an operator.
+
+## Precautions
+
+ALTER OPERATOR changes the definition of an operator. Currently, it can only be used to change the owner or schema of the operator.
+
+To use ALTER OPERATOR, you must be the owner of the operator. To modify the owner, you must also be a direct or indirect member of the new owning role, and that member must have CREATE permission on the operator's schema. (These restrictions force the owner to do nothing that cannot be done by deleting and recreating the operator. However, a user with the SYSADMIN permission can modify the ownership of any operator in any way.)
+
+## Syntax
+
+```ebnf+diagram
+AlterOperator ::= ALTER OPERATOR name ( { left_type | NONE } , { right_type | NONE } ) OWNER TO new_owner
+ALTER OPERATOR name ( { left_type | NONE } , { right_type | NONE } ) SET SCHEMA new_schema
+```
+
+## Parameter Description
+
+- **name**
+
+  Name of an existing operator.
+
+- **left_type**
+
+  Data type of the left operand for the operator; if there is no left operand, write NONE.
+
+- **right_type**
+
+  Data type of the right operand for the operator; if there is no right operand, write NONE.
+
+- **new_owner**
+
+  New owner of the operator.
+
+- **new_schema**
+
+  New schema name of the operator.
+
+## Example
+
+Change the owner of the user-defined operator @@ for type text:
+
+```sql
+ALTER OPERATOR @@ (text, text) OWNER TO joe;
+```
+
+## Compatibility
+
 The SQL standard does not contain the ALTER OPERATOR statement.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-EVENT.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-EVENT.md
index c6492943..a7446116 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-EVENT.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-EVENT.md
@@ -1,102 +1,102 @@
----
-title: CREATE EVENT
-summary: CREATE EVENT
-author: zhang cuiping
-date: 2023-04-07
----
-
-# CREATE EVENT
-
-## Function
-
-CREATE EVENT creates a scheduled event.
-
-## Precautions
-
-- Operations related to scheduled events are supported only when **sql_compatibility** is set to **'B'**.
-- A user without the sysadmin permission must obtain the permission from the user who has the sysadmin permission to create, modify or delete the scheduled event. The operation permissions of the scheduled event are the same as those of creating scheduled events for the advanced package **DBE_SCHEDULER**. -- Currently, the interval expression of a scheduled event is compatible with the syntax of floating-point number, for example, interval 0.5 minutes. However, the floating-point number is rounded up during calculation. Therefore, you are not advised to use the floating-point number for the interval. -- Scheduled events with the same name are not supported in the same database. -- The statements to be executed in a scheduled event are any SQL statements except security-related operations. However, some statements with restrictions fail to be executed. For example, a database cannot be created by using composite statements. -- The security-related operations are as follows. - - Use encryption functions. - - Create and set users and groups. - - Connect to a database. - - Encrypt a function. -- The definer fails to be specified for a scheduled event in the following scenarios: - - The user who operates the scheduled event does not have the sysadmin permission. - - If the current user is inconsistent with the specified definer: - - An initial user is specified as the definer. - - A private user, O&M administrator, or monitoring administrator is specified as the definer. - - The parameter **enableSeparationOfDuty** is set to **on** to enable the separation of duties. - -## Syntax - -```ebnf+diagram -CreateEvent ::= CREATE - [DEFINER = user] -EVENT - [IF NOT EXISTS] - event_name - [ON SCHEDULE schedule] - [ON COMPLETION [NOT] PRESERVE] - [ENABLE | DISABLE | DISABLE ON SLAVE] - [COMMENT 'string'] - [DO event_body] -``` - -```ebnf+diagram -schedule ::= { - AT timestamp [INTERVAL interval] [,...] - | EVERY interval - [STARTS timestamp [ INTERVAL interval] [,...]] - [ENDS timestamp [ INTERVAL interval] [,...]] -} -``` - -```ebnf+diagram -interval ::= - quantity {YEAR | MONTH | DAY | HOUR | MINUTE | SECOND | - YEAR TO MONTH | DAY TO HOUR | DAY TO MINUTE | - DAY TO SECOND | HOUR TO MINUTE | HOUR TO SECOND | - MINUTE TO SECOND} -``` - -## Parameter Description - -- definer - - Specifies the permission for the scheduled event statement to be executed during execution. By default, the permission of the user who creates the scheduled event is used. When definer is specified, the permission of the specified user is used. - - Only users with the sysadmin permission can specify the definer. - -- ON COMPLETION [NOT] PRESERVE - - Once a transaction is complete, the scheduled event is deleted from the system catalog immediately by default. You can overwrite the default behavior by setting **ON COMPLETION PRESERVE**. - -- ENABLE | DISABLE | DISABLE ON SLAVE - - The scheduled event is in the **ENABLE** state by default after it is created. That is, the statement to be executed is executed immediately at the specified time. You can use the keyword **DISABLE** to change the **ENABLE** state. The performance of **DISABLE ON SLAVE** is the same as that of **DISABLE**. - -- COMMENT 'string' - - You can add comments to the scheduled event. The comments can be viewed in the **GS_JOB_ATTRIBUTE** table. - -- event_body - - Specifies the statement to be executed for a scheduled event. 
-
-## Examples
-
-```
-MogDB=# CREATE TABLE t_ev(num int);
-
-MogDB=# CREATE EVENT IF NOT EXISTS event_e1 ON SCHEDULE AT sysdate + interval 5 second + interval 33 minute DISABLE DO insert into t_ev values(0);
-
-MogDB=# CREATE EVENT IF NOT EXISTS event_e1 ON SCHEDULE EVERY 1 minute DO insert into t_ev values(1);
-```
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
->
-> - If a scheduled event fails to be executed after being created, you can view the failure cause in the **SHOW EVENTS** or **PG_JOB** table.
+---
+title: CREATE EVENT
+summary: CREATE EVENT
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# CREATE EVENT
+
+## Function
+
+CREATE EVENT creates a scheduled event.
+
+## Precautions
+
+- Operations related to scheduled events are supported only when **sql_compatibility** is set to **'B'**.
+- A user without the sysadmin permission must obtain the permission from a user who has the sysadmin permission to create, modify, or delete a scheduled event. The operation permissions of the scheduled event are the same as those of creating scheduled events for the advanced package **DBE_SCHEDULER**.
+- Currently, the interval expression of a scheduled event is compatible with the floating-point syntax, for example, **interval 0.5 minute**. However, the floating-point number is rounded up during calculation. Therefore, you are advised not to use floating-point numbers for the interval.
+- Scheduled events with the same name are not supported in the same database.
+- The statements to be executed in a scheduled event can be any SQL statements except security-related operations. However, some statements with restrictions fail to be executed. For example, a database cannot be created by using composite statements.
+- The security-related operations are as follows:
+  - Use encryption functions.
+  - Create and set users and groups.
+  - Connect to a database.
+  - Encrypt a function.
+- Specifying the definer for a scheduled event fails in the following scenarios:
+  - The user who operates the scheduled event does not have the sysadmin permission.
+  - If the current user is inconsistent with the specified definer:
+    - An initial user is specified as the definer.
+    - A private user, O&M administrator, or monitoring administrator is specified as the definer.
+    - The parameter **enableSeparationOfDuty** is set to **on** to enable the separation of duties.
+
+## Syntax
+
+```ebnf+diagram
+CreateEvent ::= CREATE
+    [DEFINER = user]
+EVENT
+    [IF NOT EXISTS]
+    event_name
+    [ON SCHEDULE schedule]
+    [ON COMPLETION [NOT] PRESERVE]
+    [ENABLE | DISABLE | DISABLE ON SLAVE]
+    [COMMENT 'string']
+    [DO event_body]
+```
+
+```ebnf+diagram
+schedule ::= {
+    AT timestamp [INTERVAL interval] [,...]
+  | EVERY interval
+    [STARTS timestamp [ INTERVAL interval] [,...]]
+    [ENDS timestamp [ INTERVAL interval] [,...]]
+}
+```
+
+```ebnf+diagram
+interval ::=
+    quantity {YEAR | MONTH | DAY | HOUR | MINUTE | SECOND |
+              YEAR TO MONTH | DAY TO HOUR | DAY TO MINUTE |
+              DAY TO SECOND | HOUR TO MINUTE | HOUR TO SECOND |
+              MINUTE TO SECOND}
+```
+
+## Parameter Description
+
+- definer
+
+  Specifies the permission used when the scheduled event statement is executed. By default, the permission of the user who creates the scheduled event is used. When definer is specified, the permission of the specified user is used.
+
+  Only users with the sysadmin permission can specify the definer.
+
+- ON COMPLETION [NOT] PRESERVE
+
+  By default, once a scheduled event expires, it is deleted from the system catalog immediately. You can override this default behavior by setting **ON COMPLETION PRESERVE**.
+
+- ENABLE | DISABLE | DISABLE ON SLAVE
+
+  The scheduled event is in the **ENABLE** state by default after it is created. That is, the statement to be executed runs at the specified time. You can use the keyword **DISABLE** to change the **ENABLE** state. **DISABLE ON SLAVE** behaves the same as **DISABLE**.
+
+- COMMENT 'string'
+
+  You can add comments to the scheduled event. The comments can be viewed in the **GS_JOB_ATTRIBUTE** table.
+
+- event_body
+
+  Specifies the statement to be executed for a scheduled event.
+
+## Examples
+
+```
+MogDB=# CREATE TABLE t_ev(num int);
+
+MogDB=# CREATE EVENT IF NOT EXISTS event_e1 ON SCHEDULE AT sysdate + interval 5 second + interval 33 minute DISABLE DO insert into t_ev values(0);
+
+MogDB=# CREATE EVENT IF NOT EXISTS event_e1 ON SCHEDULE EVERY 1 minute DO insert into t_ev values(1);
+```
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+>
+> - If a scheduled event fails to be executed after being created, you can view the failure cause in the **SHOW EVENTS** or **PG_JOB** table.
 > - When operations related to user passwords (such as creating weak passwords) are performed in the statements to be executed for a scheduled event, system catalog records the password in plaintext. Therefore, you are not advised to perform operations related to user passwords in the statements to be executed for the scheduled event.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-FOREIGN-DATA-WRAPPER.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-FOREIGN-DATA-WRAPPER.md
index 896af4b2..7fac4b60 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-FOREIGN-DATA-WRAPPER.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-FOREIGN-DATA-WRAPPER.md
@@ -1,52 +1,52 @@
----
-title: CREATE FOREIGN DATA WRAPPER
-summary: CREATE FOREIGN DATA WRAPPER
-author: zhang cuiping
-date: 2023-04-07
----
-
-# CREATE FOREIGN DATA WRAPPER
-
-## Function Description
-
-Defines a new foreign data wrapper (FDW).
-
-## Syntax
-
-```ebnf+diagram
-CreateForeignDataWrapper ::= CREATE FOREIGN DATA WRAPPER name
-    [ HANDLER handler_function | NO HANDLER ]
-    [ VALIDATOR validator_function | NO VALIDATOR ]
-    [ OPTIONS ( option value [,...] ) ]
-```
-
-## Parameter Description
-
-- **name**
-
-  Specifies the name of an FDW to be created.
-
-- **HANDLER handler_function**
-
-  **handler_function** is the name of the previously registered function that will be called to retrieve the execution function of the foreign table. The handler function cannot contain any parameter, and its return type must be fdw_handler.
-
-- **VALIDATOR validator_function**
-
-  **validator_function** is the name of the previously registered function that will be called to check the general options of the given FDW, as well as the options for the foreign server and user mapping using the FDW. If no validator function is specified, options are not checked at creation time. (The FDW may ignore or reject invalid option specifications at runtime, depending on the implementation.) The validator function must accept two arguments: one is of type text[], which will contain an array of options stored in the system directory, and the other is of type oid, which will be the oid of the system directory that contains the options. The return type is ignored. The function should report invalid options using the ereport (ERROR) function.
-
-- **OPTIONS (option 'value' [,…])**
-
-  Specifies options for the new FDW. The allowed option names and values are specific to each FDW and validated using the FDW validator function. The option name must be unique.
-
-## Examples
-
-```sql
---Creates a useless FDW named dummy.
-MogDB=# CREATE FOREIGN DATA WRAPPER dummy;
-
---Use the handler function file_fdw_handler to create an FDW named file.
-MogDB=# CREATE FOREIGN DATA WRAPPER file HANDLER file_fdw_handler;
-
---Create an FDW named mywrapper.
-MogDB=# CREATE FOREIGN DATA WRAPPER mywrapper OPTIONS (debug 'true');
+---
+title: CREATE FOREIGN DATA WRAPPER
+summary: CREATE FOREIGN DATA WRAPPER
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# CREATE FOREIGN DATA WRAPPER
+
+## Function Description
+
+Defines a new foreign data wrapper (FDW).
+
+## Syntax
+
+```ebnf+diagram
+CreateForeignDataWrapper ::= CREATE FOREIGN DATA WRAPPER name
+    [ HANDLER handler_function | NO HANDLER ]
+    [ VALIDATOR validator_function | NO VALIDATOR ]
+    [ OPTIONS ( option value [,...] ) ]
+```
+
+## Parameter Description
+
+- **name**
+
+  Specifies the name of an FDW to be created.
+
+- **HANDLER handler_function**
+
+  **handler_function** is the name of the previously registered function that will be called to retrieve the execution function of the foreign table. The handler function cannot contain any parameter, and its return type must be fdw_handler.
+
+- **VALIDATOR validator_function**
+
+  **validator_function** is the name of the previously registered function that will be called to check the general options of the given FDW, as well as the options for the foreign server and user mapping using the FDW. If no validator function is specified, options are not checked at creation time. (The FDW may ignore or reject invalid option specifications at runtime, depending on the implementation.) The validator function must accept two arguments: one of type text[], which contains the array of options as stored in the system catalog, and one of type oid, which is the OID of the system catalog that contains the options. The return type is ignored. The function should report invalid options using the ereport (ERROR) function.
+
+- **OPTIONS (option 'value' [,…])**
+
+  Specifies options for the new FDW. The allowed option names and values are specific to each FDW and validated using the FDW validator function. The option name must be unique.
+
+## Examples
+
+```sql
+--Create a useless FDW named dummy.
+MogDB=# CREATE FOREIGN DATA WRAPPER dummy;
+
+--Use the handler function file_fdw_handler to create an FDW named file.
+MogDB=# CREATE FOREIGN DATA WRAPPER file HANDLER file_fdw_handler;
+
+--Create an FDW named mywrapper.
+MogDB=# CREATE FOREIGN DATA WRAPPER mywrapper OPTIONS (debug 'true');
 ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-GROUP.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-GROUP.md
index 275aed18..3c560ed5 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-GROUP.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-GROUP.md
@@ -48,7 +48,7 @@ option ::= {SYSADMIN | NOSYSADMIN}
     | IN ROLE role_name [, ...]
     | IN GROUP role_name [, ...]
     | ROLE role_name [, ...]
-    | ADMIN role_name [, ...]
+    | ADMIN role_name [, ...]
     | USER role_name [, ...]
     | SYSID uid
     | DEFAULT TABLESPACE tablespace_name
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-RESOURCE-POOL.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-RESOURCE-POOL.md
index 4dd0a256..998f733c 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-RESOURCE-POOL.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-RESOURCE-POOL.md
@@ -1,152 +1,152 @@
----
-title: CREATE RESOURCE POOL
-summary: CREATE RESOURCE POOL
-author: Zhang Cuiping
-date: 2021-11-01
----
-
-# CREATE RESOURCE POOL
-
-## Function
-
-**CREATE RESOURCE POOL** creates a resource pool and specifies the Cgroup of the resource pool.
-
-## Precautions
-
-Only SYSADMIN and VCADMIN users can create resource pools.
-
-## Syntax
-
-```ebnf+diagram
-CreateResourcePool ::= CREATE RESOURCE POOL pool_name
-    [WITH ({MEM_PERCENT=pct |
-        CONTROL_GROUP="group_name" |
-        ACTIVE_STATEMENTS=stmt |
-        MAX_DOP = dop |
-        MEMORY_LIMIT='memory_size' |
-        io_limits=io_limits |
-        io_priority='io_priority' |
-        nodegroup="nodegroupname" |
-        is_foreign=boolean }
-        [, ... ])
-    ];
-```
-
-## Parameter Description
-
-- **pool_name**
-
-  Specifies the name of a resource pool.
-
-  The name of a resource pool cannot be same as that of an existing resource pool.
-
-  Value range: a string. It must comply with the identifier naming convention.
-
-- **group_name**
-
-  Specifies the name of a Cgroup.
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-  >
-  > - You can use either double quotation marks ("") or single quotation marks (") in the syntax when setting the name of a Cgroup.
-  > - The value of **group_name** is case-sensitive.
-  > - If **group_name** is not specified, the string "Medium" will be used by default in the syntax, indicating the **Medium** Timeshare Cgroup under **DefaultClass**.
-  > - If a database administrator specifies a Workload Cgroup under **Class**, for example, **control_group** set to **class1:workload1**, the resource pool will be associated with the **workload1** Cgroup under **class1**. The level of the Workload Cgroup can also be specified. For example, **control_group** is set to **class1:workload1:1**.
-  > - If a database user specifies the Timeshare Cgroup string (**Rush**, **High**, **Medium**, or **Low**) in the syntax, for example, **control_group** is set to **High**, the resource pool will be associated with the **High** Timeshare Cgroup under **DefaultClass**.
-
-  Value range: a string. It must comply with the rule in the description, which specifies the created Cgroup.
-
-- **stmt**
-
-  Specifies the maximum number of statements that can be concurrently executed in a resource pool.
- - Value range: numeric data ranging from -1 to 2147483647 - -- **dop** - - Specifies the maximum statement concurrency degree for a resource pool, equivalent to the number of threads that can be created for executing a statement. - - Value range: numeric data ranging from 1 to 2147483647 - -- **memory_size** - - Specifies the maximum memory size of a resource pool. - - Value range: a string from 1 KB to 2047 GB - -- **mem_percent** - - Specifies the proportion of available resource pool memory to the total memory or group user memory. - - In multi-tenant scenarios, the value of **mem_percent** of group users or service users ranges from 1 to 100. The default value is **20**. - - In common scenarios, the value of **mem_percent** of common users ranges from 0 to 100. The default value is **0**. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** When both **mem_percent** and **memory_limit** are specified, only **mem_percent** takes effect. - -- **io_limits** - - Specifies the upper limit of IOPS in a resource pool. - - The IOPS is counted by ones for column storage and by 10 thousands for row storage. - - Value range: numeric data ranging from 0 to 2147483647 - -- **io_priority** - - Specifies the I/O priority for jobs that consume many I/O resources. It takes effect when the I/O usage reaches 90%. - - There are three priorities: **Low**, **Medium**, and **High**. If you do not want to control I/O resources, use the default value **None**. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The settings of **io_limits** and **io_priority** are valid only for complex jobs, such as batch import (using **INSERT INTO SELECT**, **COPY FROM**, or **CREATE TABLE AS**), complex queries involving over 500 MB data on each DN, and **VACUUM FULL**. - -- **nodegroup** - - Specifies the name of a logical cluster. The logical cluster must already exist. - - If the logical cluster name contains uppercase letters or special characters or begins with a digit, enclose the name with double quotation marks ("") in SQL statements. - - This parameter is invalid in a standalone system. - -- **is_foreign** - - In logical cluster mode, specifies the current resource pool to control the resources of common users that are not associated with the logical cluster specified by **nodegroup**. - - This parameter is invalid in a standalone system. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - **nodegroup** must specify an existing logical cluster, and cannot be **elastic_group** or the default node group (**group_version1**), which is generated during cluster installation. - > - If **is_foreign** is set to **true**, the resource pool cannot be associated with users. That is, **CREATE USER … RESOURCE POOL** cannot be used to configure resource pools for users. The resource pool automatically checks whether the users are associated with its logical cluster. If they are not, they will be controlled by the resource pool when performing operations on DNs in the logical cluster. - -## Examples - -This example assumes that Cgroups have been created by users in advance. - -```sql --- Create a default resource pool, and associate it with the Medium Timeshare Cgroup under Workload under DefaultClass. -MogDB=# CREATE RESOURCE POOL pool1; - --- Create a resource pool, and associate it with the High Timeshare Workload Cgroup under DefaultClass. 
-MogDB=# CREATE RESOURCE POOL pool2 WITH (CONTROL_GROUP="High");
-
--- Create a resource pool, and associate it with the Low Timeshare Workload Cgroup under class1.
-MogDB=# CREATE RESOURCE POOL pool3 WITH (CONTROL_GROUP="class1:Low");
-
--- Create a resource pool, and associate it with the wg1 Workload Cgroup under class1.
-MogDB=# CREATE RESOURCE POOL pool4 WITH (CONTROL_GROUP="class1:wg1");
-
--- Create a resource pool, and associate it with the wg2 Workload Cgroup under class1.
-MogDB=# CREATE RESOURCE POOL pool5 WITH (CONTROL_GROUP="class1:wg2:3");
-
--- Delete the resource pool.
-MogDB=# DROP RESOURCE POOL pool1;
-MogDB=# DROP RESOURCE POOL pool2;
-MogDB=# DROP RESOURCE POOL pool3;
-MogDB=# DROP RESOURCE POOL pool4;
-MogDB=# DROP RESOURCE POOL pool5;
-```
-
-## Helpful Links
-
-[ALTER RESOURCE POOL](ALTER-RESOURCE-POOL.md),[DROP RESOURCE POOL](DROP-RESOURCE-POOL.md)
+---
+title: CREATE RESOURCE POOL
+summary: CREATE RESOURCE POOL
+author: Zhang Cuiping
+date: 2021-11-01
+---
+
+# CREATE RESOURCE POOL
+
+## Function
+
+**CREATE RESOURCE POOL** creates a resource pool and specifies the Cgroup of the resource pool.
+
+## Precautions
+
+Only SYSADMIN and VCADMIN users can create resource pools.
+
+## Syntax
+
+```ebnf+diagram
+CreateResourcePool ::= CREATE RESOURCE POOL pool_name
+    [WITH ({MEM_PERCENT=pct |
+        CONTROL_GROUP="group_name" |
+        ACTIVE_STATEMENTS=stmt |
+        MAX_DOP = dop |
+        MEMORY_LIMIT='memory_size' |
+        io_limits=io_limits |
+        io_priority='io_priority' |
+        nodegroup="nodegroupname" |
+        is_foreign=boolean }
+        [, ... ])
+    ];
+```
+
+## Parameter Description
+
+- **pool_name**
+
+  Specifies the name of a resource pool.
+
+  The name of a resource pool cannot be same as that of an existing resource pool.
+
+  Value range: a string. It must comply with the identifier naming convention.
+
+- **group_name**
+
+  Specifies the name of a Cgroup.
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+  >
+  > - You can use either double quotation marks ("") or single quotation marks ('') in the syntax when setting the name of a Cgroup.
+  > - The value of **group_name** is case-sensitive.
+  > - If **group_name** is not specified, the string "Medium" will be used by default in the syntax, indicating the **Medium** Timeshare Cgroup under **DefaultClass**.
+  > - If a database administrator specifies a Workload Cgroup under **Class**, for example, **control_group** set to **class1:workload1**, the resource pool will be associated with the **workload1** Cgroup under **class1**. The level of the Workload Cgroup can also be specified. For example, **control_group** is set to **class1:workload1:1**.
+  > - If a database user specifies the Timeshare Cgroup string (**Rush**, **High**, **Medium**, or **Low**) in the syntax, for example, **control_group** is set to **High**, the resource pool will be associated with the **High** Timeshare Cgroup under **DefaultClass**.
+
+  Value range: a string. It must comply with the rule in the description, which specifies the created Cgroup.
+
+- **stmt**
+
+  Specifies the maximum number of statements that can be concurrently executed in a resource pool.
+
+  Value range: numeric data ranging from -1 to 2147483647
+
+- **dop**
+
+  Specifies the maximum statement concurrency degree for a resource pool, equivalent to the number of threads that can be created for executing a statement.
+
+  Value range: numeric data ranging from 1 to 2147483647
+
+- **memory_size**
+
+  Specifies the maximum memory size of a resource pool.
+
+  Value range: a string from 1 KB to 2047 GB
+
+- **mem_percent**
+
+  Specifies the proportion of available resource pool memory to the total memory or group user memory.
+
+  In multi-tenant scenarios, the value of **mem_percent** of group users or service users ranges from 1 to 100. The default value is **20**.
+
+  In common scenarios, the value of **mem_percent** of common users ranges from 0 to 100. The default value is **0**.
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** When both **mem_percent** and **memory_limit** are specified, only **mem_percent** takes effect.
+
+- **io_limits**
+
+  Specifies the upper limit of IOPS in a resource pool.
+
+  IOPS is counted in units of one I/O for column storage and in units of 10,000 I/Os for row storage.
+
+  Value range: numeric data ranging from 0 to 2147483647
+
+- **io_priority**
+
+  Specifies the I/O priority for jobs that consume many I/O resources. It takes effect when the I/O usage reaches 90%.
+
+  There are three priorities: **Low**, **Medium**, and **High**. If you do not want to control I/O resources, use the default value **None**.
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The settings of **io_limits** and **io_priority** are valid only for complex jobs, such as batch import (using **INSERT INTO SELECT**, **COPY FROM**, or **CREATE TABLE AS**), complex queries involving over 500 MB data on each DN, and **VACUUM FULL**.
+
+- **nodegroup**
+
+  Specifies the name of a logical cluster. The logical cluster must already exist.
+
+  If the logical cluster name contains uppercase letters or special characters or begins with a digit, enclose the name with double quotation marks ("") in SQL statements.
+
+  This parameter is invalid in a standalone system.
+
+- **is_foreign**
+
+  In logical cluster mode, specifies the current resource pool to control the resources of common users that are not associated with the logical cluster specified by **nodegroup**.
+
+  This parameter is invalid in a standalone system.
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+  >
+  > - **nodegroup** must specify an existing logical cluster, and cannot be **elastic_group** or the default node group (**group_version1**), which is generated during cluster installation.
+  > - If **is_foreign** is set to **true**, the resource pool cannot be associated with users. That is, **CREATE USER … RESOURCE POOL** cannot be used to configure resource pools for users. The resource pool automatically checks whether the users are associated with its logical cluster. If they are not, they will be controlled by the resource pool when performing operations on DNs in the logical cluster.
+
+## Examples
+
+This example assumes that Cgroups have been created by users in advance.
+
+```sql
+-- Create a default resource pool, and associate it with the Medium Timeshare Cgroup under Workload under DefaultClass.
+MogDB=# CREATE RESOURCE POOL pool1;
+
+-- Create a resource pool, and associate it with the High Timeshare Workload Cgroup under DefaultClass.
+MogDB=# CREATE RESOURCE POOL pool2 WITH (CONTROL_GROUP="High");
+
+-- Create a resource pool, and associate it with the Low Timeshare Workload Cgroup under class1.
+MogDB=# CREATE RESOURCE POOL pool3 WITH (CONTROL_GROUP="class1:Low");
+
+-- Create a resource pool, and associate it with the wg1 Workload Cgroup under class1.
+MogDB=# CREATE RESOURCE POOL pool4 WITH (CONTROL_GROUP="class1:wg1");
+
+-- Create a resource pool, and associate it with the wg2 Workload Cgroup under class1.
+MogDB=# CREATE RESOURCE POOL pool5 WITH (CONTROL_GROUP="class1:wg2:3");
+
+-- Delete the resource pool.
+MogDB=# DROP RESOURCE POOL pool1;
+MogDB=# DROP RESOURCE POOL pool2;
+MogDB=# DROP RESOURCE POOL pool3;
+MogDB=# DROP RESOURCE POOL pool4;
+MogDB=# DROP RESOURCE POOL pool5;
+```
+
+## Helpful Links
+
+[ALTER RESOURCE POOL](ALTER-RESOURCE-POOL.md), [DROP RESOURCE POOL](DROP-RESOURCE-POOL.md)
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-ROLE.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-ROLE.md
index 64489b18..e5c74a48 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-ROLE.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-ROLE.md
@@ -53,7 +53,7 @@ option ::= {SYSADMIN | NOSYSADMIN}
     | IN ROLE role_name [, ...]
     | IN GROUP role_name [, ...]
     | ROLE role_name [, ...]
-    | ADMIN role_name [, ...]
+    | ADMIN role_name [, ...]
     | USER role_name [, ...]
     | SYSID uid
     | DEFAULT TABLESPACE tablespace_name
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-SERVER.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-SERVER.md
index f5c63031..12b6b032 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-SERVER.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-SERVER.md
@@ -1,128 +1,128 @@
----
-title: CREATE SERVER
-summary: CREATE SERVER
-author: Zhang Cuiping
-date: 2021-05-10
----
-
-# CREATE SERVER
-
-## Function
-
-**CREATE SERVER** defines a new foreign server.
-
-## Syntax
-
-```ebnf+diagram
-CreateServer ::= CREATE SERVER server_name
-    FOREIGN DATA WRAPPER fdw_name
-    OPTIONS ( { option_name ' value ' } [, ...] ) ;
-```
-
-## Parameter Description
-
-- **server_name**
-
-  Specifies the server name.
-
-  Value range: a string containing no more than 63 characters
-
-- **server_type**
-
-  Optional server type, which may be useful for foreign data wrappers.
-
-- **server_version**
-
-  Optional server version, which may be useful for foreign data wrappers.
-
-- **fdw_name**
-
-  Specifies the name of the foreign data wrapper.
-
-  Value range: **dist\_fdw**, **hdfs\_fdw**, **log\_fdw**, **mot\_fdw**, **file\_fdw**, **oracle\_fdw**, **mysql\_fdw**, and **postgres\_fdw**.
-
-- **OPTIONS ( { option_name ' value ' } [, …] )**
-
-  Specifies options for the server. These options typically define the connection details of the server, but the actual names and values depend on the foreign data wrapper of the server.
-
-  - Options supported by **oracle_fdw** are as follows:
-
-    - **dbserver**
-
-      Connection string of the remote Oracle database.
-
-    - **isolation_level** (default value: **serializable**)
-
-      Oracle database transaction isolation level.
-
-      Value range: serializable, read_committed, and read_only
-
-  - Options supported by mysql_fdw are as follows:
-
-    - **host** (default value: **127.0.0.1**)
-
-      IP address of the MySQL server or MariaDB.
-
-    - **port** (default value: **3306**)
-
-      Listening port number of the MySQL server or MariaDB.
-
-  - The options supported by postgres_fdw are the same as those supported by libpq. For details, see [Link Parameters](../../developer-guide/dev/4-development-based-on-libpq/link-parameters.md).
Note that the following options cannot be set: - - - **user** and **password** - - The user name and password are specified when the user mapping is created. - - - **client_encoding** - - The encoding mode of the local server is automatically obtained and set. - - - **application_name** - - This option is always set to **postgres_fdw**. - - - Specifies the parameters for the foreign server. The detailed parameter description is as follows: - - - encrypt - - Specifies whether data is encrypted. This parameter is available only when **type** is **OBS**. The default value is **on**. - - Value range: - - - **on** indicates that data is encrypted and HTTPS is used for communication. - - **off** indicates that data is not encrypted and HTTP is used for communication. - - - access_key - - Specifies the access key (AK) (obtained by users from the OBS console) used for the OBS access protocol. When you create a foreign table, the AK value is encrypted and saved to the metadata table of the database. This parameter is available only when **type** is set to **OBS**. - - - secret_access_key - - Specifies the secret key (SK) value (obtained by users from the OBS console) used for the OBS access protocol. When you create a foreign table, the SK value is encrypted and saved to the metadata table of the database. This parameter is available only when **type** is set to **OBS**. - -In addition to the connection parameters supported by libpq, the following options are provided: - -- **use_remote_estimate** - - Controls whether **postgres_fdw** issues the EXPLAIN command to obtain the estimated run time. The default value is **false**. - -- **fdw_startup_cost** - - Estimates the startup time required for a foreign table scan, including the time to establish a connection, analyzes the request at the remote server, and generates a plan. The default value is **100**. - -- **fdw_typle_cost** - - Specifies the additional consumption when each tuple is scanned on a remote server. The value specifies the extra consumption of data transmission between servers. The default value is **0.01**. - -## Examples - -Create a server. - -```sql -MogDB=# create server my_server foreign data wrapper log_fdw; -CREATE SERVER -``` - -## Helpful Links - -[ALTER SERVER](ALTER-SERVER.md) and [DROP SERVER](DROP-SERVER.md) +--- +title: CREATE SERVER +summary: CREATE SERVER +author: Zhang Cuiping +date: 2021-05-10 +--- + +# CREATE SERVER + +## Function + +**CREATE SERVER** defines a new foreign server. + +## Syntax + +```ebnf+diagram +CreateServer ::= CREATE SERVER server_name + FOREIGN DATA WRAPPER fdw_name + OPTIONS ( { option_name ' value ' } [, ...] ) ; +``` + +## Parameter Description + +- **server_name** + + Specifies the server name. + + Value range: a string containing no more than 63 characters + +- **server_type** + + Optional server type, which may be useful for foreign data wrappers. + +- **server_version** + + Optional server version, which may be useful for foreign data wrappers. + +- **fdw_name** + + Specifies the name of the foreign data wrapper. + + Value range: **dist\_fdw**, **hdfs\_fdw**, **log\_fdw**, **mot\_fdw**, **file\_fdw**, **oracle\_fdw**, **mysql\_fdw**, and **postgres\_fdw**. + +- **OPTIONS ( { option_name ' value ' } [, …] )** + + Specifies options for the server. These options typically define the connection details of the server, but the actual names and values depend on the foreign data wrapper of the server. 
+
+  - Options supported by **oracle_fdw** are as follows:
+
+    - **dbserver**
+
+      Connection string of the remote Oracle database.
+
+    - **isolation_level** (default value: **serializable**)
+
+      Oracle database transaction isolation level.
+
+      Value range: serializable, read_committed, and read_only
+
+  - Options supported by mysql_fdw are as follows:
+
+    - **host** (default value: **127.0.0.1**)
+
+      IP address of the MySQL server or MariaDB.
+
+    - **port** (default value: **3306**)
+
+      Listening port number of the MySQL server or MariaDB.
+
+  - The options supported by postgres_fdw are the same as those supported by libpq. For details, see [Link Parameters](../../developer-guide/dev/4-development-based-on-libpq/link-parameters.md). Note that the following options cannot be set:
+
+    - **user** and **password**
+
+      The user name and password are specified when the user mapping is created.
+
+    - **client_encoding**
+
+      The encoding mode of the local server is automatically obtained and set.
+
+    - **application_name**
+
+      This option is always set to **postgres_fdw**.
+
+  - Specifies the parameters for the foreign server. The detailed parameter description is as follows:
+
+    - encrypt
+
+      Specifies whether data is encrypted. This parameter is available only when **type** is **OBS**. The default value is **on**.
+
+      Value range:
+
+      - **on** indicates that data is encrypted and HTTPS is used for communication.
+      - **off** indicates that data is not encrypted and HTTP is used for communication.
+
+    - access_key
+
+      Specifies the access key (AK) (obtained by users from the OBS console) used for the OBS access protocol. When you create a foreign table, the AK value is encrypted and saved to the metadata table of the database. This parameter is available only when **type** is set to **OBS**.
+
+    - secret_access_key
+
+      Specifies the secret key (SK) value (obtained by users from the OBS console) used for the OBS access protocol. When you create a foreign table, the SK value is encrypted and saved to the metadata table of the database. This parameter is available only when **type** is set to **OBS**.
+
+In addition to the connection parameters supported by libpq, the following options are provided:
+
+- **use_remote_estimate**
+
+  Controls whether **postgres_fdw** issues the EXPLAIN command to obtain the estimated run time. The default value is **false**.
+
+- **fdw_startup_cost**
+
+  Estimates the startup time required for a foreign table scan, including the time to establish a connection, analyze the request at the remote server, and generate a plan. The default value is **100**.
+
+- **fdw_tuple_cost**
+
+  Specifies the additional consumption when each tuple is scanned on a remote server. The value specifies the extra consumption of data transmission between servers. The default value is **0.01**.
+
+## Examples
+
+Create a server.
+ +```sql +MogDB=# create server my_server foreign data wrapper log_fdw; +CREATE SERVER +``` + +## Helpful Links + +[ALTER SERVER](ALTER-SERVER.md) and [DROP SERVER](DROP-SERVER.md) diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-USER.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-USER.md index d87e11f1..b388c8e6 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-USER.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/CREATE-USER.md @@ -1,127 +1,127 @@ ---- -title: CREATE USER -summary: CREATE USER -author: Zhang Cuiping -date: 2021-05-10 ---- - -# CREATE USER - -## Function - -**CREATE USER** creates a user. - -## Precautions - -- A user created using the **CREATE USER** statement has the **LOGIN** permission by default. -- When you run the **CREATE USER** command to create a user, the system creates a schema with the same name as the user in the database where the command is executed. -- The owner of an object created by a system administrator in a schema with the same name as a common user is the common user, not the system administrator. - -## Syntax - -```ebnf+diagram -CreateUser ::= CREATE USER [IF NOT EXISTS] user_name [ [ WITH ] option [ ... ] ] [ ENCRYPTED | UNENCRYPTED ] { PASSWORD | IDENTIFIED BY } { 'password' [EXPIRED] | DISABLE }; -``` - -The **option** clause is used to configure information, including permissions and properties. - -```ebnf+diagram -option ::= {SYSADMIN | NOSYSADMIN} - | {MONADMIN | NOMONADMIN} - | {OPRADMIN | NOOPRADMIN} - | {POLADMIN | NOPOLADMIN} - | {AUDITADMIN | NOAUDITADMIN} - | {CREATEDB | NOCREATEDB} - | {USEFT | NOUSEFT} - | {CREATEROLE | NOCREATEROLE} - | {INHERIT | NOINHERIT} - | {LOGIN | NOLOGIN} - | {REPLICATION | NOREPLICATION} - | {INDEPENDENT | NOINDEPENDENT} - | {VCADMIN | NOVCADMIN} - | {PERSISTENCE | NOPERSISTENCE} - | CONNECTION LIMIT connlimit - | VALID BEGIN 'timestamp' - | VALID UNTIL 'timestamp' - | RESOURCE POOL 'respool' - | USER GROUP 'groupuser' - | PERM SPACE 'spacelimit' - | TEMP SPACE 'tmpspacelimit' - | SPILL SPACE 'spillspacelimit' - | NODE GROUP logic_cluster_name - | IN ROLE role_name [, ...] - | IN GROUP role_name [, ...] - | ROLE role_name [, ...] - | ADMIN role_name [, ...] - | USER role_name [, ...] - | SYSID uid - | DEFAULT TABLESPACE tablespace_name - | PROFILE DEFAULT - | PROFILE profile_name - | PGUSER -``` - -## Parameter Description - -- **IF NOT EXISTS** - - Sends a notice, but does not throw an error, if a user with the same name exists. - -- **user_name** - - Specifies the name of the user to be created. - - Value range: a string. It must comply with the naming convention. A value can contain a maximum of 63 characters. - -- **password** - - Specifies the login password. - - The new password must: - - - Contain at least eight characters. This is the default length. - - Differ from the username or the username spelled backward. - - Contain at least three of the following character types: uppercase characters, lowercase characters, digits, and special characters (limited to \~!@\#$ %^&\*()-_=+\\|[\{\}];:,<.\>/?). - - The password can also be a ciphertext character string that meets the format requirements. This mode is mainly used to import user data. You are not advised to use it directly. If a ciphertext password is directly used, the user must know the plaintext corresponding to the ciphertext password and ensure the complexity of the plaintext password. The database does not verify the complexity of the ciphertext password. 
The security of the ciphertext password is ensured by the user. - - Be enclosed by single or double quotation marks. - - Value range: a string - -For other parameters, see [CREATE ROLE](CREATE-ROLE.md#parameter-description). - -## Examples - -```sql --- Create user jim whose login password is xxxxxxxxx: -MogDB=# CREATE USER jim PASSWORD 'xxxxxxxxx'; - --- Alternatively, you can run the following statement: -MogDB=# CREATE USER kim IDENTIFIED BY 'xxxxxxxxx'; - --- To create a user with the CREATEDB permission, add the CREATEDB keyword. -MogDB=# CREATE USER dim CREATEDB PASSWORD 'xxxxxxxxx'; - --- Change user jim's login password from xxxxxxxxx to Abcd@123: -MogDB=# ALTER USER jim IDENTIFIED BY 'Abcd@123' REPLACE 'xxxxxxxxx'; - --- Add the CREATEROLE permission to jim. -MogDB=# ALTER USER jim CREATEROLE; - --- Set enable_seqscan to on. (The setting will take effect in the next session.) -MogDB=# ALTER USER jim SET enable_seqscan TO on; - --- Reset the enable_seqscan parameter for jim. -MogDB=# ALTER USER jim RESET enable_seqscan; - --- Lock jim. -MogDB=# ALTER USER jim ACCOUNT LOCK; - --- Delete users. -MogDB=# DROP USER kim CASCADE; -MogDB=# DROP USER jim CASCADE; -MogDB=# DROP USER dim CASCADE; -``` - -## Helpful Links - -[ALTER USER](CREATE-USER.md),[CREATE ROLE](CREATE-ROLE.md),[DROP USER](DROP-USER.md) +--- +title: CREATE USER +summary: CREATE USER +author: Zhang Cuiping +date: 2021-05-10 +--- + +# CREATE USER + +## Function + +**CREATE USER** creates a user. + +## Precautions + +- A user created using the **CREATE USER** statement has the **LOGIN** permission by default. +- When you run the **CREATE USER** command to create a user, the system creates a schema with the same name as the user in the database where the command is executed. +- The owner of an object created by a system administrator in a schema with the same name as a common user is the common user, not the system administrator. + +## Syntax + +```ebnf+diagram +CreateUser ::= CREATE USER [IF NOT EXISTS] user_name [ [ WITH ] option [ ... ] ] [ ENCRYPTED | UNENCRYPTED ] { PASSWORD | IDENTIFIED BY } { 'password' [EXPIRED] | DISABLE }; +``` + +The **option** clause is used to configure information, including permissions and properties. + +```ebnf+diagram +option ::= {SYSADMIN | NOSYSADMIN} + | {MONADMIN | NOMONADMIN} + | {OPRADMIN | NOOPRADMIN} + | {POLADMIN | NOPOLADMIN} + | {AUDITADMIN | NOAUDITADMIN} + | {CREATEDB | NOCREATEDB} + | {USEFT | NOUSEFT} + | {CREATEROLE | NOCREATEROLE} + | {INHERIT | NOINHERIT} + | {LOGIN | NOLOGIN} + | {REPLICATION | NOREPLICATION} + | {INDEPENDENT | NOINDEPENDENT} + | {VCADMIN | NOVCADMIN} + | {PERSISTENCE | NOPERSISTENCE} + | CONNECTION LIMIT connlimit + | VALID BEGIN 'timestamp' + | VALID UNTIL 'timestamp' + | RESOURCE POOL 'respool' + | USER GROUP 'groupuser' + | PERM SPACE 'spacelimit' + | TEMP SPACE 'tmpspacelimit' + | SPILL SPACE 'spillspacelimit' + | NODE GROUP logic_cluster_name + | IN ROLE role_name [, ...] + | IN GROUP role_name [, ...] + | ROLE role_name [, ...] + | ADMIN role_name [, ...] + | USER role_name [, ...] + | SYSID uid + | DEFAULT TABLESPACE tablespace_name + | PROFILE DEFAULT + | PROFILE profile_name + | PGUSER +``` + +## Parameter Description + +- **IF NOT EXISTS** + + Sends a notice, but does not throw an error, if a user with the same name exists. + +- **user_name** + + Specifies the name of the user to be created. + + Value range: a string. It must comply with the naming convention. A value can contain a maximum of 63 characters. 
+
+- **password**
+
+  Specifies the login password.
+
+  The new password must:
+
+  - Contain at least eight characters. This is the default length.
+  - Differ from the username or the username spelled backward.
+  - Contain at least three of the following character types: uppercase characters, lowercase characters, digits, and special characters (limited to \~!@\#$ %^&\*()-_=+\\|[\{\}];:,<.\>/?).
+  - The password can also be a ciphertext character string that meets the format requirements. This mode is mainly used to import user data. You are not advised to use it directly. If a ciphertext password is directly used, the user must know the plaintext corresponding to the ciphertext password and ensure the complexity of the plaintext password. The database does not verify the complexity of the ciphertext password. The security of the ciphertext password is ensured by the user.
+  - Be enclosed by single or double quotation marks.
+
+  Value range: a string
+
+For other parameters, see [CREATE ROLE](CREATE-ROLE.md#parameter-description).
+
+## Examples
+
+```sql
+-- Create user jim whose login password is xxxxxxxxx:
+MogDB=# CREATE USER jim PASSWORD 'xxxxxxxxx';
+
+-- Alternatively, you can run the following statement:
+MogDB=# CREATE USER kim IDENTIFIED BY 'xxxxxxxxx';
+
+-- To create a user with the CREATEDB permission, add the CREATEDB keyword.
+MogDB=# CREATE USER dim CREATEDB PASSWORD 'xxxxxxxxx';
+
+-- Change user jim's login password from xxxxxxxxx to Abcd@123:
+MogDB=# ALTER USER jim IDENTIFIED BY 'Abcd@123' REPLACE 'xxxxxxxxx';
+
+-- Add the CREATEROLE permission to jim.
+MogDB=# ALTER USER jim CREATEROLE;
+
+-- Set enable_seqscan to on. (The setting will take effect in the next session.)
+MogDB=# ALTER USER jim SET enable_seqscan TO on;
+
+-- Reset the enable_seqscan parameter for jim.
+MogDB=# ALTER USER jim RESET enable_seqscan;
+
+-- Lock jim.
+MogDB=# ALTER USER jim ACCOUNT LOCK;
+
+-- Delete users.
+MogDB=# DROP USER kim CASCADE;
+MogDB=# DROP USER jim CASCADE;
+MogDB=# DROP USER dim CASCADE;
+```
+
+## Helpful Links
+
+[ALTER USER](ALTER-USER.md), [CREATE ROLE](CREATE-ROLE.md), [DROP USER](DROP-USER.md)
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DELIMITER.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DELIMITER.md
index b20897ef..d770d017 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DELIMITER.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DELIMITER.md
@@ -1,58 +1,58 @@
----
-title: DELIMITER
-summary: DELIMITER
-author: zhang cuiping
-date: 2023-04-07
----
-
-# DELIMITER
-
-## Function
-
-DELIMITER defines a delimiter, indicating that the input command ends when the delimiter is encountered. When there are many input statements and semicolons (;) exist in the statements, you can specify a special symbol as the delimiter. By default, the delimiter is a semicolon (;).
-
-## Precautions
-
-Currently, the delimiter cannot be set freely. The delimiter can be a keyword, identifier, character string, operator, and semicolon. The common usage is “//”. For details, see the examples.
-
-The delimiter is of the session level, supported only by the gsql client, and available only in B-compatible mode.
-
-## Syntax
-
-- Define a delimiter.
-
-  ```ebnf+diagram
-  Delimiter ::= DELIMITER delimiter_str_name END_OF_INPUT
-      DELIMITER delimiter_str_name END_OF_INPUT_COLON
-  ```
-
-## Parameter Description
-
-- **delim_str_name**
-
-  Indicates types of delimiters that can be defined.
-
-- **END_OF_INPUT/END_OF_INPUT_COLON**
-
-  Indicates the end status.
-
-## Examples
-
-```sql
---Define an identifier.
-MogDB=# delimiter abcd
-
---Define a character string.
-MogDB=# delimiter "sds;"
-
---Define an operator.
-MogDB=# delimiter +
-MogDB=# delimiter /
-
---Define a default value.
-MogDB=# delimtier ;
-```
-
-## Helpful Links
-
+---
+title: DELIMITER
+summary: DELIMITER
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# DELIMITER
+
+## Function
+
+DELIMITER defines a delimiter, indicating that the input command ends when the delimiter is encountered. When there are many input statements and semicolons (;) exist in the statements, you can specify a special symbol as the delimiter. By default, the delimiter is a semicolon (;).
+
+## Precautions
+
+Currently, the delimiter cannot be set freely. The delimiter can be a keyword, identifier, character string, operator, and semicolon. The common usage is “//”. For details, see the examples.
+
+The delimiter is of the session level, supported only by the gsql client, and available only in B-compatible mode.
+
+## Syntax
+
+- Define a delimiter.
+
+  ```ebnf+diagram
+  Delimiter ::= DELIMITER delimiter_str_name END_OF_INPUT
+      DELIMITER delimiter_str_name END_OF_INPUT_COLON
+  ```
+
+## Parameter Description
+
+- **delimiter_str_name**
+
+  Indicates types of delimiters that can be defined.
+
+- **END_OF_INPUT/END_OF_INPUT_COLON**
+
+  Indicates the end status.
+
+## Examples
+
+```sql
+--Define an identifier.
+MogDB=# delimiter abcd
+
+--Define a character string.
+MogDB=# delimiter "sds;"
+
+--Define an operator.
+MogDB=# delimiter +
+MogDB=# delimiter /
+
+--Define a default value.
+MogDB=# delimiter ;
+```
+
+## Helpful Links
+
None.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-EVENT-TRIGGER.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-EVENT-TRIGGER.md
index 6a93e572..203191c5 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-EVENT-TRIGGER.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-EVENT-TRIGGER.md
@@ -1,47 +1,47 @@
----
-title: DROP EVENT TRIGGER
-summary: DROP EVENT TRIGGER
-author: zhang cuiping
-date: 2023-04-07
----
-
-# DROP EVENT TRIGGER
-
-## Function
-
-DROP EVENT TRIGGER deletes an event trigger.
-
-## Precautions
-
-Only the super user or system administrator has the permission to delete an event trigger.
-
-## Syntax
-
-```ebnf+diagram
-DropEventTrigger ::= DROP EVENT TRIGGER [ IF EXISTS ] name [ CASCADE | RESTRICT ]
-```
-
-## Parameter Description
-
-- **IF EXISTS**
-
-  Reports a notice instead of an error if the specified event trigger does not exist.
-
-- **name**
-
-  Specifies the name of the event trigger to be deleted.
-
-  Value range: all existing event triggers.
-
-- **CASCADE | RESTRICT**
-
-  - **CASCADE**: automatically deletes the objects that depend on the trigger.
-  - **RESTRICT**: refuses to delete the trigger if any objects depend on it. This is the default action.
-
-## Examples
-
-For details, see examples in [CREATE EVENT TRIGGER](CREATE-EVENT-TRIGGER.md).
-
-## Helpful Links
-
+---
+title: DROP EVENT TRIGGER
+summary: DROP EVENT TRIGGER
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# DROP EVENT TRIGGER
+
+## Function
+
+DROP EVENT TRIGGER deletes an event trigger.
+
+## Precautions
+
+Only the super user or system administrator has the permission to delete an event trigger.
+ +## Syntax + +```ebnf+diagram +DropEventTrigger ::= DROP EVENT TRIGGER [ IF EXISTS ] name [ CASCADE | RESTRICT ] +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if the specified event trigger does not exist. + +- **name** + + Specifies the name of the event trigger to be deleted. + + Value range: all existing event triggers. + +- **CASCADE | RESTRICT** + + - **CASCADE**: automatically deletes the objects that depend on the trigger. + - **RESTRICT**: refuses to delete the trigger if any objects depend on it. This is the default action. + +## Examples + +For details, see examples in [CREATE EVENT TRIGGER](CREATE-EVENT-TRIGGER.md). + +## Helpful Links + [ALTER EVENT TRIGGER](ALTER-EVENT-TRIGGER.md), [CREATE EVENT TRIGGER](CREATE-EVENT-TRIGGER.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-EVENT.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-EVENT.md index e93702ca..a4fb843f 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-EVENT.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-EVENT.md @@ -1,38 +1,38 @@ ---- -title: DROP EVENT -summary: DROP EVENT -author: zhang cuiping -date: 2023-04-07 ---- - -# DROP EVENT - -## Function - -DROP EVENT deletes a scheduled task. - -## Precautions - -Operations related to scheduled events are supported only when **sql_compatibility** is set to **'B'**. - -## Syntax - -```ebnf+diagram -DropEvent ::=DROP EVENT [IF EXISTS] event_name -``` - -## Parameter Description - -- IF EXISTS - - If the scheduled task does not exist, a NOTICE message is displayed. - -- name - - Name of the scheduled task to be deleted. - -## Examples - -```sql -MogDB=# DROP EVENT event_e1; +--- +title: DROP EVENT +summary: DROP EVENT +author: zhang cuiping +date: 2023-04-07 +--- + +# DROP EVENT + +## Function + +DROP EVENT deletes a scheduled task. + +## Precautions + +Operations related to scheduled events are supported only when **sql_compatibility** is set to **'B'**. + +## Syntax + +```ebnf+diagram +DropEvent ::=DROP EVENT [IF EXISTS] event_name +``` + +## Parameter Description + +- IF EXISTS + + If the scheduled task does not exist, a NOTICE message is displayed. + +- name + + Name of the scheduled task to be deleted. + +## Examples + +```sql +MogDB=# DROP EVENT event_e1; ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-FOREIGN-DATA-WRAPPER.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-FOREIGN-DATA-WRAPPER.md index 5a80ea20..1d232822 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-FOREIGN-DATA-WRAPPER.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-FOREIGN-DATA-WRAPPER.md @@ -1,41 +1,41 @@ ---- -title: DROP FOREIGN DATA WRAPPER -summary: DROP FOREIGN DATA WRAPPER -author: zhang cuiping -date: 2023-04-07 ---- - -# DROP FOREIGN DATA WRAPPER - -## Function Description - -Drops a foreign data wrapper (FDW). - -## Syntax - -```ebnf+diagram -DropForeignDataWrapper ::= DROP FOREIGN DATA WRAPPER [ IF EXISTS ] name [ CASCADE | RESTRICT ] -``` - -## Parameter Description - -- **name** - - Specifies the name of an FDW to be dropped. - -- **CASCADE** - - Automatically drops objects (such as servers) that depend on the FDW. - -- **RESTRICT** - - Refuses to drop the FDW if there is any dependency on the FDW. This option is the default option. - -## Examples - -```sql ---Create an FDW named dbi. 
-MogDB=# CREATE FOREIGN DATA WRAPPER dbi OPTIONS (debug 'true'); ---Drop dbi. -MogDB=# DROP FOREIGN DATA WRAPPER dbi; +--- +title: DROP FOREIGN DATA WRAPPER +summary: DROP FOREIGN DATA WRAPPER +author: zhang cuiping +date: 2023-04-07 +--- + +# DROP FOREIGN DATA WRAPPER + +## Function Description + +Drops a foreign data wrapper (FDW). + +## Syntax + +```ebnf+diagram +DropForeignDataWrapper ::= DROP FOREIGN DATA WRAPPER [ IF EXISTS ] name [ CASCADE | RESTRICT ] +``` + +## Parameter Description + +- **name** + + Specifies the name of an FDW to be dropped. + +- **CASCADE** + + Automatically drops objects (such as servers) that depend on the FDW. + +- **RESTRICT** + + Refuses to drop the FDW if there is any dependency on the FDW. This option is the default option. + +## Examples + +```sql +--Create an FDW named dbi. +MogDB=# CREATE FOREIGN DATA WRAPPER dbi OPTIONS (debug 'true'); +--Drop dbi. +MogDB=# DROP FOREIGN DATA WRAPPER dbi; ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-RESOURCE-POOL.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-RESOURCE-POOL.md index c37a0f2c..a189f971 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-RESOURCE-POOL.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/DROP-RESOURCE-POOL.md @@ -1,44 +1,44 @@ ---- -title: DROP RESOURCE POOL -summary: DROP RESOURCE POOL -author: Zhang Cuiping -date: 2021-11-01 ---- - -# DROP RESOURCE POOL - -## Function - -**DROP RESOURCE POOL** deletes a resource pool. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The resource pool cannot be deleted if it is associated with a role. - -## Precautions - -Only SYSADMIN and VCADMIN users can delete resource pools. - -## Syntax - -```ebnf+diagram -DropResourcePool ::= DROP RESOURCE POOL [ IF EXISTS ] pool_name; -``` - -## Parameter Description - -- **IF EXISTS** - - Reports a notice instead of an error if a specified resource pool does not exist. - -- **pool_name** - - Specifies the name of a created resource pool. - - Value range: a string. It must comply with the identifier naming convention. - -## Examples - -See **Examples** in **CREATE RESOURCE POOL**. - -## Helpful Links - +--- +title: DROP RESOURCE POOL +summary: DROP RESOURCE POOL +author: Zhang Cuiping +date: 2021-11-01 +--- + +# DROP RESOURCE POOL + +## Function + +**DROP RESOURCE POOL** deletes a resource pool. + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The resource pool cannot be deleted if it is associated with a role. + +## Precautions + +Only SYSADMIN and VCADMIN users can delete resource pools. + +## Syntax + +```ebnf+diagram +DropResourcePool ::= DROP RESOURCE POOL [ IF EXISTS ] pool_name; +``` + +## Parameter Description + +- **IF EXISTS** + + Reports a notice instead of an error if a specified resource pool does not exist. + +- **pool_name** + + Specifies the name of a created resource pool. + + Value range: a string. It must comply with the identifier naming convention. + +## Examples + +See **Examples** in **CREATE RESOURCE POOL**. 
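+
+For quick reference, a minimal sketch is shown below; the pool name pool1 is illustrative and assumes the pool is not associated with any role:
+
+```sql
+-- Create a resource pool, then delete it (pool1 is a hypothetical name).
+MogDB=# CREATE RESOURCE POOL pool1;
+MogDB=# DROP RESOURCE POOL pool1;
+
+-- With IF EXISTS, dropping a missing pool reports a notice instead of an error.
+MogDB=# DROP RESOURCE POOL IF EXISTS pool1;
+```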
+ +## Helpful Links + [ALTER RESOURCE POOL](ALTER-RESOURCE-POOL.md) and [CREATE RESOURCE POOL](CREATE-RESOURCE-POOL.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/SHOW-EVENTS.md b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/SHOW-EVENTS.md index 38a0b252..3d48e906 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/SHOW-EVENTS.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/sql-syntax/SHOW-EVENTS.md @@ -1,43 +1,43 @@ ---- -title: SHOW EVENTS -summary: SHOW EVENTS -author: zhang cuiping -date: 2023-04-07 ---- - -# SHOW EVENTS - -SHOW EVENTS displays basic information about all scheduled tasks in a specified schema. - -## Precautions - -Operations related to scheduled events are supported only when **sql_compatibility** is set to **'B'**. - -## Syntax - -```ebnf+diagram -ShowEvents ::=SHOW EVENTS - [{FROM | IN} schema_name] - [LIKE 'pattern' | WHERE condition] -``` - -## Parameter Description - -- {FROM | IN} schema_name - - Specifies the schema to be queried. By default, the current schema is queried. - -- LIKE 'pattern' - - Matches a scheduled task by name. If this parameter is not specified, all scheduled tasks in the current schema are printed. - -- WHERE condition - - Forms an expression for row selection to narrow down the query range of **SHOW EVENTS**. **condition** indicates any expression that returns a value of Boolean type. Rows that do not meet this condition will not be retrieved. - -## Examples - -```sql ---View information about all scheduled tasks queried through pattern matching '_e' in the event_a schema. -MogDB=# SHOW EVENTS IN event_a LIKE '_e'; +--- +title: SHOW EVENTS +summary: SHOW EVENTS +author: zhang cuiping +date: 2023-04-07 +--- + +# SHOW EVENTS + +SHOW EVENTS displays basic information about all scheduled tasks in a specified schema. + +## Precautions + +Operations related to scheduled events are supported only when **sql_compatibility** is set to **'B'**. + +## Syntax + +```ebnf+diagram +ShowEvents ::=SHOW EVENTS + [{FROM | IN} schema_name] + [LIKE 'pattern' | WHERE condition] +``` + +## Parameter Description + +- {FROM | IN} schema_name + + Specifies the schema to be queried. By default, the current schema is queried. + +- LIKE 'pattern' + + Matches a scheduled task by name. If this parameter is not specified, all scheduled tasks in the current schema are printed. + +- WHERE condition + + Forms an expression for row selection to narrow down the query range of **SHOW EVENTS**. **condition** indicates any expression that returns a value of Boolean type. Rows that do not meet this condition will not be retrieved. + +## Examples + +```sql +--View information about all scheduled tasks queried through pattern matching '_e' in the event_a schema. +MogDB=# SHOW EVENTS IN event_a LIKE '_e'; ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/HLL.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/HLL.md index 6d128d19..4ad90173 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/HLL.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/HLL.md @@ -1,207 +1,207 @@ ---- -title: HLL -summary: HLL -author: Guo Huan -date: 2021-04-06 ---- - -# HLL - -HyperLoglog (HLL) is an approximation algorithm for efficiently counting the number of distinct values in a data set. It features faster computing and lower space usage. 
You only need to store HLL data structures, instead of data sets. When new data is added to a data set, make hash calculation on the data and insert the result to an HLL. Then, you can obtain the final result based on the HLL. - -[Table 1](#biao1) compares HLL with other algorithms. - -**Table 1** Comparison between HLL and other algorithms - -| Item | Sorting Algorithm | Hash Algorithm | HLL | -| :------------------------ | :-------------------- | :-------------------- | :------------------------------------ | -| Time complexity | O(nlogn) | O(n) | O(n) | -| Space complexity | O(n) | O(n) | log(logn) | -| Error rate | 0 | 0 | ≈0.8% | -| Storage space requirement | Size of original data | Size of original data | The maximum size is 16 KB by default. | - -HLL has advantages over others in the computing speed and storage space requirement. In terms of time complexity, the sorting algorithm needs O(nlogn) time for sorting, and the hash algorithm and HLL need O(n) time for full table scanning. In terms of storage space requirements, the sorting algorithm and hash algorithm need to store raw data before collecting statistics, whereas the HLL algorithm needs to store only the HLL data structures rather than the raw data, and thereby occupying a fixed space of about 16 KB. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - In the current default specifications, the maximum number of distinct values that can be calculated is about 1.1e + 15, and the error rate is 0.8%. If the calculation result exceeds the maximum, the error rate of the calculation result will increase, or the calculation will fail and an error will be reported. -> - When using this feature for the first time, you need to evaluate the distinct values of the service, properly select configuration parameters, and perform verification to ensure that the accuracy meets requirements. -> - By default, the distinct value is 1.1e + 15. If the distinct value is NaN, you need to adjust log2m or use another algorithm to calculate the distinct value. -> - The hash algorithm has an extremely low probability of collision. However, you are still advised to select 2 or 3 hash seeds for verification when using the hash algorithm for the first time. If there is only a small difference between the distinct values, you can select any one of the seeds as the hash seed. - -[Table 2](#hyper) describes main HLL data structures. - -**Table 2** Main HLL data structures - -| Data Type | Description | -| :-------- | :----------------------------------------------------------- | -| hll | The HLL header is a 27-byte field. By default, the data length ranges from 0 KB to 16 KB. The distinct value can be obtained. | - -When you create an HLL data type, 0 to 4 input parameters are supported. The parameter meanings and specifications are the same as those of the **hll_empty** function. The first parameter is **log2m**, indicating the logarithm of the number of buckets, and its value ranges from 10 to 16. The second parameter is **log2explicit**, indicating the threshold in explicit mode, and its value ranges from 0 to 12. The third parameter is **log2sparse**, indicating the threshold of the Sparse mode, and its value ranges from 0 to 14. The fourth parameter is **duplicatecheck**, indicating whether to enable duplicatecheck, and its value ranges from 0 to 1. When the input parameter is set to **-1**, the default value of the HLL parameter is used. 
You can run the **\d** or **\d+** command to view the parameters of the HLL type. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** When the HLL data type is created, the result varies depending on the input parameter behavior: -> -> - When creating an HLL type, do not set the input parameter or set it to **-1**. Use the default value of the corresponding HLL parameter. -> - If a valid value is set for the input parameter, the corresponding HLL parameter uses the input value. -> - If the input value is invalid, an error is reported when the HLL type is created. - -```sql --- Create an HLL table without specifying input parameters. -MogDB=# create table t1 (id integer, set hll); -MogDB=# \d t1 - Table "public.t1" - Column | Type | Modifiers ---------+---------+----------- - id | integer | - set | hll | - --- Create an HLL table, specify the first two input parameters, and use the default values for the last two input parameters. -MogDB=# create table t2 (id integer, set hll(12,4)); -MogDB=# \d t2 - Table "public.t2" - Column | Type | Modifiers ---------+----------------+----------- - id | integer | - set | hll(12,4,12,0) | - --- Create an HLL table, specify the third input parameter, and use default values for other parameters. -MogDB=# create table t3(id int, set hll(-1,-1,8,-1)); -MogDB=# \d t3 - Table "public.t3" - Column | Type | Modifiers ---------+----------------+----------- - id | integer | - set | hll(14,10,8,0) | - --- When a user creates an HLL table and specifies an invalid input parameter, an error is reported. -MogDB=# create table t4(id int, set hll(5,-1)); -ERROR: log2m = 5 is out of range, it should be in range 10 to 16, or set -1 as default -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** When inserting an HLL object to an HLL table, ensure that the parameters of the HLL type are the same as those of the inserted object. Otherwise, an error is reported. - -```sql --- Create an HLL table: -MogDB=# create table t1(id integer, set hll(14)); - --- Insert an HLL object to a table. The insertion succeeds because parameter types are consistent. -MogDB=# insert into t1 values (1, hll_empty(14,-1)); - --- Insert an HLL object to a table. The insertion fails because parameter types are inconsistent. -MogDB=# insert into t1(id, set) values (1, hll_empty(14,5)); -ERROR: log2explicit does not match: source is 5 and dest is 10 -``` - -The following describes HLL application scenarios. - -- Scenario 1: "Hello World" - - The following example shows how to use the HLL data type: - - ```sql - -- Create a table with the HLL type: - MogDB=# create table helloworld (id integer, set hll); - - -- Insert an empty HLL to the table: - MogDB=# insert into helloworld(id, set) values (1, hll_empty()); - - -- Add a hashed integer to the HLL: - MogDB=# update helloworld set set = hll_add(set, hll_hash_integer(12345)) where id = 1; - - -- Add a hashed string to the HLL: - MogDB=# update helloworld set set = hll_add(set, hll_hash_text('hello world')) where id = 1; - - -- Obtain the number of distinct values of the HLL: - MogDB=# select hll_cardinality(set) from helloworld where id = 1; - hll_cardinality - ----------------- - 2 - (1 row) - - -- Delete the table. - MogDB=# drop table helloworld; - ``` - -- Scenario 2: Collect statistics about website visitors. 
- - The following example shows how an HLL collects statistics on the number of users visiting a website within a period of time: - - ```sql - -- Create a raw data table to show that a user has visited the website at a certain time: - MogDB=# create table facts ( - date date, - user_id integer - ); - - -- Create a raw data table to show that a user has visited the website at a certain time: - MogDB=# insert into facts values ('2019-02-20', generate_series(1,100)); - MogDB=# insert into facts values ('2019-02-21', generate_series(1,200)); - MogDB=# insert into facts values ('2019-02-22', generate_series(1,300)); - MogDB=# insert into facts values ('2019-02-23', generate_series(1,400)); - MogDB=# insert into facts values ('2019-02-24', generate_series(1,500)); - MogDB=# insert into facts values ('2019-02-25', generate_series(1,600)); - MogDB=# insert into facts values ('2019-02-26', generate_series(1,700)); - MogDB=# insert into facts values ('2019-02-27', generate_series(1,800)); - - -- Create another table and specify an HLL column: - MogDB=# create table daily_uniques ( - date date UNIQUE, - users hll - ); - - -- Group data by date and insert the data into the HLL: - MogDB=# insert into daily_uniques(date, users) - select date, hll_add_agg(hll_hash_integer(user_id)) - from facts - group by 1; - - -- Calculate the numbers of users visiting the website every day: - MogDB=# select date, hll_cardinality(users) from daily_uniques order by date; - date | hll_cardinality - ------------+------------------ - 2019-02-20 | 100 - 2019-02-21 | 200.217913059312 - 2019-02-22 | 301.76494508014 - 2019-02-23 | 400.862858326446 - 2019-02-24 | 502.626933349694 - 2019-02-25 | 601.922606454213 - 2019-02-26 | 696.602316769498 - 2019-02-27 | 798.111731634412 - (8 rows) - - -- Calculate the number of users who had visited the website in the week from February 20, 2019 to February 26, 2019: - MogDB=# select hll_cardinality(hll_union_agg(users)) from daily_uniques where date >= '2019-02-20'::date and date <= '2019-02-26'::date; - hll_cardinality - ------------------ - 702.941844662509 - (1 row) - - -- Calculate the number of users who had visited the website yesterday but have not visited the website today: - MogDB=# SELECT date, (#hll_union_agg(users) OVER two_days) - #users AS lost_uniques FROM daily_uniques WINDOW two_days AS (ORDER BY date ASC ROWS 1 PRECEDING); - date | lost_uniques - ------------+-------------- - 2019-02-20 | 0 - 2019-02-21 | 0 - 2019-02-22 | 0 - 2019-02-23 | 0 - 2019-02-24 | 0 - 2019-02-25 | 0 - 2019-02-26 | 0 - 2019-02-27 | 0 - (8 rows) - - -- Delete the table. - MogDB=# drop table facts; - MogDB=# drop table daily_uniques; - ``` - -- Scenario 3: The data to be inserted does not meet the requirements of the HLL data structure. - - When inserting data into a column of the HLL type, ensure that the data meets the requirements of the HLL data structure. If the data does not meet the requirements after being parsed, an error will be reported. In the following example, **E\\1234** to be inserted does not meet the requirements of the HLL data structure after being parsed. As a result, an error is reported. - - ```sql - MogDB=# create table test(id integer, set hll); - MogDB=# insert into test values(1, 'E\\1234'); - ERROR: not a hll type, size=6 is not enough - MogDB=# drop table test; - ``` +--- +title: HLL +summary: HLL +author: Guo Huan +date: 2021-04-06 +--- + +# HLL + +HyperLoglog (HLL) is an approximation algorithm for efficiently counting the number of distinct values in a data set. 
It features faster computing and lower space usage. You only need to store HLL data structures, instead of data sets. When new data is added to a data set, perform a hash calculation on the data and insert the result into an HLL. Then, you can obtain the final result based on the HLL.
+
+[Table 1](#biao1) compares HLL with other algorithms.
+
+**Table 1** Comparison between HLL and other algorithms
+
+| Item | Sorting Algorithm | Hash Algorithm | HLL |
+| :------------------------ | :-------------------- | :-------------------- | :------------------------------------ |
+| Time complexity | O(nlogn) | O(n) | O(n) |
+| Space complexity | O(n) | O(n) | log(logn) |
+| Error rate | 0 | 0 | ≈0.8% |
+| Storage space requirement | Size of original data | Size of original data | The maximum size is 16 KB by default. |
+
+HLL has advantages over the others in computing speed and storage space requirements. In terms of time complexity, the sorting algorithm needs O(nlogn) time for sorting, and the hash algorithm and HLL need O(n) time for full table scanning. In terms of storage space requirements, the sorting algorithm and hash algorithm need to store raw data before collecting statistics, whereas the HLL algorithm needs to store only the HLL data structures rather than the raw data, thereby occupying a fixed space of about 16 KB.
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
+>
+> - In the current default specifications, the maximum number of distinct values that can be calculated is about 1.1e + 15, and the error rate is 0.8%. If the calculation result exceeds the maximum, the error rate of the calculation result will increase, or the calculation will fail and an error will be reported.
+> - When using this feature for the first time, you need to evaluate the distinct values of the service, properly select configuration parameters, and perform verification to ensure that the accuracy meets requirements.
+> - By default, the distinct value is 1.1e + 15. If the distinct value is NaN, you need to adjust log2m or use another algorithm to calculate the distinct value.
+> - The hash algorithm has an extremely low probability of collision. However, you are still advised to select 2 or 3 hash seeds for verification when using the hash algorithm for the first time. If there is only a small difference between the distinct values, you can select any one of the seeds as the hash seed.
+
+[Table 2](#hyper) describes the main HLL data structures.
+
+**Table 2** Main HLL data structures
+
+| Data Type | Description |
+| :-------- | :----------------------------------------------------------- |
+| hll | The HLL header is a 27-byte field. By default, the data length ranges from 0 KB to 16 KB. The distinct value can be obtained. |
+
+When you create an HLL data type, 0 to 4 input parameters are supported. The parameter meanings and specifications are the same as those of the **hll_empty** function. The first parameter is **log2m**, indicating the logarithm of the number of buckets, and its value ranges from 10 to 16. The second parameter is **log2explicit**, indicating the threshold in explicit mode, and its value ranges from 0 to 12. The third parameter is **log2sparse**, indicating the threshold of the Sparse mode, and its value ranges from 0 to 14. The fourth parameter is **duplicatecheck**, indicating whether to enable duplicatecheck, and its value ranges from 0 to 1. When the input parameter is set to **-1**, the default value of the HLL parameter is used.
You can run the **\d** or **\d+** command to view the parameters of the HLL type. + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** When the HLL data type is created, the result varies depending on the input parameter behavior: +> +> - When creating an HLL type, do not set the input parameter or set it to **-1**. Use the default value of the corresponding HLL parameter. +> - If a valid value is set for the input parameter, the corresponding HLL parameter uses the input value. +> - If the input value is invalid, an error is reported when the HLL type is created. + +```sql +-- Create an HLL table without specifying input parameters. +MogDB=# create table t1 (id integer, set hll); +MogDB=# \d t1 + Table "public.t1" + Column | Type | Modifiers +--------+---------+----------- + id | integer | + set | hll | + +-- Create an HLL table, specify the first two input parameters, and use the default values for the last two input parameters. +MogDB=# create table t2 (id integer, set hll(12,4)); +MogDB=# \d t2 + Table "public.t2" + Column | Type | Modifiers +--------+----------------+----------- + id | integer | + set | hll(12,4,12,0) | + +-- Create an HLL table, specify the third input parameter, and use default values for other parameters. +MogDB=# create table t3(id int, set hll(-1,-1,8,-1)); +MogDB=# \d t3 + Table "public.t3" + Column | Type | Modifiers +--------+----------------+----------- + id | integer | + set | hll(14,10,8,0) | + +-- When a user creates an HLL table and specifies an invalid input parameter, an error is reported. +MogDB=# create table t4(id int, set hll(5,-1)); +ERROR: log2m = 5 is out of range, it should be in range 10 to 16, or set -1 as default +``` + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** When inserting an HLL object to an HLL table, ensure that the parameters of the HLL type are the same as those of the inserted object. Otherwise, an error is reported. + +```sql +-- Create an HLL table: +MogDB=# create table t1(id integer, set hll(14)); + +-- Insert an HLL object to a table. The insertion succeeds because parameter types are consistent. +MogDB=# insert into t1 values (1, hll_empty(14,-1)); + +-- Insert an HLL object to a table. The insertion fails because parameter types are inconsistent. +MogDB=# insert into t1(id, set) values (1, hll_empty(14,5)); +ERROR: log2explicit does not match: source is 5 and dest is 10 +``` + +The following describes HLL application scenarios. + +- Scenario 1: "Hello World" + + The following example shows how to use the HLL data type: + + ```sql + -- Create a table with the HLL type: + MogDB=# create table helloworld (id integer, set hll); + + -- Insert an empty HLL to the table: + MogDB=# insert into helloworld(id, set) values (1, hll_empty()); + + -- Add a hashed integer to the HLL: + MogDB=# update helloworld set set = hll_add(set, hll_hash_integer(12345)) where id = 1; + + -- Add a hashed string to the HLL: + MogDB=# update helloworld set set = hll_add(set, hll_hash_text('hello world')) where id = 1; + + -- Obtain the number of distinct values of the HLL: + MogDB=# select hll_cardinality(set) from helloworld where id = 1; + hll_cardinality + ----------------- + 2 + (1 row) + + -- Delete the table. + MogDB=# drop table helloworld; + ``` + +- Scenario 2: Collect statistics about website visitors. 
+
+  The following example shows how an HLL collects statistics on the number of users visiting a website within a period of time:
+
+  ```sql
+  -- Create a raw data table to show that a user has visited the website at a certain time:
+  MogDB=# create table facts (
+      date date,
+      user_id integer
+  );
+
+  -- Insert data indicating that users visited the website on each day:
+  MogDB=# insert into facts values ('2019-02-20', generate_series(1,100));
+  MogDB=# insert into facts values ('2019-02-21', generate_series(1,200));
+  MogDB=# insert into facts values ('2019-02-22', generate_series(1,300));
+  MogDB=# insert into facts values ('2019-02-23', generate_series(1,400));
+  MogDB=# insert into facts values ('2019-02-24', generate_series(1,500));
+  MogDB=# insert into facts values ('2019-02-25', generate_series(1,600));
+  MogDB=# insert into facts values ('2019-02-26', generate_series(1,700));
+  MogDB=# insert into facts values ('2019-02-27', generate_series(1,800));
+
+  -- Create another table and specify an HLL column:
+  MogDB=# create table daily_uniques (
+      date date UNIQUE,
+      users hll
+  );
+
+  -- Group data by date and insert the data into the HLL:
+  MogDB=# insert into daily_uniques(date, users)
+      select date, hll_add_agg(hll_hash_integer(user_id))
+      from facts
+      group by 1;
+
+  -- Calculate the numbers of users visiting the website every day:
+  MogDB=# select date, hll_cardinality(users) from daily_uniques order by date;
+      date    | hll_cardinality
+  ------------+------------------
+   2019-02-20 | 100
+   2019-02-21 | 200.217913059312
+   2019-02-22 | 301.76494508014
+   2019-02-23 | 400.862858326446
+   2019-02-24 | 502.626933349694
+   2019-02-25 | 601.922606454213
+   2019-02-26 | 696.602316769498
+   2019-02-27 | 798.111731634412
+  (8 rows)
+
+  -- Calculate the number of users who had visited the website in the week from February 20, 2019 to February 26, 2019:
+  MogDB=# select hll_cardinality(hll_union_agg(users)) from daily_uniques where date >= '2019-02-20'::date and date <= '2019-02-26'::date;
+   hll_cardinality
+  ------------------
+   702.941844662509
+  (1 row)
+
+  -- Calculate the number of users who had visited the website yesterday but have not visited the website today:
+  MogDB=# SELECT date, (#hll_union_agg(users) OVER two_days) - #users AS lost_uniques FROM daily_uniques WINDOW two_days AS (ORDER BY date ASC ROWS 1 PRECEDING);
+      date    | lost_uniques
+  ------------+--------------
+   2019-02-20 | 0
+   2019-02-21 | 0
+   2019-02-22 | 0
+   2019-02-23 | 0
+   2019-02-24 | 0
+   2019-02-25 | 0
+   2019-02-26 | 0
+   2019-02-27 | 0
+  (8 rows)
+
+  -- Delete the table.
+  MogDB=# drop table facts;
+  MogDB=# drop table daily_uniques;
+  ```
+
+- Scenario 3: The data to be inserted does not meet the requirements of the HLL data structure.
+
+  When inserting data into a column of the HLL type, ensure that the data meets the requirements of the HLL data structure. If the data does not meet the requirements after being parsed, an error will be reported. In the following example, **E\\1234** to be inserted does not meet the requirements of the HLL data structure after being parsed. As a result, an error is reported.
+ + ```sql + MogDB=# create table test(id integer, set hll); + MogDB=# insert into test values(1, 'E\\1234'); + ERROR: not a hll type, size=6 is not enough + MogDB=# drop table test; + ``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/bit-string-types.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/bit-string-types.md index e5db41ff..dffd2398 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/bit-string-types.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/bit-string-types.md @@ -1,50 +1,50 @@ ---- -title: Bit String Types -summary: Bit String Types -author: Guo Huan -date: 2021-04-06 ---- - -# Bit String Types - -Bit strings are strings of 1's and 0's. They can be used to store bit masks. - -MogDB supports two bit string types: bit(n) and bit varying(n), in which **n** is a positive integer. - -The **bit** type data must match the length **n** exactly. It is an error to attempt to store shorter or longer bit strings. The **bit varying** data is of variable length up to the maximum length **n**; longer strings will be rejected. Writing **bit** without a length is equivalent to **bit(1)**, while **bit varying** without a length specification means unlimited length. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> If one explicitly casts a bit-string value to **bit(n)**, it will be truncated or zero-padded on the right to be exactly **n** bits, without raising an error. -> Similarly, if one explicitly casts a bit-string value to **bit varying(n)**, it will be truncated on the right if it is more than **n** bits. - -```sql --- Create a table. -MogDB=# CREATE TABLE bit_type_t1 -( - BT_COL1 INTEGER, - BT_COL2 BIT(3), - BT_COL3 BIT VARYING(5) -) ; - --- Insert data. -MogDB=# INSERT INTO bit_type_t1 VALUES(1, B'101', B'00'); - --- Specify the type length. An error is reported if an inserted string exceeds this length. -MogDB=# INSERT INTO bit_type_t1 VALUES(2, B'10', B'101'); -ERROR: bit string length 2 does not match type bit(3) -CONTEXT: referenced column: bt_col2 - --- Specify the type length. Data is converted if it exceeds this length. -MogDB=# INSERT INTO bit_type_t1 VALUES(2, B'10'::bit(3), B'101'); - --- View data. -MogDB=# SELECT * FROM bit_type_t1; - bt_col1 | bt_col2 | bt_col3 ----------+---------+--------- - 1 | 101 | 00 - 2 | 100 | 101 -(2 rows) - --- Delete the table. -MogDB=# DROP TABLE bit_type_t1; -``` +--- +title: Bit String Types +summary: Bit String Types +author: Guo Huan +date: 2021-04-06 +--- + +# Bit String Types + +Bit strings are strings of 1's and 0's. They can be used to store bit masks. + +MogDB supports two bit string types: bit(n) and bit varying(n), in which **n** is a positive integer. + +The **bit** type data must match the length **n** exactly. It is an error to attempt to store shorter or longer bit strings. The **bit varying** data is of variable length up to the maximum length **n**; longer strings will be rejected. Writing **bit** without a length is equivalent to **bit(1)**, while **bit varying** without a length specification means unlimited length. + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** +> If one explicitly casts a bit-string value to **bit(n)**, it will be truncated or zero-padded on the right to be exactly **n** bits, without raising an error. 
+> Similarly, if one explicitly casts a bit-string value to **bit varying(n)**, it will be truncated on the right if it is more than **n** bits. + +```sql +-- Create a table. +MogDB=# CREATE TABLE bit_type_t1 +( + BT_COL1 INTEGER, + BT_COL2 BIT(3), + BT_COL3 BIT VARYING(5) +) ; + +-- Insert data. +MogDB=# INSERT INTO bit_type_t1 VALUES(1, B'101', B'00'); + +-- Specify the type length. An error is reported if an inserted string exceeds this length. +MogDB=# INSERT INTO bit_type_t1 VALUES(2, B'10', B'101'); +ERROR: bit string length 2 does not match type bit(3) +CONTEXT: referenced column: bt_col2 + +-- Specify the type length. Data is converted if it exceeds this length. +MogDB=# INSERT INTO bit_type_t1 VALUES(2, B'10'::bit(3), B'101'); + +-- View data. +MogDB=# SELECT * FROM bit_type_t1; + bt_col1 | bt_col2 | bt_col3 +---------+---------+--------- + 1 | 101 | 00 + 2 | 100 | 101 +(2 rows) + +-- Delete the table. +MogDB=# DROP TABLE bit_type_t1; +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/data-type-used-by-the-ledger-database.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/data-type-used-by-the-ledger-database.md index cadf006d..89e1be4a 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/data-type-used-by-the-ledger-database.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/data-type-used-by-the-ledger-database.md @@ -1,31 +1,31 @@ ---- -title: Data Type Used by the Ledger Database -summary: Data Type Used by the Ledger Database -author: Zhang Cuiping -date: 2021-10-25 ---- - -# Data Type Used by the Ledger Database - -The ledger database uses the hash16 data type to store row-level hash digests or table-level hash digests, and uses the hash32 data type to store global hash digests or history table verification hashes. - -**Table 1** Hash type of the ledger database - -| Name | Description | Storage Space | Value Range | -| :---------- | :------------------------ | :------------ | :-------------------------------- | -| HASH16 | Stored as an unsigned 64-bit integer | 8 bytes | 0 to +18446744073709551615 | -| HASH32 | Stored as a group of 16 unsigned integer elements | 16 bytes | Value range of an unsigned integer array of 16 elements | - -The hash16 data type is used to store row-level or table-level hash digests in the ledger database. After obtaining the hash sequence of a 16-character hexadecimal string, the system invokes the **hash16in** function to convert the sequence into an unsigned 64-bit integer and stores the integer in a hash16 variable. For example: - -```sql -Hexadecimal string: e697da2eaa3a775b; 64-bit unsigned integer: 16615989244166043483 -Hexadecimal string: ffffffffffffffff; 64-bit unsigned integer: 18446744073709551615 -``` - -The hash32 data type is used to store the global hash digest or history table verification hash in the ledger database. After obtaining the hash sequence of a 32-character hexadecimal string, the system invokes the **hash32in** function to convert the sequence to an array containing 16 unsigned integer elements. 
For example: - -```sql -Hexadecimal string: 685847ed1fe38e18f6b0e2b18c00edee -Hash32 array: [104,88,71,237,31,227,142,24,246,176,226,177,140,0,237,238] -``` +--- +title: Data Type Used by the Ledger Database +summary: Data Type Used by the Ledger Database +author: Zhang Cuiping +date: 2021-10-25 +--- + +# Data Type Used by the Ledger Database + +The ledger database uses the hash16 data type to store row-level hash digests or table-level hash digests, and uses the hash32 data type to store global hash digests or history table verification hashes. + +**Table 1** Hash type of the ledger database + +| Name | Description | Storage Space | Value Range | +| :---------- | :------------------------ | :------------ | :-------------------------------- | +| HASH16 | Stored as an unsigned 64-bit integer | 8 bytes | 0 to +18446744073709551615 | +| HASH32 | Stored as a group of 16 unsigned integer elements | 16 bytes | Value range of an unsigned integer array of 16 elements | + +The hash16 data type is used to store row-level or table-level hash digests in the ledger database. After obtaining the hash sequence of a 16-character hexadecimal string, the system invokes the **hash16in** function to convert the sequence into an unsigned 64-bit integer and stores the integer in a hash16 variable. For example: + +```sql +Hexadecimal string: e697da2eaa3a775b; 64-bit unsigned integer: 16615989244166043483 +Hexadecimal string: ffffffffffffffff; 64-bit unsigned integer: 18446744073709551615 +``` + +The hash32 data type is used to store the global hash digest or history table verification hash in the ledger database. After obtaining the hash sequence of a 32-character hexadecimal string, the system invokes the **hash32in** function to convert the sequence to an array containing 16 unsigned integer elements. For example: + +```sql +Hexadecimal string: 685847ed1fe38e18f6b0e2b18c00edee +Hash32 array: [104,88,71,237,31,227,142,24,246,176,226,177,140,0,237,238] +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/data-types-supported-by-column-store-tables.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/data-types-supported-by-column-store-tables.md index 56894d8e..5fc4375a 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/data-types-supported-by-column-store-tables.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/data-types-supported-by-column-store-tables.md @@ -1,233 +1,233 @@ ---- -title: Data Types Supported by Column-store Tables -summary: Data Types Supported by Column-store Tables -author: Guo Huan -date: 2021-04-06 ---- - -# Data Types Supported by Column-store Tables - -Table 1 lists the data types supported by column-store tables. - -**Table 1** Data types supported by column-store tables - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +--- +title: Data Types Supported by Column-store Tables +summary: Data Types Supported by Column-store Tables +author: Guo Huan +date: 2021-04-06 +--- + +# Data Types Supported by Column-store Tables + +Table 1 lists the data types supported by column-store tables. + +**Table 1** Data types supported by column-store tables + +

Category

-

Data Type

-

Length

-

Supported or Not

-

Numeric Types

-

smallint

-

2

-

Supported

-

integer

-

4

-

Supported

-

bigint

-

8

-

Supported

-

decimal

-

-1

-

Supported

-

numeric

-

-1

-

Supported

-

real

-

4

-

Supported

-

double precision

-

8

-

Supported

-

smallserial

-

2

-

Supported

-

serial

-

4

-

Supported

-

bigserial

-

8

-

Supported

-

largeserial

-

-1

-

Supported

-

Monetary Types

-

money

-

8

-

Supported

-

Character Types

-

character varying(n), varchar(n)

-

-1

-

Supported

-

character(n), char(n)

-

n

-

Supported

-

character, char

-

1

-

Supported

-

text

-

-1

-

Supported

-

nvarchar

-

-1

-

Supported

-

nvarchar2

-

-1

-

Supported

-

name

-

64

-

Not supported

-

Date/Time Types

-

timestamp with time zone

-

8

-

Supported

-

timestamp without time zone

-

8

-

Supported

-

date

-

4

-

Supported

-

time without time zone

-

8

-

Supported

-

time with time zone

-

12

-

Supported

-

interval

-

16

-

Supported

-

big object

-

clob

-

-1

-

Supported

-

blob

-

-1

-

Not supported

-

other types

-

...

-

...

-

Not supported

-
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Category

+

Data Type

+

Length

+

Supported or Not

+

Numeric Types

+

smallint

+

2

+

Supported

+

integer

+

4

+

Supported

+

bigint

+

8

+

Supported

+

decimal

+

-1

+

Supported

+

numeric

+

-1

+

Supported

+

real

+

4

+

Supported

+

double precision

+

8

+

Supported

+

smallserial

+

2

+

Supported

+

serial

+

4

+

Supported

+

bigserial

+

8

+

Supported

+

largeserial

+

-1

+

Supported

+

Monetary Types

+

money

+

8

+

Supported

+

Character Types

+

character varying(n), varchar(n)

+

-1

+

Supported

+

character(n), char(n)

+

n

+

Supported

+

character, char

+

1

+

Supported

+

text

+

-1

+

Supported

+

nvarchar

+

-1

+

Supported

+

nvarchar2

+

-1

+

Supported

+

name

+

64

+

Not supported

+

Date/Time Types

+

timestamp with time zone

+

8

+

Supported

+

timestamp without time zone

+

8

+

Supported

+

date

+

4

+

Supported

+

time without time zone

+

8

+

Supported

+

time with time zone

+

12

+

Supported

+

interval

+

16

+

Supported

+

big object

+

clob

+

-1

+

Supported

+

blob

+

-1

+

Not supported

+

other types

+

...

+

...

+

Not supported

+
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/date-time-types.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/date-time-types.md index 03784909..56b43f6b 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/date-time-types.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/date-time-types.md @@ -1,323 +1,323 @@ ---- -title: Date/Time -summary: Date/Time -author: Guo Huan -date: 2021-04-06 ---- - -# Date/Time - -Table 1 lists the date/time types supported by MogDB. For the operators and built-in functions of the types, see Date and Time Processing Functions and Operators. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If the time format of another database is different from that of MogDB, modify the value of the **DateStyle** parameter to keep them consistent. - -**Table 1** Date/Time types - -| Name | Description | Storage Space | -| :--------------------------------- | :----------------------------------------------------------- | :--------------------------------------------- | -| DATE | Date and time. | 4 bytes (The actual storage space is 8 bytes.) | -| TIME [(p)] [WITHOUT TIME ZONE] | Time within one day.
**p** indicates the precision after the decimal point. The value ranges from 0 to 6. | 8 bytes | -| TIME [(p)] [WITH TIME ZONE] | Time within one day (with time zone).
**p** indicates the precision after the decimal point. The value ranges from 0 to 6. | 12 bytes | -| TIMESTAMP[(p)] [WITHOUT TIME ZONE] | Date and time.
**p** indicates the precision after the decimal point. The value ranges from 0 to 6. | 8 bytes | -| TIMESTAMP[(p)][WITH TIME ZONE] | Date and time (with time zone). TIMESTAMP is also called TIMESTAMPTZ.
**p** indicates the precision after the decimal point. The value ranges from 0 to 6. | 8 bytes | -| SMALLDATETIME | Date and time (without time zone).
The precision level is minute. 31s to 59s are rounded into 1 minute. | 8 bytes | -| INTERVAL DAY (l) TO SECOND (p) | Time interval (X days X hours X minutes X seconds).
- **l**: indicates the precision of days. The value ranges from 0 to 6. For compatibility, the precision functions are not supported.
- **p**: indicates the precision of seconds. The value ranges from 0 to 6. The digit 0 at the end of a decimal number is not displayed. | 16 bytes | -| INTERVAL [FIELDS] [ (p) ] | Time interval.
- fields:**YEAR**, **MONTH**, **DAY**, **HOUR**, **MINUTE**, **SECOND**, **DAY TO HOUR**, **DAY TO MINUTE**, **DAY TO SECOND**, **HOUR TO MINUTE**, **HOUR TO SECOND**, and **MINUTE TO SECOND**.
- **p**: indicates the precision of seconds. The value ranges from 0 to 6. **p** takes effect only when fields are **SECOND**, **DAY TO SECOND**, **HOUR TO SECOND**, or **MINUTE TO SECOND**. The digit 0 at the end of a decimal number is not displayed. | 12 bytes | -| reltime | Relative time interval. The format is as follows:
**X** years **X** mons **X** days **XX:XX:XX**
The Julian calendar is used. It specifies that a year has 365.25 days and a month has 30 days. The relative time interval needs to be calculated based on the input value. The output format is POSTGRES. | 4 bytes | -| abstime | Date and time. The format is as follows:
YYYY-MM-DD hh:mm:ss+timezone
The value range is from 1901-12-13 20:45:53 GMT to 2038-01-18 23:59:59 GMT. The precision level is second. | 4 bytes | - -Examples - -```sql --- Create a table. -MogDB=# CREATE TABLE date_type_tab(coll date); - --- Insert data. -MogDB=# INSERT INTO date_type_tab VALUES (date '12-10-2010'); - --- View data. -MogDB=# SELECT * FROM date_type_tab; - coll ---------------------- - 2010-12-10 00:00:00 -(1 row) - --- Drop the table. -MogDB=# DROP TABLE date_type_tab; - --- Create a table. -MogDB=# CREATE TABLE time_type_tab (da time without time zone ,dai time with time zone,dfgh timestamp without time zone,dfga timestamp with time zone, vbg smalldatetime); - --- Insert data. -MogDB=# INSERT INTO time_type_tab VALUES ('21:21:21','21:21:21 pst','2010-12-12','2013-12-11 pst','2003-04-12 04:05:06'); - --- View data. -MogDB=# SELECT * FROM time_type_tab; - da | dai | dfgh | dfga | vbg -----------+-------------+---------------------+------------------------+--------------------- - 21:21:21 | 21:21:21-08 | 2010-12-12 00:00:00 | 2013-12-11 16:00:00+08 | 2003-04-12 04:05:00 -(1 row) - --- Drop the table. -MogDB=# DROP TABLE time_type_tab; - --- Create a table. -MogDB=# CREATE TABLE day_type_tab (a int,b INTERVAL DAY(3) TO SECOND (4)); - --- Insert data. -MogDB=# INSERT INTO day_type_tab VALUES (1, INTERVAL '3' DAY); - --- View data. -MogDB=# SELECT * FROM day_type_tab; - a | b ----+-------- - 1 | 3 days -(1 row) - --- Drop the table. -MogDB=# DROP TABLE day_type_tab; - --- Create a table. -MogDB=# CREATE TABLE year_type_tab(a int, b interval year (6)); - --- Insert data. -MogDB=# INSERT INTO year_type_tab VALUES(1,interval '2' year); - --- View data. -MogDB=# SELECT * FROM year_type_tab; - a | b ----+--------- - 1 | 2 years -(1 row) - --- Drop the table. -MogDB=# DROP TABLE year_type_tab; -``` - -## Date Input - -Date and time input is accepted in almost any reasonable formats, including ISO 8601, SQL-compatible, and traditional POSTGRES. The system allows you to customize the sequence of day, month, and year in the date input. Set the **DateStyle** parameter to **MDY** to select month-day-year interpretation, **DMY** to select day-month-year interpretation, or **YMD** to select year-month-day interpretation. - -Remember that any date or time literal input needs to be enclosed with single quotes, and the syntax is as follows: - -type [ ( p ) ] 'value' - -The **p** that can be selected in the precision statement is an integer, indicating the number of fractional digits in the **seconds** column. Table 2 shows some possible inputs for the **date** type. - -**Table 2** Date input - -| Example | Description | -| :--------------- | :----------------------------------------------------------- | -| 1999-01-08 | ISO 8601 (recommended format). January 8, 1999 in any mode | -| January 8, 1999 | Unambiguous in any **datestyle** input mode | -| 1/8/1999 | January 8 in **MDY** mode. August 1 in **DMY** mode | -| 1/18/1999 | January 18 in **MDY** mode, rejected in other modes | -| 01/02/03 | - January 2, 2003 in **MDY** mode
- February 1, 2003 in **DMY** mode
- February 3, 2001 in **YMD** mode | -| 1999-Jan-08 | January 8 in any mode | -| Jan-08-1999 | January 8 in any mode | -| 08-Jan-1999 | January 8 in any mode | -| 99-Jan-08 | January 8 in **YMD** mode, else error | -| 08-Jan-99 | January 8, except error in **YMD** mode | -| Jan-08-99 | January 8, except error in **YMD** mode | -| 19990108 | ISO 8601. January 8, 1999 in any mode | -| 990108 | ISO 8601. January 8, 1999 in any mode | -| 1999.008 | Year and day of year | -| J2451187 | Julian date | -| January 8, 99 BC | Year 99 BC | - -Examples - -```sql --- Create a table. -MogDB=# CREATE TABLE date_type_tab(coll date); - --- Insert data. -MogDB=# INSERT INTO date_type_tab VALUES (date '12-10-2010'); - --- View data. -MogDB=# SELECT * FROM date_type_tab; - coll ---------------------- - 2010-12-10 00:00:00 -(1 row) - --- View the date format. -MogDB=# SHOW datestyle; - DateStyle ------------ - ISO, MDY -(1 row) - --- Set the date format. -MogDB=# SET datestyle='YMD'; -SET - --- Insert data. -MogDB=# INSERT INTO date_type_tab VALUES(date '2010-12-11'); - --- View data. -MogDB=# SELECT * FROM date_type_tab; - coll ---------------------- - 2010-12-10 00:00:00 - 2010-12-11 00:00:00 -(2 rows) - --- Drop the table. -MogDB=# DROP TABLE date_type_tab; -``` - -## Time - -The time-of-day types are **TIME [(p)] [WITHOUT TIME ZONE]** and **TIME [(p)] [WITH TIME ZONE]**. **TIME** alone is equivalent to **TIME WITHOUT TIME ZONE**. - -If a time zone is specified in the input for **TIME WITHOUT TIME ZONE**, it is silently ignored. - -For details about the time input types, see Table 3. For details about time zone input types, see Table 4. - -**Table 3** Time input types - -| Example | Description | -| :----------------------------------- | :-------------------------------------- | -| 05:06.8 | ISO 8601 | -| 4:05:06 | ISO 8601 | -| 4:05 | ISO 8601 | -| 40506 | ISO 8601 | -| 4:05 AM | Same as 04:05. AM does not affect value | -| 4:05 PM | Same as 16:05. Input hour must be <= 12 | -| 04:05:06.789-8 | ISO 8601 | -| 04:05:06-08:00 | ISO 8601 | -| 04:05-08:00 | ISO 8601 | -| 040506-08 | ISO 8601 | -| 04:05:06 PST | Time zone specified by abbreviation | -| 2003-04-12 04:05:06 America/New_York | Time zone specified by full name | - -**Table 4** Time zone input types - -| Example | Description | -| :--------------- | :--------------------------------------- | -| PST | Abbreviation (for Pacific Standard Time) | -| America/New_York | Full time zone name | -| -8:00 | ISO-8601 offset for PST | -| -800 | ISO-8601 offset for PST | -| -8 | ISO-8601 offset for PST | - -Examples - -```sql -MogDB=# SELECT time '04:05:06'; - time ----------- - 04:05:06 -(1 row) - -MogDB=# SELECT time '04:05:06 PST'; - time ----------- - 04:05:06 -(1 row) - -MogDB=# SELECT time with time zone '04:05:06 PST'; - timetz -------------- - 04:05:06-08 -(1 row) -``` - -## Special Values - -The special values supported by MogDB are converted to common date/time values when being read. For details, see Table 5. 
- -**Table 5** Special values - -| Input String | Applicable Type | Description | -| :----------- | :-------------------- | :--------------------------------------------- | -| epoch | date, timestamp | 1970-01-01 00:00:00+00 (Unix system time zero) | -| infinity | timestamp | Later than any other timestamps | -| -infinity | timestamp | Earlier than any other timestamps | -| now | date, time, timestamp | Start time of the current transaction | -| today | date, timestamp | Midnight today | -| tomorrow | date, timestamp | Midnight tomorrow | -| yesterday | date, timestamp | Midnight yesterday | -| allballs | time | 00:00:00.00 UTC | - -## Interval Input - -The input of **reltime** can be any valid interval in text format. It can be a number (negative numbers and decimals are also allowed) or a specific time, which must be in SQL standard format, ISO-8601 format, or POSTGRES format. In addition, the text input needs to be enclosed with single quotation marks ("). - -For details, see Table 6 Interval input. - -**Table 6** Interval input - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
InputOutputDescription
602 monsNumbers are used to indicate intervals. The default unit is day. Decimals and negative numbers are allowed. Particularly, a negative interval syntactically means how long before.
31.251 mons 1 days 06:00:00
-365-12 mons -5 days
1 years 1 mons 8 days 12:00:001 years 1 mons 8 days 12:00:00Intervals are in POSTGRES format. They can contain both positive and negative numbers and are case-insensitive. Output is a simplified POSTGRES interval converted from the input.
-13 months -10 hours-1 years -25 days -04:00:00
-2 YEARS +5 MONTHS 10 DAYS-1 years -6 mons -25 days -06:00:00
P-1.1Y10M-3 mons -5 days -06:00:00Intervals are in ISO-8601 format. They can contain both positive and negative numbers and are case-insensitive. Output is a simplified POSTGRES interval converted from the input.
-12H-12:00:00
- -Examples - -```sql --- Create a table. -MogDB=# CREATE TABLE reltime_type_tab(col1 character(30), col2 reltime); - --- Insert data. -MogDB=# INSERT INTO reltime_type_tab VALUES ('90', '90'); -MogDB=# INSERT INTO reltime_type_tab VALUES ('-366', '-366'); -MogDB=# INSERT INTO reltime_type_tab VALUES ('1975.25', '1975.25'); -MogDB=# INSERT INTO reltime_type_tab VALUES ('-2 YEARS +5 MONTHS 10 DAYS', '-2 YEARS +5 MONTHS 10 DAYS'); -MogDB=# INSERT INTO reltime_type_tab VALUES ('30 DAYS 12:00:00', '30 DAYS 12:00:00'); -MogDB=# INSERT INTO reltime_type_tab VALUES ('P-1.1Y10M', 'P-1.1Y10M'); - --- View data. -MogDB=# SELECT * FROM reltime_type_tab; - col1 | col2 ---------------------------------+------------------------------------- - 1975.25 | 5 years 4 mons 29 days - -2 YEARS +5 MONTHS 10 DAYS | -1 years -6 mons -25 days -06:00:00 - P-1.1Y10M | -3 mons -5 days -06:00:00 - -366 | -1 years -18:00:00 - 90 | 3 mons - 30 DAYS 12:00:00 | 1 mon 12:00:00 -(6 rows) - --- Drop the table. -MogDB=# DROP TABLE reltime_type_tab; -``` +--- +title: Date/Time +summary: Date/Time +author: Guo Huan +date: 2021-04-06 +--- + +# Date/Time + +Table 1 lists the date/time types supported by MogDB. For the operators and built-in functions of the types, see Date and Time Processing Functions and Operators. + +> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If the time format of another database is different from that of MogDB, modify the value of the **DateStyle** parameter to keep them consistent. + +**Table 1** Date/Time types + +| Name | Description | Storage Space | +| :--------------------------------- | :----------------------------------------------------------- | :--------------------------------------------- | +| DATE | Date and time. | 4 bytes (The actual storage space is 8 bytes.) | +| TIME [(p)] [WITHOUT TIME ZONE] | Time within one day.
**p** indicates the number of fractional digits in the seconds field. The value ranges from 0 to 6. | 8 bytes |
+| TIME [(p)] [WITH TIME ZONE] | Time within one day (with time zone).<br>
**p** indicates the number of fractional digits in the seconds field. The value ranges from 0 to 6. | 12 bytes |
+| TIMESTAMP[(p)] [WITHOUT TIME ZONE] | Date and time.<br>
**p** indicates the number of fractional digits in the seconds field. The value ranges from 0 to 6. | 8 bytes |
+| TIMESTAMP[(p)][WITH TIME ZONE] | Date and time (with time zone). TIMESTAMP is also called TIMESTAMPTZ.<br>
**p** indicates the number of fractional digits in the seconds field. The value ranges from 0 to 6. | 8 bytes |
+| SMALLDATETIME | Date and time (without time zone).<br>
The precision is to the minute; values of 31 to 59 seconds are rounded up to the next minute. | 8 bytes |
+| INTERVAL DAY (l) TO SECOND (p) | Time interval (X days X hours X minutes X seconds).<br>
- **l**: indicates the precision of days. The value ranges from 0 to 6. For compatibility, this precision is accepted syntactically but does not take effect.<br>
- **p**: indicates the precision of seconds. The value ranges from 0 to 6. Trailing zeros in the fractional seconds are not displayed. | 16 bytes |
+| INTERVAL [FIELDS] [ (p) ] | Time interval.<br>
- fields: **YEAR**, **MONTH**, **DAY**, **HOUR**, **MINUTE**, **SECOND**, **DAY TO HOUR**, **DAY TO MINUTE**, **DAY TO SECOND**, **HOUR TO MINUTE**, **HOUR TO SECOND**, and **MINUTE TO SECOND**.<br>
- **p**: indicates the precision of seconds. The value ranges from 0 to 6. **p** takes effect only when fields are **SECOND**, **DAY TO SECOND**, **HOUR TO SECOND**, or **MINUTE TO SECOND**. Trailing zeros in the fractional seconds are not displayed. | 12 bytes |
+| reltime | Relative time interval. The format is as follows:<br>
**X** years **X** mons **X** days **XX:XX:XX**
The Julian calendar is used, in which a year has 365.25 days and a month has 30 days. The relative time interval is calculated from the input value. The output format is POSTGRES. | 4 bytes |
+| abstime | Date and time. The format is as follows:<br>
YYYY-MM-DD hh:mm:ss+timezone
The value range is from 1901-12-13 20:45:53 GMT to 2038-01-18 23:59:59 GMT. The precision level is second. | 4 bytes | + +Examples + +```sql +-- Create a table. +MogDB=# CREATE TABLE date_type_tab(coll date); + +-- Insert data. +MogDB=# INSERT INTO date_type_tab VALUES (date '12-10-2010'); + +-- View data. +MogDB=# SELECT * FROM date_type_tab; + coll +--------------------- + 2010-12-10 00:00:00 +(1 row) + +-- Drop the table. +MogDB=# DROP TABLE date_type_tab; + +-- Create a table. +MogDB=# CREATE TABLE time_type_tab (da time without time zone ,dai time with time zone,dfgh timestamp without time zone,dfga timestamp with time zone, vbg smalldatetime); + +-- Insert data. +MogDB=# INSERT INTO time_type_tab VALUES ('21:21:21','21:21:21 pst','2010-12-12','2013-12-11 pst','2003-04-12 04:05:06'); + +-- View data. +MogDB=# SELECT * FROM time_type_tab; + da | dai | dfgh | dfga | vbg +----------+-------------+---------------------+------------------------+--------------------- + 21:21:21 | 21:21:21-08 | 2010-12-12 00:00:00 | 2013-12-11 16:00:00+08 | 2003-04-12 04:05:00 +(1 row) + +-- Drop the table. +MogDB=# DROP TABLE time_type_tab; + +-- Create a table. +MogDB=# CREATE TABLE day_type_tab (a int,b INTERVAL DAY(3) TO SECOND (4)); + +-- Insert data. +MogDB=# INSERT INTO day_type_tab VALUES (1, INTERVAL '3' DAY); + +-- View data. +MogDB=# SELECT * FROM day_type_tab; + a | b +---+-------- + 1 | 3 days +(1 row) + +-- Drop the table. +MogDB=# DROP TABLE day_type_tab; + +-- Create a table. +MogDB=# CREATE TABLE year_type_tab(a int, b interval year (6)); + +-- Insert data. +MogDB=# INSERT INTO year_type_tab VALUES(1,interval '2' year); + +-- View data. +MogDB=# SELECT * FROM year_type_tab; + a | b +---+--------- + 1 | 2 years +(1 row) + +-- Drop the table. +MogDB=# DROP TABLE year_type_tab; +``` + +## Date Input + +Date and time input is accepted in almost any reasonable formats, including ISO 8601, SQL-compatible, and traditional POSTGRES. The system allows you to customize the sequence of day, month, and year in the date input. Set the **DateStyle** parameter to **MDY** to select month-day-year interpretation, **DMY** to select day-month-year interpretation, or **YMD** to select year-month-day interpretation. + +Remember that any date or time literal input needs to be enclosed with single quotes, and the syntax is as follows: + +type [ ( p ) ] 'value' + +The **p** that can be selected in the precision statement is an integer, indicating the number of fractional digits in the **seconds** column. Table 2 shows some possible inputs for the **date** type. + +**Table 2** Date input + +| Example | Description | +| :--------------- | :----------------------------------------------------------- | +| 1999-01-08 | ISO 8601 (recommended format). January 8, 1999 in any mode | +| January 8, 1999 | Unambiguous in any **datestyle** input mode | +| 1/8/1999 | January 8 in **MDY** mode. August 1 in **DMY** mode | +| 1/18/1999 | January 18 in **MDY** mode, rejected in other modes | +| 01/02/03 | - January 2, 2003 in **MDY** mode
- February 1, 2003 in **DMY** mode
- February 3, 2001 in **YMD** mode | +| 1999-Jan-08 | January 8 in any mode | +| Jan-08-1999 | January 8 in any mode | +| 08-Jan-1999 | January 8 in any mode | +| 99-Jan-08 | January 8 in **YMD** mode, else error | +| 08-Jan-99 | January 8, except error in **YMD** mode | +| Jan-08-99 | January 8, except error in **YMD** mode | +| 19990108 | ISO 8601. January 8, 1999 in any mode | +| 990108 | ISO 8601. January 8, 1999 in any mode | +| 1999.008 | Year and day of year | +| J2451187 | Julian date | +| January 8, 99 BC | Year 99 BC | + +Examples + +```sql +-- Create a table. +MogDB=# CREATE TABLE date_type_tab(coll date); + +-- Insert data. +MogDB=# INSERT INTO date_type_tab VALUES (date '12-10-2010'); + +-- View data. +MogDB=# SELECT * FROM date_type_tab; + coll +--------------------- + 2010-12-10 00:00:00 +(1 row) + +-- View the date format. +MogDB=# SHOW datestyle; + DateStyle +----------- + ISO, MDY +(1 row) + +-- Set the date format. +MogDB=# SET datestyle='YMD'; +SET + +-- Insert data. +MogDB=# INSERT INTO date_type_tab VALUES(date '2010-12-11'); + +-- View data. +MogDB=# SELECT * FROM date_type_tab; + coll +--------------------- + 2010-12-10 00:00:00 + 2010-12-11 00:00:00 +(2 rows) + +-- Drop the table. +MogDB=# DROP TABLE date_type_tab; +``` + +## Time + +The time-of-day types are **TIME [(p)] [WITHOUT TIME ZONE]** and **TIME [(p)] [WITH TIME ZONE]**. **TIME** alone is equivalent to **TIME WITHOUT TIME ZONE**. + +If a time zone is specified in the input for **TIME WITHOUT TIME ZONE**, it is silently ignored. + +For details about the time input types, see Table 3. For details about time zone input types, see Table 4. + +**Table 3** Time input types + +| Example | Description | +| :----------------------------------- | :-------------------------------------- | +| 05:06.8 | ISO 8601 | +| 4:05:06 | ISO 8601 | +| 4:05 | ISO 8601 | +| 40506 | ISO 8601 | +| 4:05 AM | Same as 04:05. AM does not affect value | +| 4:05 PM | Same as 16:05. Input hour must be <= 12 | +| 04:05:06.789-8 | ISO 8601 | +| 04:05:06-08:00 | ISO 8601 | +| 04:05-08:00 | ISO 8601 | +| 040506-08 | ISO 8601 | +| 04:05:06 PST | Time zone specified by abbreviation | +| 2003-04-12 04:05:06 America/New_York | Time zone specified by full name | + +**Table 4** Time zone input types + +| Example | Description | +| :--------------- | :--------------------------------------- | +| PST | Abbreviation (for Pacific Standard Time) | +| America/New_York | Full time zone name | +| -8:00 | ISO-8601 offset for PST | +| -800 | ISO-8601 offset for PST | +| -8 | ISO-8601 offset for PST | + +Examples + +```sql +MogDB=# SELECT time '04:05:06'; + time +---------- + 04:05:06 +(1 row) + +MogDB=# SELECT time '04:05:06 PST'; + time +---------- + 04:05:06 +(1 row) + +MogDB=# SELECT time with time zone '04:05:06 PST'; + timetz +------------- + 04:05:06-08 +(1 row) +``` + +## Special Values + +The special values supported by MogDB are converted to common date/time values when being read. For details, see Table 5. 
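+
+As a quick, minimal sketch (values such as **now** and **today** depend on when and where the statement runs, so no fixed output is shown):
+
+```sql
+-- 'epoch' resolves to 1970-01-01 00:00:00+00 (Unix system time zero);
+-- 'allballs' resolves to 00:00:00.00 UTC.
+MogDB=# SELECT date 'epoch', time 'allballs';
+```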
+ +**Table 5** Special values + +| Input String | Applicable Type | Description | +| :----------- | :-------------------- | :--------------------------------------------- | +| epoch | date, timestamp | 1970-01-01 00:00:00+00 (Unix system time zero) | +| infinity | timestamp | Later than any other timestamps | +| -infinity | timestamp | Earlier than any other timestamps | +| now | date, time, timestamp | Start time of the current transaction | +| today | date, timestamp | Midnight today | +| tomorrow | date, timestamp | Midnight tomorrow | +| yesterday | date, timestamp | Midnight yesterday | +| allballs | time | 00:00:00.00 UTC | + +## Interval Input + +The input of **reltime** can be any valid interval in text format. It can be a number (negative numbers and decimals are also allowed) or a specific time, which must be in SQL standard format, ISO-8601 format, or POSTGRES format. In addition, the text input needs to be enclosed with single quotation marks ("). + +For details, see Table 6 Interval input. + +**Table 6** Interval input + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
InputOutputDescription
602 monsNumbers are used to indicate intervals. The default unit is day. Decimals and negative numbers are allowed. Particularly, a negative interval syntactically means how long before.
31.251 mons 1 days 06:00:00
-365-12 mons -5 days
1 years 1 mons 8 days 12:00:001 years 1 mons 8 days 12:00:00Intervals are in POSTGRES format. They can contain both positive and negative numbers and are case-insensitive. Output is a simplified POSTGRES interval converted from the input.
-13 months -10 hours-1 years -25 days -04:00:00
-2 YEARS +5 MONTHS 10 DAYS-1 years -6 mons -25 days -06:00:00
P-1.1Y10M-3 mons -5 days -06:00:00Intervals are in ISO-8601 format. They can contain both positive and negative numbers and are case-insensitive. Output is a simplified POSTGRES interval converted from the input.
-12H-12:00:00
+ +Examples + +```sql +-- Create a table. +MogDB=# CREATE TABLE reltime_type_tab(col1 character(30), col2 reltime); + +-- Insert data. +MogDB=# INSERT INTO reltime_type_tab VALUES ('90', '90'); +MogDB=# INSERT INTO reltime_type_tab VALUES ('-366', '-366'); +MogDB=# INSERT INTO reltime_type_tab VALUES ('1975.25', '1975.25'); +MogDB=# INSERT INTO reltime_type_tab VALUES ('-2 YEARS +5 MONTHS 10 DAYS', '-2 YEARS +5 MONTHS 10 DAYS'); +MogDB=# INSERT INTO reltime_type_tab VALUES ('30 DAYS 12:00:00', '30 DAYS 12:00:00'); +MogDB=# INSERT INTO reltime_type_tab VALUES ('P-1.1Y10M', 'P-1.1Y10M'); + +-- View data. +MogDB=# SELECT * FROM reltime_type_tab; + col1 | col2 +--------------------------------+------------------------------------- + 1975.25 | 5 years 4 mons 29 days + -2 YEARS +5 MONTHS 10 DAYS | -1 years -6 mons -25 days -06:00:00 + P-1.1Y10M | -3 mons -5 days -06:00:00 + -366 | -1 years -18:00:00 + 90 | 3 mons + 30 DAYS 12:00:00 | 1 mon 12:00:00 +(6 rows) + +-- Drop the table. +MogDB=# DROP TABLE reltime_type_tab; +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/geometric.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/geometric.md index 97fa4ea7..04ca1fe8 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/geometric.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/geometric.md @@ -1,118 +1,118 @@ ---- -title: Geometric -summary: Geometric -author: Guo Huan -date: 2021-04-06 ---- - -# Geometric - -Table 1 lists the geometric types that can be used in MogDB. The most fundamental type, the point, forms the basis for all of the other types. - -**Table 1** Geometric types - -| Name | Storage Space | Description | Representation | -| :------ | :------------ | :------------------------------- | :---------------------------------- | -| point | 16 bytes | Point on a plane | (x,y) | -| lseg | 32 bytes | Finite line segment | ((x1,y1),(x2,y2)) | -| box | 32 bytes | Rectangle | ((x1,y1),(x2,y2)) | -| path | 16+16n bytes | Closed path (similar to polygon) | ((x1,y1),…) | -| path | 16+16n bytes | Open path | [(x1,y1),…] | -| polygon | 40+16n bytes | Polygon (similar to closed path) | ((x1,y1),…) | -| circle | 24 bytes | Circle | <(x,y),r> (center point and radius) | - -A rich set of functions and operators is available in MogDB to perform various geometric operations, such as scaling, translation, rotation, and determining intersections. For details, see Geometric Functions and Operators. - -## Points - -Points are the fundamental two-dimensional building block for geometric types. Values of the **point** type are specified using either of the following syntax: - -```sql -( x , y ) -x , y -``` - -where x and y are the respective coordinates, as floating-point numbers. - -Points are output using the first syntax. - -## Line Segments - -Line segments (**lseg**) are represented by pairs of points. Values of the **lseg** type are specified using any of the following syntax: - -```sql -[ ( x1 , y1 ) , ( x2 , y2 ) ] -( ( x1 , y1 ) , ( x2 , y2 ) ) -( x1 , y1 ) , ( x2 , y2 ) -x1 , y1 , x2 , y2 -``` - -where (x1,y1) and (x2,y2) are the end points of the line segment. - -Line segments are output using the first syntax. - -## Boxes - -Boxes are represented by pairs of points that are opposite corners of the box. 
Values of the **box** type are specified using any of the following syntax: - -``` -( ( x1 , y1 ) , ( x2 , y2 ) ) -( x1 , y1 ) , ( x2 , y2 ) -x1 , y1 , x2 , y2 -``` - -where (x1,y1) and (x2,y2) are any two opposite corners of the box. - -Boxes are output using the second syntax. - -Any two opposite corners can be supplied on input, but in this order, the values will be reordered as needed to store the upper right and lower left corners. - -## Paths - -Paths are represented by lists of connected points. Paths can be open, where the first and last points in the list are considered not connected, or closed, where the first and last points are considered connected. - -Values of the **path** type are specified using any of the following syntax: - -``` -[ ( x1 , y1 ) , ... , ( xn , yn ) ] -( ( x1 , y1 ) , ... , ( xn , yn ) ) -( x1 , y1 ) , ... , ( xn , yn ) -( x1 , y1 , ... , xn , yn ) -x1 , y1 , ... , xn , yn -``` - -where the points are the end points of the line segments comprising the path. Square brackets ([]) indicate an open path, while parentheses (()) indicate a closed path. When the outermost parentheses are omitted, as in the third through fifth syntax, a closed path is assumed. - -Paths are output using the first or second syntax. - -## Polygons - -Polygons are represented by lists of points (the vertexes of the polygon). Polygons are very similar to closed paths, but are stored differently and have their own set of support functions. - -Values of the **polygon** type are specified using any of the following syntax: - -``` -( ( x1 , y1 ) , ... , ( xn , yn ) ) -( x1 , y1 ) , ... , ( xn , yn ) -( x1 , y1 , ... , xn , yn ) -x1 , y1 , ... , xn , yn -``` - -where the points are the end points of the line segments comprising the boundary of the polygon. - -Polygons are output using the first syntax. - -## Circles - -Circles are represented by a center point and radius. Values of the **circle** type are specified using any of the following syntax: - -``` -< ( x , y ) , r > -( ( x , y ) , r ) -( x , y ) , r -x , y , r -``` - -where **(x,y)** is the center point and **r** is the radius of the circle. - -Circles are output using the first syntax. +--- +title: Geometric +summary: Geometric +author: Guo Huan +date: 2021-04-06 +--- + +# Geometric + +Table 1 lists the geometric types that can be used in MogDB. The most fundamental type, the point, forms the basis for all of the other types. + +**Table 1** Geometric types + +| Name | Storage Space | Description | Representation | +| :------ | :------------ | :------------------------------- | :---------------------------------- | +| point | 16 bytes | Point on a plane | (x,y) | +| lseg | 32 bytes | Finite line segment | ((x1,y1),(x2,y2)) | +| box | 32 bytes | Rectangle | ((x1,y1),(x2,y2)) | +| path | 16+16n bytes | Closed path (similar to polygon) | ((x1,y1),…) | +| path | 16+16n bytes | Open path | [(x1,y1),…] | +| polygon | 40+16n bytes | Polygon (similar to closed path) | ((x1,y1),…) | +| circle | 24 bytes | Circle | <(x,y),r> (center point and radius) | + +A rich set of functions and operators is available in MogDB to perform various geometric operations, such as scaling, translation, rotation, and determining intersections. For details, see Geometric Functions and Operators. + +## Points + +Points are the fundamental two-dimensional building block for geometric types. 
Values of the **point** type are specified using either of the following syntaxes:
+
+```sql
+( x , y )
+x , y
+```
+
+where x and y are the respective coordinates, as floating-point numbers.
+
+Points are output using the first syntax.
+
+## Line Segments
+
+Line segments (**lseg**) are represented by pairs of points. Values of the **lseg** type are specified using any of the following syntaxes:
+
+```sql
+[ ( x1 , y1 ) , ( x2 , y2 ) ]
+( ( x1 , y1 ) , ( x2 , y2 ) )
+( x1 , y1 ) , ( x2 , y2 )
+x1 , y1 , x2 , y2
+```
+
+where (x1,y1) and (x2,y2) are the end points of the line segment.
+
+Line segments are output using the first syntax.
+
+## Boxes
+
+Boxes are represented by pairs of points that are opposite corners of the box. Values of the **box** type are specified using any of the following syntaxes:
+
+```
+( ( x1 , y1 ) , ( x2 , y2 ) )
+( x1 , y1 ) , ( x2 , y2 )
+x1 , y1 , x2 , y2
+```
+
+where (x1,y1) and (x2,y2) are any two opposite corners of the box.
+
+Boxes are output using the second syntax.
+
+Any two opposite corners can be supplied on input; the values are automatically reordered as needed to store the upper right and lower left corners, in that order.
+
+## Paths
+
+Paths are represented by lists of connected points. Paths can be open, where the first and last points in the list are considered not connected, or closed, where the first and last points are considered connected.
+
+Values of the **path** type are specified using any of the following syntaxes:
+
+```
+[ ( x1 , y1 ) , ... , ( xn , yn ) ]
+( ( x1 , y1 ) , ... , ( xn , yn ) )
+( x1 , y1 ) , ... , ( xn , yn )
+( x1 , y1 , ... , xn , yn )
+x1 , y1 , ... , xn , yn
+```
+
+where the points are the end points of the line segments comprising the path. Square brackets ([]) indicate an open path, while parentheses (()) indicate a closed path. When the outermost parentheses are omitted, as in the third through fifth syntaxes, a closed path is assumed.
+
+Paths are output using the first or second syntax.
+
+## Polygons
+
+Polygons are represented by lists of points (the vertexes of the polygon). Polygons are very similar to closed paths, but are stored differently and have their own set of support functions.
+
+Values of the **polygon** type are specified using any of the following syntaxes:
+
+```
+( ( x1 , y1 ) , ... , ( xn , yn ) )
+( x1 , y1 ) , ... , ( xn , yn )
+( x1 , y1 , ... , xn , yn )
+x1 , y1 , ... , xn , yn
+```
+
+where the points are the end points of the line segments comprising the boundary of the polygon.
+
+Polygons are output using the first syntax.
+
+## Circles
+
+Circles are represented by a center point and radius. Values of the **circle** type are specified using any of the following syntaxes:
+
+```
+< ( x , y ) , r >
+( ( x , y ) , r )
+( x , y ) , r
+x , y , r
+```
+
+where **(x,y)** is the center point and **r** is the radius of the circle.
+
+Circles are output using the first syntax.
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/monetary.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/monetary.md
index 7cb39a26..a4d89511 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/monetary.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/monetary.md
@@ -1,34 +1,34 @@
----
-title: Monetary Types
-summary: Monetary Types
-author: Guo Huan
-date: 2021-04-06
----
-
-# Monetary Types
-
-The money type stores a currency amount with fixed fractional precision. 
- -The range shown in Table 1 assumes there are two fractional digits. Input is accepted in a variety of formats, including integer and floating-point literals, as well as typical currency formatting, such as "$1,000.00". Output is generally in the last format but depends on the locale. - -**Table 1** Monetary type - -| Name | Storage Space | Description | Range | -| :---- | :------------ | :-------------- | :--------------------------------------------- | -| money | 8 bytes | Currency amount | -92233720368547758.08 to +92233720368547758.07 | - -Values of the numeric, int, and bigint data types can be cast to money. Conversion from the real and double precision data types can be done by casting to numeric first, for example: - -```sql -MogDB=# SELECT '12.34'::float8::numeric::money; -``` - -However, this is not recommended. Floating point numbers should not be used to handle money due to the potential for rounding errors. - -A money value can be cast to numeric without loss of precision. Conversion to other types could potentially lose precision, and must also be done in two stages: - -```sql -MogDB=# SELECT '52093.89'::money::numeric::float8; -``` - -When a money value is divided by another money value, the result is double precision (that is, a pure number, not money); the currency units cancel each other out in the division. +--- +title: Monetary Types +summary: Monetary Types +author: Guo Huan +date: 2021-04-06 +--- + +# Monetary Types + +The money type stores a currency amount with fixed fractional precision. + +The range shown in Table 1 assumes there are two fractional digits. Input is accepted in a variety of formats, including integer and floating-point literals, as well as typical currency formatting, such as "$1,000.00". Output is generally in the last format but depends on the locale. + +**Table 1** Monetary type + +| Name | Storage Space | Description | Range | +| :---- | :------------ | :-------------- | :--------------------------------------------- | +| money | 8 bytes | Currency amount | -92233720368547758.08 to +92233720368547758.07 | + +Values of the numeric, int, and bigint data types can be cast to money. Conversion from the real and double precision data types can be done by casting to numeric first, for example: + +```sql +MogDB=# SELECT '12.34'::float8::numeric::money; +``` + +However, this is not recommended. Floating point numbers should not be used to handle money due to the potential for rounding errors. + +A money value can be cast to numeric without loss of precision. Conversion to other types could potentially lose precision, and must also be done in two stages: + +```sql +MogDB=# SELECT '52093.89'::money::numeric::float8; +``` + +When a money value is divided by another money value, the result is double precision (that is, a pure number, not money); the currency units cancel each other out in the division. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/network-address.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/network-address.md index ca8b613e..64f6df4f 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/network-address.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/network-address.md @@ -1,67 +1,67 @@ ---- -title: Network Address -summary: Network Address -author: Guo Huan -date: 2021-04-06 ---- - -# Network Address - -MogDB offers data types to store IPv4, IPv6, and MAC addresses. 
- -It is better to use these types instead of plain text types to store network addresses, because these types offer input error checking and specialized operators and functions. - -**Table 1** Network address types - -| Name | Storage Space | Description | -| :------ | :------------ | :------------------------------ | -| cidr | 7 or 19 bytes | IPv4 or IPv6 networks | -| inet | 7 or 19 bytes | IPv4 or IPv6 hosts and networks | -| macaddr | 6 bytes | MAC address | - -When sorting **inet** or **cidr** data types, IPv4 addresses will always sort before IPv6 addresses, including IPv4 addresses encapsulated or mapped to IPv6 addresses, such as ::10.2.3.4 or ::ffff:10.4.3.2. - -## cidr - -The **cidr** type (Classless Inter-Domain Routing) holds an IPv4 or IPv6 network specification. The format for specifying networks is **address/y** where **address** is the network represented as an IPv4 or IPv6 address, and **y** is the number of bits in the netmask. If **y** is omitted, it is calculated using assumptions from the older classful network numbering system, except it will be at least large enough to include all of the octets written in the input. - -**Table 2** **cidr** type input examples - -| cidr Input | cidr Output | abbrev(cidr) | -| :----------------------------------- | :----------------------------------- | :------------------------------- | -| 192.168.100.128/25 | 192.168.100.128/25 | 192.168.100.128/25 | -| 192.168/24 | 192.168.0.0/24 | 192.168.0/24 | -| 192.168/25 | 192.168.0.0/25 | 192.168.0.0/25 | -| 192.168.1 | 192.168.1.0/24 | 192.168.1/24 | -| 192.168 | 192.168.0.0/24 | 192.168.0/24 | -| 10.1.2 | 10.1.2.0/24 | 10.1.2/24 | -| 10.1 | 10.1.0.0/16 | 10.1/16 | -| 10 | 10.0.0.0/8 | 10/8 | -| 10.1.2.3/32 | 10.1.2.3/32 | 10.1.2.3/32 | -| 2001:4f8:3:ba::/64 | 2001:4f8:3:ba::/64 | 2001:4f8:3:ba::/64 | -| 2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128 | 2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128 | 2001:4f8:3:ba:2e0:81ff:fe22:d1f1 | -| ::ffff:1.2.3.0/120 | ::ffff:1.2.3.0/120 | ::ffff:1.2.3⁄120 | -| ::ffff:1.2.3.0/128 | ::ffff:1.2.3.0/128 | ::ffff:1.2.3.0/128 | - -## inet - -The **inet** type holds an IPv4 or IPv6 host address, and optionally its subnet, all in one field. The subnet is represented by the number of network address bits present in the host address (the "netmask"). If the netmask is 32 and the address is IPv4, then the value does not indicate a subnet, only a single host. In IPv6, the address length is 128 bits, so 128 bits specify a unique host address. - -The input format for this type is **address/y** where address is an IPv4 or IPv6 address and **y** is the number of bits in the netmask. If the **/y** portion is missing, the netmask is 32 for IPv4 and 128 for IPv6, so the value represents just a single host. On display, the **/y** portion is suppressed if the netmask specifies a single host. - -The essential difference between the **inet** and **cidr** data types is that **inet** accepts values with nonzero bits to the right of the netmask, whereas **cidr** does not. - -## macaddr - -The **macaddr** type stores MAC addresses, known for example from Ethernet card hardware addresses (although MAC addresses are used for other purposes as well). Input is accepted in the following formats: - -``` -'08:00:2b:01:02:03' -'08-00-2b-01-02-03' -'08002b:010203' -'08002b-010203' -'0800.2b01.0203' -'08002b010203' -``` - -These examples would all specify the same address. Upper and lower cases are accepted for the digits a through f. Output is always in the first of the forms shown. 
+---
+title: Network Address
+summary: Network Address
+author: Guo Huan
+date: 2021-04-06
+---
+
+# Network Address
+
+MogDB offers data types to store IPv4, IPv6, and MAC addresses.
+
+It is better to use these types instead of plain text types to store network addresses, because these types offer input error checking and specialized operators and functions.
+
+**Table 1** Network address types
+
+| Name | Storage Space | Description |
+| :------ | :------------ | :------------------------------ |
+| cidr | 7 or 19 bytes | IPv4 or IPv6 networks |
+| inet | 7 or 19 bytes | IPv4 or IPv6 hosts and networks |
+| macaddr | 6 bytes | MAC address |
+
+When sorting **inet** or **cidr** data types, IPv4 addresses will always sort before IPv6 addresses, including IPv4 addresses encapsulated or mapped to IPv6 addresses, such as ::10.2.3.4 or ::ffff:10.4.3.2.
+
+## cidr
+
+The **cidr** type (Classless Inter-Domain Routing) holds an IPv4 or IPv6 network specification. The format for specifying networks is **address/y** where **address** is the network represented as an IPv4 or IPv6 address, and **y** is the number of bits in the netmask. If **y** is omitted, it is calculated using assumptions from the older classful network numbering system, except it will be at least large enough to include all of the octets written in the input.
+
+**Table 2** **cidr** type input examples
+
+| cidr Input | cidr Output | abbrev(cidr) |
+| :----------------------------------- | :----------------------------------- | :------------------------------- |
+| 192.168.100.128/25 | 192.168.100.128/25 | 192.168.100.128/25 |
+| 192.168/24 | 192.168.0.0/24 | 192.168.0/24 |
+| 192.168/25 | 192.168.0.0/25 | 192.168.0.0/25 |
+| 192.168.1 | 192.168.1.0/24 | 192.168.1/24 |
+| 192.168 | 192.168.0.0/24 | 192.168.0/24 |
+| 10.1.2 | 10.1.2.0/24 | 10.1.2/24 |
+| 10.1 | 10.1.0.0/16 | 10.1/16 |
+| 10 | 10.0.0.0/8 | 10/8 |
+| 10.1.2.3/32 | 10.1.2.3/32 | 10.1.2.3/32 |
+| 2001:4f8:3:ba::/64 | 2001:4f8:3:ba::/64 | 2001:4f8:3:ba::/64 |
+| 2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128 | 2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128 | 2001:4f8:3:ba:2e0:81ff:fe22:d1f1 |
+| ::ffff:1.2.3.0/120 | ::ffff:1.2.3.0/120 | ::ffff:1.2.3/120 |
+| ::ffff:1.2.3.0/128 | ::ffff:1.2.3.0/128 | ::ffff:1.2.3.0/128 |
+
+## inet
+
+The **inet** type holds an IPv4 or IPv6 host address, and optionally its subnet, all in one field. The subnet is represented by the number of network address bits present in the host address (the "netmask"). If the netmask is 32 and the address is IPv4, then the value does not indicate a subnet, only a single host. In IPv6, the address length is 128 bits, so 128 bits specify a unique host address.
+
+The input format for this type is **address/y** where address is an IPv4 or IPv6 address and **y** is the number of bits in the netmask. If the **/y** portion is missing, the netmask is 32 for IPv4 and 128 for IPv6, so the value represents just a single host. On display, the **/y** portion is suppressed if the netmask specifies a single host.
+
+The essential difference between the **inet** and **cidr** data types is that **inet** accepts values with nonzero bits to the right of the netmask, whereas **cidr** does not.
+
+## macaddr
+
+The **macaddr** type stores MAC addresses, known for example from Ethernet card hardware addresses (although MAC addresses are used for other purposes as well). 
Input is accepted in the following formats: + +``` +'08:00:2b:01:02:03' +'08-00-2b-01-02-03' +'08002b:010203' +'08002b-010203' +'0800.2b01.0203' +'08002b010203' +``` + +These examples would all specify the same address. Upper and lower cases are accepted for the digits a through f. Output is always in the first of the forms shown. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/object-identifier-types.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/object-identifier-types.md index de58932b..3dda0c83 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/object-identifier-types.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/object-identifier-types.md @@ -1,90 +1,90 @@ ---- -title: OID Types -summary: OID Types -author: Guo Huan -date: 2021-04-06 ---- - -# OID Types - -OIDs are used internally by MogDB as primary keys for various system catalogs. OIDs are not added to user-created tables by the system. The **OID** type represents an object identifier. - -The **OID** type is currently implemented as an unsigned four-byte integer. So, using a user-created table's **OID** column as a primary key is discouraged. - -**Table 1** Object identifier types - -| Name | Reference | Description | Example | -| :------------ | :----------- | :----------------------------------------------------------- | :------------------------------------ | -| OID | N/A | Numeric object identifier | 564182 | -| CID | N/A | A command identifier. This is the data type of the system columns **cmin** and **cmax**. Command identifiers are 32-bit quantities. | N/A | -| XID | N/A | A transaction identifier. This is the data type of the system columns **xmin** and **xmax**. Transaction identifiers are 64-bit quantities. | N/A | -| TID | N/A | A row identifier. This is the data type of the system column **ctid**. A row ID is a pair (block number, tuple index within block) that identifies the physical location of the row within its table. | N/A | -| REGCONFIG | pg_ts_config | Text search configuration | english | -| REGDICTIONARY | pg_ts_dict | Text search dictionary | simple | -| REGOPER | pg_operator | Operator name | N/A | -| REGOPERATOR | pg_operator | Operator with argument types | *(integer,integer) or -(NONE,integer) | -| REGPROC | pg_proc | Function name | sum | -| REGPROCEDURE | pg_proc | Function with argument types | sum(int4) | -| REGCLASS | pg_class | Relation name | pg_type | -| REGTYPE | pg_type | Data type name | integer | - -The **OID** type is used for a column in the database system catalog. - -Example: - -```sql -MogDB=# SELECT oid FROM pg_class WHERE relname = 'pg_type'; - oid ------- - 1247 -(1 row) -``` - -The alias type for **OID** is **REGCLASS** which allows simplified search for **OID** values. 
- -Example: - -```sql -MogDB=# SELECT attrelid,attname,atttypid,attstattarget FROM pg_attribute WHERE attrelid = 'pg_type'::REGCLASS; - attrelid | attname | atttypid | attstattarget -----------+------------+----------+--------------- - 1247 | xc_node_id | 23 | 0 - 1247 | tableoid | 26 | 0 - 1247 | cmax | 29 | 0 - 1247 | xmax | 28 | 0 - 1247 | cmin | 29 | 0 - 1247 | xmin | 28 | 0 - 1247 | oid | 26 | 0 - 1247 | ctid | 27 | 0 - 1247 | typname | 19 | -1 - 1247 | typnamespace | 26 | -1 - 1247 | typowner | 26 | -1 - 1247 | typlen | 21 | -1 - 1247 | typbyval | 16 | -1 - 1247 | typtype | 18 | -1 - 1247 | typcategory | 18 | -1 - 1247 | typispreferred | 16 | -1 - 1247 | typisdefined | 16 | -1 - 1247 | typdelim | 18 | -1 - 1247 | typrelid | 26 | -1 - 1247 | typelem | 26 | -1 - 1247 | typarray | 26 | -1 - 1247 | typinput | 24 | -1 - 1247 | typoutput | 24 | -1 - 1247 | typreceive | 24 | -1 - 1247 | typsend | 24 | -1 - 1247 | typmodin | 24 | -1 - 1247 | typmodout | 24 | -1 - 1247 | typanalyze | 24 | -1 - 1247 | typalign | 18 | -1 - 1247 | typstorage | 18 | -1 - 1247 | typnotnull | 16 | -1 - 1247 | typbasetype | 26 | -1 - 1247 | typtypmod | 23 | -1 - 1247 | typndims | 23 | -1 - 1247 | typcollation | 26 | -1 - 1247 | typdefaultbin | 194 | -1 - 1247 | typdefault | 25 | -1 - 1247 | typacl | 1034 | -1 -(38 rows) -``` +--- +title: OID Types +summary: OID Types +author: Guo Huan +date: 2021-04-06 +--- + +# OID Types + +OIDs are used internally by MogDB as primary keys for various system catalogs. OIDs are not added to user-created tables by the system. The **OID** type represents an object identifier. + +The **OID** type is currently implemented as an unsigned four-byte integer. So, using a user-created table's **OID** column as a primary key is discouraged. + +**Table 1** Object identifier types + +| Name | Reference | Description | Example | +| :------------ | :----------- | :----------------------------------------------------------- | :------------------------------------ | +| OID | N/A | Numeric object identifier | 564182 | +| CID | N/A | A command identifier. This is the data type of the system columns **cmin** and **cmax**. Command identifiers are 32-bit quantities. | N/A | +| XID | N/A | A transaction identifier. This is the data type of the system columns **xmin** and **xmax**. Transaction identifiers are 64-bit quantities. | N/A | +| TID | N/A | A row identifier. This is the data type of the system column **ctid**. A row ID is a pair (block number, tuple index within block) that identifies the physical location of the row within its table. | N/A | +| REGCONFIG | pg_ts_config | Text search configuration | english | +| REGDICTIONARY | pg_ts_dict | Text search dictionary | simple | +| REGOPER | pg_operator | Operator name | N/A | +| REGOPERATOR | pg_operator | Operator with argument types | *(integer,integer) or -(NONE,integer) | +| REGPROC | pg_proc | Function name | sum | +| REGPROCEDURE | pg_proc | Function with argument types | sum(int4) | +| REGCLASS | pg_class | Relation name | pg_type | +| REGTYPE | pg_type | Data type name | integer | + +The **OID** type is used for a column in the database system catalog. + +Example: + +```sql +MogDB=# SELECT oid FROM pg_class WHERE relname = 'pg_type'; + oid +------ + 1247 +(1 row) +``` + +The alias type for **OID** is **REGCLASS** which allows simplified search for **OID** values. 
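+
+For instance (a minimal sketch), casting a relation name to **REGCLASS** and then to **OID** returns the same identifier as the **pg_class** query above:
+
+```sql
+MogDB=# SELECT 'pg_type'::REGCLASS::oid;
+ oid
+------
+ 1247
+(1 row)
+```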
+ +Example: + +```sql +MogDB=# SELECT attrelid,attname,atttypid,attstattarget FROM pg_attribute WHERE attrelid = 'pg_type'::REGCLASS; + attrelid | attname | atttypid | attstattarget +----------+------------+----------+--------------- + 1247 | xc_node_id | 23 | 0 + 1247 | tableoid | 26 | 0 + 1247 | cmax | 29 | 0 + 1247 | xmax | 28 | 0 + 1247 | cmin | 29 | 0 + 1247 | xmin | 28 | 0 + 1247 | oid | 26 | 0 + 1247 | ctid | 27 | 0 + 1247 | typname | 19 | -1 + 1247 | typnamespace | 26 | -1 + 1247 | typowner | 26 | -1 + 1247 | typlen | 21 | -1 + 1247 | typbyval | 16 | -1 + 1247 | typtype | 18 | -1 + 1247 | typcategory | 18 | -1 + 1247 | typispreferred | 16 | -1 + 1247 | typisdefined | 16 | -1 + 1247 | typdelim | 18 | -1 + 1247 | typrelid | 26 | -1 + 1247 | typelem | 26 | -1 + 1247 | typarray | 26 | -1 + 1247 | typinput | 24 | -1 + 1247 | typoutput | 24 | -1 + 1247 | typreceive | 24 | -1 + 1247 | typsend | 24 | -1 + 1247 | typmodin | 24 | -1 + 1247 | typmodout | 24 | -1 + 1247 | typanalyze | 24 | -1 + 1247 | typalign | 18 | -1 + 1247 | typstorage | 18 | -1 + 1247 | typnotnull | 16 | -1 + 1247 | typbasetype | 26 | -1 + 1247 | typtypmod | 23 | -1 + 1247 | typndims | 23 | -1 + 1247 | typcollation | 26 | -1 + 1247 | typdefaultbin | 194 | -1 + 1247 | typdefault | 25 | -1 + 1247 | typacl | 1034 | -1 +(38 rows) +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/pseudo-types.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/pseudo-types.md index 4a0a47e4..7f51e483 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/pseudo-types.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/pseudo-types.md @@ -1,65 +1,65 @@ ---- -title: Pseudo-Types -summary: Pseudo-Types -author: Guo Huan -date: 2021-04-06 ---- - -# Pseudo-Types - -MogDB type system contains a number of special-purpose entries that are collectively called pseudo-types. A pseudo-type cannot be used as a column data type, but it can be used to declare a function's argument or result type. - -Each of the available pseudo-types is useful in situations where a function's behavior does not correspond to simply taking or returning a value of a specific SQL data type. Table 1 lists all pseudo-types. - -**Table 1** Pseudo-types - -| Name | Description | -| :--------------- | :----------------------------------------------------------- | -| any | Indicates that a function accepts any input data type. | -| anyelement | Indicates that a function accepts any data type. | -| anyarray | Indicates that a function accepts any array data type. | -| anynonarray | Indicates that a function accepts any non-array data type. | -| anyenum | Indicates that a function accepts any enum data type. | -| anyrange | Indicates that a function accepts any range data type. | -| cstring | Indicates that a function accepts or returns a null-terminated C string. | -| internal | Indicates that a function accepts or returns a server-internal data type. | -| language_handler | Indicates that a procedural language call handler is declared to return **language_handler**. | -| fdw_handler | Indicates that a foreign-data wrapper handler is declared to return **fdw_handler**. | -| record | Identifies a function returning an unspecified row type. | -| trigger | Indicates that a trigger function is declared to return **trigger**. | -| void | Indicates that a function returns no value. | -| opaque | Indicates an obsolete type name that formerly served all the above purposes. 
| - -Functions coded in C (whether built in or dynamically loaded) can be declared to accept or return any of these pseudo data types. It is up to the function author to ensure that the function will behave safely when a pseudo-type is used as an argument type. - -Functions coded in procedural languages can use pseudo-types only as allowed by their implementation languages. At present the procedural languages all forbid use of a pseudo-type as argument type, and allow only **void** and **record** as a result type. Some also support polymorphic functions using the **anyelement**, **anyarray**, **anynonarray**, **anyenum**, and **anyrange** types. - -The **internal** pseudo-type is used to declare functions that are meant only to be called internally by the database system, and not by direct invocation in an SQL query. If a function has at least one **internal**-type argument then it cannot be called from SQL. You are advised not to create any function that is declared to return internal unless it has at least one **internal** argument. - -Example: - -```sql --- Create a table. -MogDB=# create table t1 (a int); - --- Insert two data records. -MogDB=# insert into t1 values(1),(2); - --- Create the showall() function: -MogDB=# CREATE OR REPLACE FUNCTION showall() RETURNS SETOF record -AS $$ SELECT count(*) from t1; $$ -LANGUAGE SQL; - --- Invoke the showall() function: -MogDB=# SELECT showall(); - showall ---------- - (2) -(1 row) - --- Delete the function: -MogDB=# DROP FUNCTION showall(); - --- Delete the table. -MogDB=# drop table t1; -``` +--- +title: Pseudo-Types +summary: Pseudo-Types +author: Guo Huan +date: 2021-04-06 +--- + +# Pseudo-Types + +MogDB type system contains a number of special-purpose entries that are collectively called pseudo-types. A pseudo-type cannot be used as a column data type, but it can be used to declare a function's argument or result type. + +Each of the available pseudo-types is useful in situations where a function's behavior does not correspond to simply taking or returning a value of a specific SQL data type. Table 1 lists all pseudo-types. + +**Table 1** Pseudo-types + +| Name | Description | +| :--------------- | :----------------------------------------------------------- | +| any | Indicates that a function accepts any input data type. | +| anyelement | Indicates that a function accepts any data type. | +| anyarray | Indicates that a function accepts any array data type. | +| anynonarray | Indicates that a function accepts any non-array data type. | +| anyenum | Indicates that a function accepts any enum data type. | +| anyrange | Indicates that a function accepts any range data type. | +| cstring | Indicates that a function accepts or returns a null-terminated C string. | +| internal | Indicates that a function accepts or returns a server-internal data type. | +| language_handler | Indicates that a procedural language call handler is declared to return **language_handler**. | +| fdw_handler | Indicates that a foreign-data wrapper handler is declared to return **fdw_handler**. | +| record | Identifies a function returning an unspecified row type. | +| trigger | Indicates that a trigger function is declared to return **trigger**. | +| void | Indicates that a function returns no value. | +| opaque | Indicates an obsolete type name that formerly served all the above purposes. | + +Functions coded in C (whether built in or dynamically loaded) can be declared to accept or return any of these pseudo data types. 
It is up to the function author to ensure that the function will behave safely when a pseudo-type is used as an argument type. + +Functions coded in procedural languages can use pseudo-types only as allowed by their implementation languages. At present the procedural languages all forbid use of a pseudo-type as argument type, and allow only **void** and **record** as a result type. Some also support polymorphic functions using the **anyelement**, **anyarray**, **anynonarray**, **anyenum**, and **anyrange** types. + +The **internal** pseudo-type is used to declare functions that are meant only to be called internally by the database system, and not by direct invocation in an SQL query. If a function has at least one **internal**-type argument then it cannot be called from SQL. You are advised not to create any function that is declared to return internal unless it has at least one **internal** argument. + +Example: + +```sql +-- Create a table. +MogDB=# create table t1 (a int); + +-- Insert two data records. +MogDB=# insert into t1 values(1),(2); + +-- Create the showall() function: +MogDB=# CREATE OR REPLACE FUNCTION showall() RETURNS SETOF record +AS $$ SELECT count(*) from t1; $$ +LANGUAGE SQL; + +-- Invoke the showall() function: +MogDB=# SELECT showall(); + showall +--------- + (2) +(1 row) + +-- Delete the function: +MogDB=# DROP FUNCTION showall(); + +-- Delete the table. +MogDB=# drop table t1; +``` diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/supported-data-types.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/supported-data-types.md index bc009e46..16c04b95 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/supported-data-types.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/supported-data-types.md @@ -1,31 +1,31 @@ ---- -title: Supported Data Types -summary: Supported Data Types -author: zhang cuiping -date: 2023-04-07 ---- - -# Supported Data Types - -MogDB supports implicit conversions between certain data types. For details, see [PG_CAST](./../system-catalogs-and-system-views/system-catalogs/PG_CAST.md). - -- **[Numeric Data Types](numeric-data-types.md)** -- **[Monetary Types](monetary.md)** -- **[Boolean Data Types](boolean-data-types.md)** -- **[Character Data Types](character-data-types.md)** -- **[Character Data Types](binary-data-types.md)** -- **[Date/Time](date-time-types.md)** -- **[Geometric](geometric.md)** -- **[Network Address Types](network-address.md)** -- **[Bit String Types](bit-string-types.md)** -- **[Text Search Types](text-search-types.md)** -- **[UUID](uuid-type.md)** -- **[JSON/JSONB Types](json-types.md)** -- **[HLL](HLL.md)** -- **[Range](range.md)** -- **[OID Types](object-identifier-types.md)** -- **[Pseudo-Types](pseudo-types.md)** -- **[Data Types Supported by Column-store Tables](data-types-supported-by-column-store-tables.md)** -- **[XML Types](xml-type.md)** -- **[Data Type Used by the Ledger Database](data-type-used-by-the-ledger-database.md)** +--- +title: Supported Data Types +summary: Supported Data Types +author: zhang cuiping +date: 2023-04-07 +--- + +# Supported Data Types + +MogDB supports implicit conversions between certain data types. For details, see [PG_CAST](./../system-catalogs-and-system-views/system-catalogs/PG_CAST.md). 
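+
+As a quick illustration (the exact rows returned vary by MogDB version and compatibility mode), the implicit casts can be inspected directly in that catalog, where **castcontext = 'i'** marks a cast that may be applied implicitly:
+
+```sql
+-- List the types that int4 converts to implicitly.
+MogDB=# SELECT castsource::regtype, casttarget::regtype
+        FROM pg_cast
+        WHERE castsource = 'int4'::regtype AND castcontext = 'i';
+```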
+
+- **[Numeric Data Types](numeric-data-types.md)**
+- **[Monetary Types](monetary.md)**
+- **[Boolean Data Types](boolean-data-types.md)**
+- **[Character Data Types](character-data-types.md)**
+- **[Binary Data Types](binary-data-types.md)**
+- **[Date/Time](date-time-types.md)**
+- **[Geometric](geometric.md)**
+- **[Network Address Types](network-address.md)**
+- **[Bit String Types](bit-string-types.md)**
+- **[Text Search Types](text-search-types.md)**
+- **[UUID](uuid-type.md)**
+- **[JSON/JSONB Types](json-types.md)**
+- **[HLL](HLL.md)**
+- **[Range](range.md)**
+- **[OID Types](object-identifier-types.md)**
+- **[Pseudo-Types](pseudo-types.md)**
+- **[Data Types Supported by Column-store Tables](data-types-supported-by-column-store-tables.md)**
+- **[XML Types](xml-type.md)**
+- **[Data Type Used by the Ledger Database](data-type-used-by-the-ledger-database.md)**
- **[SET Type](set-type.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/text-search-types.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/text-search-types.md
index 5d796e11..c2f1e305 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/text-search-types.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/text-search-types.md
@@ -1,168 +1,168 @@
----
-title: Text Search Types
-summary: Text Search Types
-author: Guo Huan
-date: 2021-04-06
----
-
-# Text Search Types
-
-MogDB **tsvector** type represents a document in a form optimized for text search. The **tsquery** type similarly represents a text query.
-
-## tsvector
-
-The **tsvector** type represents a retrieval unit, usually a textual column within a row of a database table, or a combination of such columns. A **tsvector** value is a sorted list of distinct lexemes, which are words that have been normalized to merge different variants of the same word. Sorting and deduplication are done automatically during input. The **to_tsvector** function is used to parse and normalize a document string. The **to_tsvector** function is used to parse and normalize a document string.
-
-A **tsvector** value is a sorted list of distinct lexemes, which are words that have been formatted different entries. During segmentation, **tsvector** automatically performs duplicate-elimination to the entries for input in a certain order. For example:
-
-```sql
-MogDB=# SELECT 'a fat cat sat on a mat and ate a fat rat'::tsvector;
- tsvector
-----------------------------------------------------
- 'a' 'and' 'ate' 'cat' 'fat' 'mat' 'on' 'rat' 'sat'
-(1 row)
-```
-
-It can be seen from the preceding example that **tsvector** segments a string by spaces, and segmented lexemes are sorted based on their length and alphabetical order. To represent lexemes containing whitespace or punctuation, surround them with quotes:
-
-```sql
-MogDB=# SELECT $$the lexeme ' ' contains spaces$$::tsvector;
- tsvector
--------------------------------------------
- ' ' 'contains' 'lexeme' 'spaces' 'the'
-(1 row)
-```
-
-Use double dollar signs ($$) to mark entries containing single quotation marks (").
- -```sql -MogDB=# SELECT $$the lexeme 'Joe''s' contains a quote$$::tsvector; - tsvector ------------------------------------------------- - 'Joe''s' 'a' 'contains' 'lexeme' 'quote' 'the' -(1 row) -``` - -Optionally, integer positions can be attached to lexemes: - -```sql -MogDB=# SELECT 'a:1 fat:2 cat:3 sat:4 on:5 a:6 mat:7 and:8 ate:9 a:10 fat:11 rat:12'::tsvector; - tsvector -------------------------------------------------------------------------------- - 'a':1,6,10 'and':8 'ate':9 'cat':3 'fat':2,11 'mat':7 'on':5 'rat':12 'sat':4 -(1 row) -``` - -A position normally indicates the source word's location in the document. Positional information can be used for proximity ranking. Position values range from 1 to 16383. The default maximum value is **16383**. Duplicate positions for the same lexeme are discarded. - -Lexemes that have positions can further be labeled with a weight, which can be **A**, **B**, **C**, or **D**. **D** is the default and hence is not shown on output: - -```sql -MogDB=# SELECT 'a:1A fat:2B,4C cat:5D'::tsvector; - tsvector ----------------------------- - 'a':1A 'cat':5 'fat':2B,4C -(1 row) -``` - -Weights are typically used to reflect document structure, for example, by marking title words differently from body words. Text search ranking functions can assign different priorities to the different weight markers. - -The following example is the standard usage of the **tsvector** type. For example: - -```sql -MogDB=# SELECT 'The Fat Rats'::tsvector; - tsvector --------------------- - 'Fat' 'Rats' 'The' -(1 row) -``` - -For most English-text-searching applications the above words would be considered non-normalized, which should usually be passed through **to_tsvector** to normalize the words appropriately for searching: - -```sql -MogDB=# SELECT to_tsvector('english', 'The Fat Rats'); - to_tsvector ------------------ - 'fat':2 'rat':3 -(1 row) -``` - -## tsquery - -The **tsquery** type represents a retrieval condition. A **tsquery** value stores lexemes that are to be searched for, and combines them honoring the **Boolean** operators **& (AND)**, **| (OR)**, and **! (NOT)**. Parentheses can be used to enforce grouping of the operators. The **to_tsquery** and **plainto_tsquery** functions will normalize lexemes before the lexemes are converted to the **tsquery** type. - -```sql -MogDB=# SELECT 'fat & rat'::tsquery; - tsquery ---------------- - 'fat' & 'rat' -(1 row) - -MogDB=# SELECT 'fat & (rat | cat)'::tsquery; - tsquery ---------------------------- - 'fat' & ( 'rat' | 'cat' ) -(1 row) - -MogDB=# SELECT 'fat & rat & ! cat'::tsquery; - tsquery ------------------------- - 'fat' & 'rat' & !'cat' -(1 row) -``` - -In the absence of parentheses, **! (NOT)** binds most tightly, and **& (AND)** binds more tightly than **| (OR)**. - -Lexemes in a **tsquery** can be labeled with one or more weight letters, which restrict them to match only **tsvector** lexemes with matching weights: - -```sql -MogDB=# SELECT 'fat:ab & cat'::tsquery; - tsquery ------------------- - 'fat':AB & 'cat' -(1 row) -``` - -Also, lexemes in a **tsquery** can be labeled with * to specify prefix matching: - -```sql -MogDB=# SELECT 'super:*'::tsquery; - tsquery ------------ - 'super':* -(1 row) -``` - -This query will match any word in a **tsvector** that begins with "super". 
-
-Note that prefixes are first processed by text search configurations, which means the following example returns true:
-
-```sql
-MogDB=# SELECT to_tsvector( 'postgraduate' ) @@ to_tsquery( 'mogdb:*' ) AS RESULT;
- result
----------
- t
-(1 row)
-```
-
-This is because **mogdb** gets stemmed to **postgr**:
-
-```sql
-MogDB=# SELECT to_tsquery('mogdb:*');
- to_tsquery
-------------
- 'postgr':*
-(1 row)
-```
-
-It then matches **postgraduate**.
-
-**'Fat:ab & Cats'** is normalized to the **tsquery** type as follows:
-
-```sql
-MogDB=# SELECT to_tsquery('Fat:ab & Cats');
- to_tsquery
-------------------
- 'fat':AB & 'cat'
-(1 row)
-```
+---
+title: Text Search Types
+summary: Text Search Types
+author: Guo Huan
+date: 2021-04-06
+---
+
+# Text Search Types
+
+The MogDB **tsvector** type represents a document in a form optimized for text search. The **tsquery** type similarly represents a text query.
+
+## tsvector
+
+The **tsvector** type represents a retrieval unit, usually a textual column within a row of a database table, or a combination of such columns. A **tsvector** value is a sorted list of distinct lexemes, which are words that have been normalized to merge different variants of the same word. Sorting and deduplication are done automatically during input. The **to_tsvector** function is used to parse and normalize a document string.
+
+During segmentation, **tsvector** automatically deduplicates and sorts the entries, regardless of the order in which they appear in the input. For example:
+
+```sql
+MogDB=# SELECT 'a fat cat sat on a mat and ate a fat rat'::tsvector;
+ tsvector
+----------------------------------------------------
+ 'a' 'and' 'ate' 'cat' 'fat' 'mat' 'on' 'rat' 'sat'
+(1 row)
+```
+
+It can be seen from the preceding example that **tsvector** segments a string by spaces, and segmented lexemes are sorted in alphabetical order. To represent lexemes containing whitespace or punctuation, surround them with quotes:
+
+```sql
+MogDB=# SELECT $$the lexeme ' ' contains spaces$$::tsvector;
+ tsvector
+-------------------------------------------
+ ' ' 'contains' 'lexeme' 'spaces' 'the'
+(1 row)
+```
+
+Use double dollar signs ($$) around entries that contain single quotation marks (').
+
+```sql
+MogDB=# SELECT $$the lexeme 'Joe''s' contains a quote$$::tsvector;
+ tsvector
+------------------------------------------------
+ 'Joe''s' 'a' 'contains' 'lexeme' 'quote' 'the'
+(1 row)
+```
+
+Optionally, integer positions can be attached to lexemes:
+
+```sql
+MogDB=# SELECT 'a:1 fat:2 cat:3 sat:4 on:5 a:6 mat:7 and:8 ate:9 a:10 fat:11 rat:12'::tsvector;
+ tsvector
+-------------------------------------------------------------------------------
+ 'a':1,6,10 'and':8 'ate':9 'cat':3 'fat':2,11 'mat':7 'on':5 'rat':12 'sat':4
+(1 row)
+```
+
+A position normally indicates the source word's location in the document. Positional information can be used for proximity ranking. Position values range from 1 to 16383; larger values are silently set to **16383**. Duplicate positions for the same lexeme are discarded.
+
+Lexemes that have positions can further be labeled with a weight, which can be **A**, **B**, **C**, or **D**.
**D** is the default and hence is not shown on output: + +```sql +MogDB=# SELECT 'a:1A fat:2B,4C cat:5D'::tsvector; + tsvector +---------------------------- + 'a':1A 'cat':5 'fat':2B,4C +(1 row) +``` + +Weights are typically used to reflect document structure, for example, by marking title words differently from body words. Text search ranking functions can assign different priorities to the different weight markers. + +The following example is the standard usage of the **tsvector** type. For example: + +```sql +MogDB=# SELECT 'The Fat Rats'::tsvector; + tsvector +-------------------- + 'Fat' 'Rats' 'The' +(1 row) +``` + +For most English-text-searching applications the above words would be considered non-normalized, which should usually be passed through **to_tsvector** to normalize the words appropriately for searching: + +```sql +MogDB=# SELECT to_tsvector('english', 'The Fat Rats'); + to_tsvector +----------------- + 'fat':2 'rat':3 +(1 row) +``` + +## tsquery + +The **tsquery** type represents a retrieval condition. A **tsquery** value stores lexemes that are to be searched for, and combines them honoring the **Boolean** operators **& (AND)**, **| (OR)**, and **! (NOT)**. Parentheses can be used to enforce grouping of the operators. The **to_tsquery** and **plainto_tsquery** functions will normalize lexemes before the lexemes are converted to the **tsquery** type. + +```sql +MogDB=# SELECT 'fat & rat'::tsquery; + tsquery +--------------- + 'fat' & 'rat' +(1 row) + +MogDB=# SELECT 'fat & (rat | cat)'::tsquery; + tsquery +--------------------------- + 'fat' & ( 'rat' | 'cat' ) +(1 row) + +MogDB=# SELECT 'fat & rat & ! cat'::tsquery; + tsquery +------------------------ + 'fat' & 'rat' & !'cat' +(1 row) +``` + +In the absence of parentheses, **! (NOT)** binds most tightly, and **& (AND)** binds more tightly than **| (OR)**. + +Lexemes in a **tsquery** can be labeled with one or more weight letters, which restrict them to match only **tsvector** lexemes with matching weights: + +```sql +MogDB=# SELECT 'fat:ab & cat'::tsquery; + tsquery +------------------ + 'fat':AB & 'cat' +(1 row) +``` + +Also, lexemes in a **tsquery** can be labeled with * to specify prefix matching: + +```sql +MogDB=# SELECT 'super:*'::tsquery; + tsquery +----------- + 'super':* +(1 row) +``` + +This query will match any word in a **tsvector** that begins with "super". + +Note that prefixes are first processed by text search configurations, which means the following example returns true: + +```sql +MogDB=# SELECT to_tsvector( 'postgraduate' ) @@ to_tsquery( 'mogdb:*' ) AS RESULT; + result +---------- + t +(1 row) +``` + +This is because **mogdb** gets stemmed to **postgr**: + +```sql +MogDB=# SELECT to_tsquery('mogdb:*'); + to_tsquery +------------ + 'postgr':* +(1 row) +``` + +It then matches **postgraduate**. 
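+
+The **@@** match operator ties the two types together. As a quick sanity check (assuming the default **english** configuration), a match succeeds only when every lexeme required by the **tsquery** is present in the **tsvector**:
+
+```sql
+MogDB=# SELECT to_tsvector('english', 'The Fat Rats') @@ to_tsquery('fat & rat') AS result;
+ result
+--------
+ t
+(1 row)
+```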
+
+**'Fat:ab & Cats'** is normalized to the **tsquery** type as follows:
+
+```sql
+MogDB=# SELECT to_tsquery('Fat:ab & Cats');
+ to_tsquery
+------------------
+ 'fat':AB & 'cat'
+(1 row)
+```
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/uuid-type.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/uuid-type.md
index 20b2399e..0ae5e8fa 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/uuid-type.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/uuid-type.md
@@ -1,27 +1,27 @@
----
-title: UUID Type
-summary: UUID Type
-author: Guo Huan
-date: 2021-04-06
----
-
-# UUID Type
-
-The data type **UUID** stores Universally Unique Identifiers (UUID) as defined by RFC 4122, ISO/IEF 9834-8:2005, and related standards. This identifier is a 128-bit quantity that is generated by an algorithm chosen to make it very unlikely that the same identifier will be generated by anyone else in the known universe using the same algorithm.
-
-A UUID is written as a sequence of lower-case hexadecimal digits, in several groups separated by hyphens, specifically a group of 8 digits followed by three groups of 4 digits followed by a group of 12 digits, for a total of 32 digits representing the 128 bits. An example of a UUID in this standard form is:
-
-```sql
-a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11
-```
-
-MogDB also accepts the following alternative forms for input: use of upper-case letters and digits, the standard format surrounded by braces, omitting some or all hyphens, adding a hyphen after any group of four digits. An example is provided as follows:
-
-```sql
-A0EEBC99-9C0B-4EF8-BB6D-6BB9BD380A11
-{a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11}
-a0eebc999c0b4ef8bb6d6bb9bd380a11
-a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11
-```
-
-Output is always in the standard form.
+---
+title: UUID Type
+summary: UUID Type
+author: Guo Huan
+date: 2021-04-06
+---
+
+# UUID Type
+
+The data type **UUID** stores Universally Unique Identifiers (UUID) as defined by RFC 4122, ISO/IEC 9834-8:2005, and related standards. This identifier is a 128-bit quantity that is generated by an algorithm chosen to make it very unlikely that the same identifier will be generated by anyone else in the known universe using the same algorithm.
+
+A UUID is written as a sequence of lower-case hexadecimal digits, in several groups separated by hyphens, specifically a group of 8 digits followed by three groups of 4 digits followed by a group of 12 digits, for a total of 32 digits representing the 128 bits. An example of a UUID in this standard form is:
+
+```sql
+a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11
+```
+
+MogDB also accepts the following alternative forms for input: use of upper-case letters and digits, the standard format surrounded by braces, omitting some or all hyphens, and adding a hyphen after any group of four digits. An example is provided as follows:
+
+```sql
+A0EEBC99-9C0B-4EF8-BB6D-6BB9BD380A11
+{a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11}
+a0eebc999c0b4ef8bb6d6bb9bd380a11
+a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11
+```
+
+Output is always in the standard form.
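+
+For example, a braced, upper-case input is accepted and printed back in the standard form (a minimal illustration):
+
+```sql
+MogDB=# SELECT '{A0EEBC99-9C0B-4EF8-BB6D-6BB9BD380A11}'::uuid;
+                 uuid
+--------------------------------------
+ a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11
+(1 row)
+```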
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/xml-type.md b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/xml-type.md index 9615f8d8..713a83aa 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/xml-type.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/supported-data-types/xml-type.md @@ -1,69 +1,69 @@ ---- -title: XML Type -summary: XML Type -author: Guo Huan -date: 2021-04-06 ---- - -# XML Type - -XML data types can be used to store XML data. Its internal format is the same as the TEXT type. Its advantages over storing XML data directly in a TEXT domain are that it will use the processing ability of LIBXML2 for XML formatted text, check whether the structure of the input value conforms to the XML standard, and provide support functions based on LIBXML2 to perform type-safe operations on it. - -XML types can store well-formed “documents” and “content” fragments defined by XML standards. They are defined by referring to the broader “DOCUMENT NODE” XQUERY and XPATH data models. Basically, this means that there can be more than one top-level element or character node in the content fragment. The expression XMLVALUE IS DOCUMENT can be used to evaluate whether a specific XML value is a complete document or just a document fragment. - -The XML parser converts XML documents into XML DOM objects. DOM (DOCUMENT OBJECT MODEL document object model) defines the standard methods for accessing and manipulating documents. XML DOM (XML DOCUMENT OBJECT MODEL) defines the standard methods for accessing and manipulating XML documents. The XML DOM views the XML document as a tree structure. All elements can be accessed through the DOM tree. You can modify or delete their contents and create new elements. Elements, their text, and their attributes are considered nodes. - -The XML underlying uses the same data structure as the text type for storage, with a maximum of 1GB. - -Example: - -```xml -MogDB= CREATE TABLE xmltest (id int, data xml); -MogDB= INSERT INTO xmltest VALUES (1, 'one'); -MogDB= INSERT INTO xmltest VALUES (2, 'two'); -MogDB= SELECT * FROM xmltest ORDER BY 1; - id | data -----+-------------------- -1 | one -2 | two -(2 rows) -MogDB= SELECT xmlconcat(xmlcomment('hello'), - xmlelement(NAME qux, 'xml'), - xmlcomment('world')); - xmlconcat ----------------------------------------- - xml -(1 row) -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - The following operations are not supported for XML types: -> - Logical expressions and, or, not. -> - As partition key, secondary partition key, foreign key, primary key and unique constraint. -> - XML-related implicit type conversion (XML type can be implicitly converted to character type during assignment). -> - Array expression, row expression, sub-query expression [not] in/any/all. -> - Create common indexes, unique indexes, global indexes, local indexes, and partial indexes on XML type fields. -> - Compare expressions>,<,>=,<=,=,<>,!=, ^= between and、is distinct from、is not distinct from、<=>。 -> - Conditional expressions decode, nullif, greatest, least. -> - As the distinct/group by/order by parameter. 
-> - Aggregate functions sum, max, min, avg, listagg, corr, covar_pop、covar_samp、stddev、stddev_pop、stddev_samp、var_pop、var_samp、variance、bit_and、bit_or、bool_and、bool_or、every、regr_avgx、regr_avgy、regr_count、regr_intercept、regr_r2、regr_slope、regr_sxx、regr_sxy、regr_syy。
-> - ODBC related binding parameter transfer interface is not supported.
-> - XML types support the following operations:
-> - Physical backup recovery.
-> - The comparison expression is null, is not null.
-> - Conditional expressions case, coalesce.
-> - Subquery expression [not] exists.
-> - Global temporary tables and local temporary tables.
-> - cast type.
-> - Expression index.
-> - The value of XML type is determined according to the “xml option” session configuration parameter.
-> - Support gs_dump export and gs_restore import operation.
-> - Parallel query, supporting astore and ustore storage engines.
-> - As input parameter, output parameter, user-defined variable and return value of user-defined function.
-> - As input parameter, output parameter, user-defined variable and return value of stored procedure. Stored procedures that support autonomous transactions.
-> - Character processing function quote_literal(value anyelement)、quote_nullable(value anyelement)。
-> - Aggregate functions count, array_agg, checksum (need to be explicitly converted to character type), string_agg (needs to be explicitly converted to character type).
-> - The addition, deletion, modification and query of the compound type in the user-defined type when it involves the XML type, and the same as the XML field in the ordinary table, it needs to be inserted and modified according to the XML syntax.
-> - Support JDBC and ODBC to operate on XML data types, select, update, insert, delete the fields, input XML values using SQL syntax, and obtain XML values using the getSQLXML method of the ResultSet class. It supports the JDBC-related binding parameter transfer interface, the setSQLXML method in the PreparedStatement preprocessing statement interface, and the getSQLXML (int columnIndex) method in the ResultSet execution result set interface.
+---
+title: XML Type
+summary: XML Type
+author: Guo Huan
+date: 2021-04-06
+---
+
+# XML Type
+
+The XML data type can be used to store XML data. Its internal format is the same as the TEXT type. Its advantages over storing XML data directly in a TEXT column are that it uses the processing ability of LIBXML2 for XML-formatted text, checks whether the structure of the input value conforms to the XML standard, and provides support functions based on LIBXML2 to perform type-safe operations on it.
+
+XML types can store well-formed “documents” and “content” fragments defined by XML standards. They are defined by referring to the broader “DOCUMENT NODE” XQUERY and XPATH data models. Basically, this means that there can be more than one top-level element or character node in a content fragment. The expression XMLVALUE IS DOCUMENT can be used to evaluate whether a specific XML value is a complete document or just a document fragment.
+
+The XML parser converts XML documents into XML DOM objects. The DOM (Document Object Model) defines the standard methods for accessing and manipulating documents, and the XML DOM defines the standard methods for accessing and manipulating XML documents. The XML DOM views the XML document as a tree structure. All elements can be accessed through the DOM tree. You can modify or delete their contents and create new elements. Elements, their text, and their attributes are considered nodes.
+
+Internally, the XML type uses the same data structure as the text type for storage, with a maximum size of 1 GB.
+
+Example:
+
+```xml
+MogDB=# CREATE TABLE xmltest (id int, data xml);
+MogDB=# INSERT INTO xmltest VALUES (1, '<value>one</value>');
+MogDB=# INSERT INTO xmltest VALUES (2, '<value>two</value>');
+MogDB=# SELECT * FROM xmltest ORDER BY 1;
+ id |        data
+----+--------------------
+  1 | <value>one</value>
+  2 | <value>two</value>
+(2 rows)
+MogDB=# SELECT xmlconcat(xmlcomment('hello'),
+                  xmlelement(NAME qux, 'xml'),
+                  xmlcomment('world'));
+               xmlconcat
+----------------------------------------
+ <!--hello--><qux>xml</qux><!--world-->
+(1 row)
+```
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+>
+> - The following operations are not supported for XML types:
+> - Logical expressions and, or, not.
+> - As partition key, secondary partition key, foreign key, primary key and unique constraint.
+> - XML-related implicit type conversion (XML type can be implicitly converted to character type during assignment).
+> - Array expression, row expression, sub-query expression [not] in/any/all.
+> - Create common indexes, unique indexes, global indexes, local indexes, and partial indexes on XML type fields.
+> - Comparison expressions >, <, >=, <=, =, <>, !=, ^=, between and, is distinct from, is not distinct from, <=>.
+> - Conditional expressions decode, nullif, greatest, least.
+> - As the distinct/group by/order by parameter.
+> - Aggregate functions sum, max, min, avg, listagg, corr, covar_pop, covar_samp, stddev, stddev_pop, stddev_samp, var_pop, var_samp, variance, bit_and, bit_or, bool_and, bool_or, every, regr_avgx, regr_avgy, regr_count, regr_intercept, regr_r2, regr_slope, regr_sxx, regr_sxy, regr_syy.
+> - ODBC-related binding parameter transfer interface.
+> - XML types support the following operations:
+> - Physical backup recovery.
+> - The comparison expressions is null, is not null.
+> - Conditional expressions case, coalesce.
+> - Subquery expression [not] exists.
+> - Global temporary tables and local temporary tables.
+> - cast type.
+> - Expression index.
+> - The value of XML type is determined according to the “xml option” session configuration parameter.
+> - gs_dump export and gs_restore import operations.
+> - Parallel query, supporting astore and ustore storage engines.
+> - As input parameter, output parameter, user-defined variable, and return value of a user-defined function.
+> - As input parameter, output parameter, user-defined variable, and return value of a stored procedure, including stored procedures that support autonomous transactions.
+> - Character processing functions quote_literal(value anyelement), quote_nullable(value anyelement).
+> - Aggregate functions count, array_agg, checksum (needs to be explicitly converted to character type), string_agg (needs to be explicitly converted to character type).
+> - Adding, deleting, modifying, and querying XML-typed members of user-defined composite types; as with XML columns in ordinary tables, such members must be inserted and modified according to the XML syntax.
+> - JDBC and ODBC operations on XML data types: select, update, insert, and delete the fields, input XML values using SQL syntax, and obtain XML values using the getSQLXML method of the ResultSet class. The JDBC-related binding parameter transfer interface is supported, including the setSQLXML method in the PreparedStatement preprocessing statement interface and the getSQLXML (int columnIndex) method in the ResultSet execution result set interface.
> - Call process: You need to use the java.sql.SQLXML interface class to construct an XML object, then set the specified object type to Oid.XML, and then send the type ID and XML value to the server. After obtaining the returned result from the server, you first call ResultSet.getString, and then use the java.sql.SQLXML interface class to construct an XML object through the obtained character string. At this time, the content is checked again to ensure that it conforms to the XML standard format. Therefore, you can also use ResultSet.getString to directly obtain the XML string object.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/overview-of-system-catalogs-and-system-views.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/overview-of-system-catalogs-and-system-views.md
index 318172d5..b7e64bbf 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/overview-of-system-catalogs-and-system-views.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/overview-of-system-catalogs-and-system-views.md
@@ -1,18 +1,18 @@
----
-title: Overview of System Catalogs and System Views
-summary: Overview of System Catalogs and System Views
-author: Guo Huan
-date: 2021-04-19
----
-
-# Overview of System Catalogs and System Views
-
-System catalogs store structured metadata of MogDB. They are the source of information used by MogDB to control system running and are a core component of the database system.
-
-System views provide ways to query the system catalogs and internal database status.
-
-System catalogs and system views are visible to either system administrators or all users. Some system catalogs and views have marked the need of administrator permissions, so they are accessible only to administrators.
-
-You can delete and re-create system catalogs, add columns to them, and insert and update values in them, but doing so may make system information inconsistent and cause system faults. Generally, users should not modify system catalogs or system views, or rename their schemas. They are automatically maintained by the system.
-
+---
+title: Overview of System Catalogs and System Views
+summary: Overview of System Catalogs and System Views
+author: Guo Huan
+date: 2021-04-19
+---
+
+# Overview of System Catalogs and System Views
+
+System catalogs store structured metadata of MogDB. They are the source of information used by MogDB to control system running and are a core component of the database system.
+
+System views provide ways to query the system catalogs and internal database status.
+
+System catalogs and system views are visible either to system administrators only or to all users. Some system catalogs and views are marked as requiring administrator permissions, so they are accessible only to administrators.
+
+You can delete and re-create system catalogs, add columns to them, and insert and update values in them, but doing so may make system information inconsistent and cause system faults. Generally, users should not modify system catalogs or system views, or rename their schemas. They are automatically maintained by the system.
+
> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** Do not add, delete, or modify system catalogs because doing so will result in exceptions or even MogDB unavailability.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs-and-system-views.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs-and-system-views.md
index 763a29bd..a63ac735 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs-and-system-views.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs-and-system-views.md
@@ -1,15 +1,15 @@
----
-title: System Catalogs and System Views
-summary: System Catalogs and System Views
-author: zhang cuiping
-date: 2023-04-07
----
-
-# System Catalogs and System Views
-
-- **[Overview of System Catalogs and System Views](overview-of-system-catalogs-and-system-views.md)**
-- **[Querying a System Catalog](viewing-system-catalogs.md)**
-
-- **[System Catalogs](./system-catalogs/system-catalogs.md)**
-
+---
+title: System Catalogs and System Views
+summary: System Catalogs and System Views
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# System Catalogs and System Views
+
+- **[Overview of System Catalogs and System Views](overview-of-system-catalogs-and-system-views.md)**
+- **[Querying a System Catalog](viewing-system-catalogs.md)**
+
+- **[System Catalogs](./system-catalogs/system-catalogs.md)**
+
- **[System Views](./system-views/system-views.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_TXN_SNAPSHOT.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_TXN_SNAPSHOT.md
index 8973ffd4..a9cac9b1 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_TXN_SNAPSHOT.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_TXN_SNAPSHOT.md
@@ -1,19 +1,19 @@
----
-title: GS_TXN_SNAPSHOT
-summary: GS_TXN_SNAPSHOT
-author: Zhang Cuiping
-date: 2021-10-11
----
-
-# GS_TXN_SNAPSHOT
-
-**GS_TXN_SNAPSHOT** is a timestamp-CSN mapping table. It periodically samples and maintains an appropriate time range to estimate the CSN value corresponding to the timestamp in the range.
-
-**Table 1** GS_TXN_SNAPSHOT columns
-
-| **Name** | **Data Type** | **Description** |
-| :---------- | :------------------------- | :-------------------------------------- |
-| snptime | timestamp with time zonetz | Snapshot time. |
-| snpxmin | bigint | Minimum transaction ID snapshots. |
-| snpcsn | bigint | Commit sequence number (CSN) snapshots. |
+---
+title: GS_TXN_SNAPSHOT
+summary: GS_TXN_SNAPSHOT
+author: Zhang Cuiping
+date: 2021-10-11
+---
+
+# GS_TXN_SNAPSHOT
+
+**GS_TXN_SNAPSHOT** is a timestamp-CSN mapping table. It periodically samples and maintains an appropriate time range to estimate the CSN value corresponding to a timestamp in that range.
+
+**Table 1** GS_TXN_SNAPSHOT columns
+
+| **Name** | **Data Type** | **Description** |
+| :---------- | :------------------------- | :-------------------------------------- |
+| snptime | timestamp with time zone | Snapshot time. |
+| snpxmin | bigint | Minimum transaction ID (xmin) of the snapshot. |
+| snpcsn | bigint | Commit sequence number (CSN) of the snapshot. |
| snpsnapshot | text | Serialized snapshot text.
|
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_EVENT_TRIGGER.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_EVENT_TRIGGER.md
index 5c215525..cf181bba 100644
--- a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_EVENT_TRIGGER.md
+++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_EVENT_TRIGGER.md
@@ -1,21 +1,21 @@
----
-title: PG_EVENT_TRIGGER
-summary: PG_EVENT_TRIGGER
-author: zhang cuiping
-date: 2023-04-07
----
-
-# PG_EVENT_TRIGGER
-
-PG_EVENT_TRIGGER records information about each event trigger.
-
-**Table 1** PG_EVENT_TRIGGER columns
-
-| Name | Type | Reference | Description |
-| :--------- | :----- | :----------------------------------------------------------- | :----------------------------------------------------------- |
-| evtname | name | - | Trigger name, which must be unique. |
-| evtevent | name | [-](https://docs.opengauss.org/en/docs/5.0.0/docs/DatabaseReference/pg_type.html) | Identifies the event for which this trigger fires. |
-| evtowner | oid | pg_authid.oid | Owner of the event trigger. |
-| evtfoid | oid | pg_proc.oid | Function to be called. |
-| evtenabled | char | - | Controls in which the session replication role modes the event trigger fires. **O**: Trigger fires in "origin" and "local" modes. **D**: Trigger is disabled. **R**: Trigger fires in "replica" mode. **A**: Trigger fires always. |
+---
+title: PG_EVENT_TRIGGER
+summary: PG_EVENT_TRIGGER
+author: zhang cuiping
+date: 2023-04-07
+---
+
+# PG_EVENT_TRIGGER
+
+PG_EVENT_TRIGGER records information about each event trigger.
+
+**Table 1** PG_EVENT_TRIGGER columns
+
+| Name | Type | Reference | Description |
+| :--------- | :----- | :----------------------------------------------------------- | :----------------------------------------------------------- |
+| evtname | name | - | Trigger name, which must be unique. |
+| evtevent | name | - | Identifies the event for which this trigger fires. |
+| evtowner | oid | pg_authid.oid | Owner of the event trigger. |
+| evtfoid | oid | pg_proc.oid | Function to be called. |
+| evtenabled | char | - | Controls in which session replication role modes the event trigger fires. **O**: Trigger fires in "origin" and "local" modes. **D**: Trigger is disabled. **R**: Trigger fires in "replica" mode. **A**: Trigger fires always. |
| evttags | text[] | - | Command tags for which this trigger will fire. If NULL, the firing of this trigger is not restricted by the command tag.
| \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SUBSCRIPTION_REL.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SUBSCRIPTION_REL.md index a4741bb7..36ee7bc1 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SUBSCRIPTION_REL.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SUBSCRIPTION_REL.md @@ -1,22 +1,22 @@ ---- -title: PG_SUBSCRIPTION_REL -summary: PG_SUBSCRIPTION_REL -author: zhang cuiping -date: 2023-04-07 ---- - -# PG_SUBSCRIPTION_REL - -The system catalog PG_SUBSCRIPTION_REL contains the status of each replicated table in each subscription. It is a many-to-many mapping. - -This system catalog contains only tables known to the subscription after running [CREATE SUBSCRIPTION](../../sql-syntax/CREATE-SUBSCRIPTION.md) or [ALTER SUBSCRIPTION … REFRESH PUBLICATION](../../sql-syntax/ALTER-SUBSCRIPTION.md). - -**Table 1** PG_SUBSCRIPTION_REL columns - -| Name | Type | Description | -| :--------- | :--- | :----------------------------------------------------------- | -| srsubid | oid | Identifier of the subscription. | -| srrelid | oid | Subscription relationship ID. | -| srsubstate | char | Subscription status.
- **i**: Initialization.
- **d**: Basic data being replicated.
- **f**: Basic data replication completed.
- **s**: Progress synchronized with incremental replication.
- **r**: Incremental replication ready. | -| srcsn | int8 | Snapshot CSN during basic data replication. | +--- +title: PG_SUBSCRIPTION_REL +summary: PG_SUBSCRIPTION_REL +author: zhang cuiping +date: 2023-04-07 +--- + +# PG_SUBSCRIPTION_REL + +The system catalog PG_SUBSCRIPTION_REL contains the status of each replicated table in each subscription. It is a many-to-many mapping. + +This system catalog contains only tables known to the subscription after running [CREATE SUBSCRIPTION](../../sql-syntax/CREATE-SUBSCRIPTION.md) or [ALTER SUBSCRIPTION … REFRESH PUBLICATION](../../sql-syntax/ALTER-SUBSCRIPTION.md). + +**Table 1** PG_SUBSCRIPTION_REL columns + +| Name | Type | Description | +| :--------- | :--- | :----------------------------------------------------------- | +| srsubid | oid | Identifier of the subscription. | +| srrelid | oid | Subscription relationship ID. | +| srsubstate | char | Subscription status.
- **i**: Initialization.
- **d**: Basic data being replicated.
- **f**: Basic data replication completed.
- **s**: Progress synchronized with incremental replication.
- **r**: Incremental replication ready. | +| srcsn | int8 | Snapshot CSN during basic data replication. | | srsublsn | text | Remote LSN used to synchronize the incremental replication progress in the **s** or **r** state. Otherwise, the value is null. | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TYPE.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TYPE.md index 584e53d7..613b27d1 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TYPE.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TYPE.md @@ -1,46 +1,46 @@ ---- -title: PG_TYPE -summary: PG_TYPE -author: Guo Huan -date: 2021-04-19 ---- - -# PG_TYPE - -**PG_TYPE** stores information about data types. - -**Table 1** PG_TYPE columns - -| Name | Type | Description | -| :------------- | :----------- | :----------------------------------------------------------- | -| oid | oid | Row identifier (hidden attribute, which must be specified) | -| typname | name | Data type name | -| typnamespace | oid | OID of the namespace that contains the type | -| typowner | oid | Owner of the type | -| typlen | smallint | Number of bytes in the internal representation of the type for a fixed-size type. It is a negative number for a variable-length type.
- The value **-1** indicates a "varlena" type (one that has a length word).
- The value **-2** indicates a null-terminated C string. | -| typbyval | boolean | Whether the value of this type is passed by a parameter or reference of this column. **typbyval** is **false** if the type of **typlen** is not **1**, **2**, **4**, or **8**, because values of this type are always passed by reference of this column. **typbyval** can be **false** even if the **typlen** is passed by a parameter of this column. | -| typtype | "char" | - **b**: base type.
- **c**: composite type (for example, a table's row type)
- **d**: domain
- **p**: pseudo
- **r**: indicates a range type.
- **e**: indicates an enumeration type.
- **u**: indicates an undefined type.
- **o**: indicates a set type.
For details, see **typrelid** and **typbasetype**. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typcategory | "char" | Specifies an arbitrary classification of data types that is used by the parser for data conversion. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typispreferred | boolean | True if conversion is performed when data meets conversion rules specified by TYPCATEGORY. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typisdefined | boolean | Whether a type has been defined. It is **true** if the type is defined, and **false** if this is a placeholder entry for a not-yet-defined type. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typdelim | "char" | Character that separates two values of this type when parsing an array input. Note that the delimiter is associated with the array element data type, not the array data type. | -| typrelid | oid | Points to the pg_class row that defines the corresponding table if this is a composite type (see typtype). For a free-standing composite type, the pg_class entry does not represent a table, but it is required for the type's pg_attribute entries to link to. The value is 0 for non-composite type. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typelem | oid | Identifies another row in pg_type if this is not 0. The current type can be subscripted like an array yielding values of type typelem. A "true" array type has a variable length (**typlen = –1**), but some fixed-length types (**typlen > 0**) also have non-zero **typelem**, for example **name** and **point**. If a fixed-length type has a **typelem**, its internal representation must be some number of values of the **typelem** data type with no other data. Variable-length array types have a header defined by the array subroutines. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typarray | oid | Specifies that the corresponding type row is available in pg_type if this is not 0. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typinput | regproc | Specifies the input conversion function (text format). When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typoutput | regproc | Specifies the output conversion function (text format). When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typreceive | regproc | Specifies the input conversion function (binary format), or 0 if none. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typsend | regproc | Specifies the output conversion function (binary format), or 0 if none. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typmodin | regproc | Specifies the type modifier input function, or 0 if the type does not support modifiers. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typmodout | regproc | Specifies the type modifier output function, or 0 if the type does not support modifiers. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typanalyze | regproc | Specifies the custom ANALYZE function, or 0 if the standard function is used. 
When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typalign | "char" | Specifies the alignment required when storing a value of this type. It applies to storage on disks as well as most representations of the value inside openGauss. When multiple values are stored consecutively, such as in the representation of a complete row on disk, padding is inserted before a data of this type so that it begins on the specified boundary. The alignment reference is the beginning of the first data in the sequence. Possible values are as follows:
- **c**: char alignment, that is, no alignment needed
- **s**: short alignment (2 bytes on most machines)
- **i**: integer alignment (4 bytes on most machines)
- **d**: double alignment (8 bytes on many machines, but by no means all)
NOTICE:
For types used in system catalogs, the size and alignment defined in pg_type must agree with the way that the compiler lays out the column in a structure representing a table row. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | -| typstorage | "char" | Tells for variable-length types (those with typlen = -1) if the type is prepared for dealing with irregular values and what the default strategy for attributes of this type should be. Possible values include:
- **p**: Values are always stored plain.
- **e**: Value can be stored in a secondary relationship (if the relation has one, see **pg_class.reltoastrelid**).
- **m**: Values can be stored compressed inline.
- **x**: Values can be stored compressed inline or stored in secondary storage.
NOTICE:
**m** domains can also be moved out to secondary storage, but only as a last resort (**e** and **x** domains are moved first). | -| typenotnull | boolean | Whether the type has a NOTNULL constraint. Currently, it is used for domains only. | -| typbasetype | oid | If this is a domain (see **typtype**), then **typbasetype** identifies the type that this one is based on. The value is **0** if this type is not a derived type. | -| typtypmod | integer | Records the **typtypmod** to be applied to domains' base types by domains (the value is **-1** if the base type does not use **typmod**). This is **-1** if this type is not a domain. | -| typndims | integer | Number of array dimensions for a domain that is an array (**typbasetype** is an array type; the domain's **typelem** matches the base type's **typelem**). This is **0** for types other than domains over array types. | -| typcollation | oid | Sequence rule for specified types (**0** if sequencing is not supported) | -| typdefaultbin | pg_node_tree | **nodeToString()** representation of a default expression for the type if the value is non-null. Currently, this column is only used for domains. | -| typdefault | text | The value is **NULL** if a type has no associated default value.
- If **typdefaultbin** is not **NULL**, **typdefault** must contain a default expression represented by **typdefaultbin**.
- If **typdefaultbin** is **NULL** and **typdefault** is not, then **typdefault** is the external representation of the type's default value, which can be fed to the type's input converter to produce a constant. | -| typacl | aclitem[] | Access permission. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +--- +title: PG_TYPE +summary: PG_TYPE +author: Guo Huan +date: 2021-04-19 +--- + +# PG_TYPE + +**PG_TYPE** stores information about data types. + +**Table 1** PG_TYPE columns + +| Name | Type | Description | +| :------------- | :----------- | :----------------------------------------------------------- | +| oid | oid | Row identifier (hidden attribute, which must be specified) | +| typname | name | Data type name | +| typnamespace | oid | OID of the namespace that contains the type | +| typowner | oid | Owner of the type | +| typlen | smallint | Number of bytes in the internal representation of the type for a fixed-size type. It is a negative number for a variable-length type.
- The value **-1** indicates a "varlena" type (one that has a length word).
- The value **-2** indicates a null-terminated C string. | +| typbyval | boolean | Whether the value of this type is passed by a parameter or reference of this column. **typbyval** is **false** if the type of **typlen** is not **1**, **2**, **4**, or **8**, because values of this type are always passed by reference of this column. **typbyval** can be **false** even if the **typlen** is passed by a parameter of this column. | +| typtype | "char" | - **b**: base type.
- **c**: composite type (for example, a table's row type)
- **d**: domain
- **p**: pseudo
- **r**: indicates a range type.
- **e**: indicates an enumeration type.
- **u**: indicates an undefined type.
- **o**: indicates a set type.
For details, see **typrelid** and **typbasetype**. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typcategory | "char" | Specifies an arbitrary classification of data types that is used by the parser for data conversion. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typispreferred | boolean | True if conversion is performed when data meets conversion rules specified by TYPCATEGORY. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typisdefined | boolean | Whether a type has been defined. It is **true** if the type is defined, and **false** if this is a placeholder entry for a not-yet-defined type. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typdelim | "char" | Character that separates two values of this type when parsing an array input. Note that the delimiter is associated with the array element data type, not the array data type. | +| typrelid | oid | Points to the pg_class row that defines the corresponding table if this is a composite type (see typtype). For a free-standing composite type, the pg_class entry does not represent a table, but it is required for the type's pg_attribute entries to link to. The value is 0 for non-composite type. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typelem | oid | Identifies another row in pg_type if this is not 0. The current type can be subscripted like an array yielding values of type typelem. A "true" array type has a variable length (**typlen = –1**), but some fixed-length types (**typlen > 0**) also have non-zero **typelem**, for example **name** and **point**. If a fixed-length type has a **typelem**, its internal representation must be some number of values of the **typelem** data type with no other data. Variable-length array types have a header defined by the array subroutines. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typarray | oid | Specifies that the corresponding type row is available in pg_type if this is not 0. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typinput | regproc | Specifies the input conversion function (text format). When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typoutput | regproc | Specifies the output conversion function (text format). When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typreceive | regproc | Specifies the input conversion function (binary format), or 0 if none. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typsend | regproc | Specifies the output conversion function (binary format), or 0 if none. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typmodin | regproc | Specifies the type modifier input function, or 0 if the type does not support modifiers. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typmodout | regproc | Specifies the type modifier output function, or 0 if the type does not support modifiers. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typanalyze | regproc | Specifies the custom ANALYZE function, or 0 if the standard function is used. 
When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typalign | "char" | Specifies the alignment required when storing a value of this type. It applies to storage on disks as well as most representations of the value inside openGauss. When multiple values are stored consecutively, such as in the representation of a complete row on disk, padding is inserted before a data of this type so that it begins on the specified boundary. The alignment reference is the beginning of the first data in the sequence. Possible values are as follows:
- **c**: char alignment, that is, no alignment needed
- **s**: short alignment (2 bytes on most machines)
- **i**: integer alignment (4 bytes on most machines)
- **d**: double alignment (8 bytes on many machines, but by no means all)
NOTICE:
For types used in system catalogs, the size and alignment defined in pg_type must agree with the way that the compiler lays out the column in a structure representing a table row. When it is **false**, nothing except the type name, namespace, and OID can be relied on. | +| typstorage | "char" | Tells for variable-length types (those with typlen = -1) if the type is prepared for dealing with irregular values and what the default strategy for attributes of this type should be. Possible values include:
- **p**: Values are always stored plain.
- **e**: Values can be stored in a secondary relation (if the relation has one, see **pg_class.reltoastrelid**).
- **m**: Values can be stored compressed inline.
- **x**: Values can be stored compressed inline or stored in secondary storage.
NOTICE:
**m** values can also be moved out to secondary storage, but only as a last resort (**e** and **x** values are moved first). | +| typenotnull | boolean | Whether the type has a NOT NULL constraint. Currently, it is used for domains only. | +| typbasetype | oid | If this is a domain (see **typtype**), then **typbasetype** identifies the type that this one is based on. The value is **0** if this type is not a derived type. | +| typtypmod | integer | Records the **typmod** to be applied to a domain's base type (the value is **-1** if the base type does not use **typmod**). The value is **-1** if this type is not a domain. | +| typndims | integer | Number of array dimensions for a domain that is an array (**typbasetype** is an array type; the domain's **typelem** matches the base type's **typelem**). This is **0** for types other than domains over array types. | +| typcollation | oid | Collation rule for the type (**0** if the type does not support collations). | +| typdefaultbin | pg_node_tree | **nodeToString()** representation of a default expression for the type if the value is non-null. Currently, this column is only used for domains. | +| typdefault | text | The value is **NULL** if a type has no associated default value.
- If **typdefaultbin** is not **NULL**, **typdefault** must contain a human-readable version of the default expression represented by **typdefaultbin**.
- If **typdefaultbin** is **NULL** and **typdefault** is not, then **typdefault** is the external representation of the type's default value, which can be fed to the type's input converter to produce a constant. | +| typacl | aclitem[] | Access permissions. | diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/STATEMENT_HISTORY.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/STATEMENT_HISTORY.md index a923c3af..9dd7df24 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/STATEMENT_HISTORY.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/STATEMENT_HISTORY.md @@ -71,7 +71,3 @@ The constraints on the query of this system catalog are as follows: | details | bytea | List of wait events and statement lock events.
When the record level is L0, wait events start to be recorded in the list. Statistics about the wait events of the current statement are displayed. For details about events, see [PG_THREAD_WAIT_STATUS](../system-views/PG_THREAD_WAIT_STATUS.md).
When the record level is L2, statement lock events also start to be recorded in the list. The events are recorded in chronological order. The number of records is limited by the track\_stmt\_details\_size parameter.
Events include:
- Start locking.
- Complete locking.
- Start lock waiting.
- Complete lock waiting.
- Start unlocking.
- Complete unlocking.
- Start lightweight lock waiting.
- Complete lightweight lock waiting. | L2 | | is_slow_sql | boolean | Whether the SQL statement is a slow SQL statement.
- **t** (true): yes
- **f** (false): no | L0 | | trace_id | text | Driver-specific trace ID, which is associated with an application request. | L0 | - -> **Note**: -> -> To facilitate cleanup of this table, the [TRUNCATE](../../sql-syntax/TRUNCATE.md) command is allowed to empty the table. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/system-catalogs.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/system-catalogs.md index 14fba2e2..16b2bd2b 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/system-catalogs.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-catalogs/system-catalogs.md @@ -1,120 +1,120 @@ ---- -title: System Catalogs -summary: System Catalogs -author: zhang cuiping -date: 2023-04-07 ---- - -# System Catalogs - -- **[GS_ASP](./GS_ASP.md)** -- **[GS_AUDITING_POLICY](GS_AUDITING_POLICY.md)** -- **[GS_AUDITING_POLICY_ACCESS](GS_AUDITING_POLICY_ACCESS.md)** -- **[GS_AUDITING_POLICY_FILTERS](GS_AUDITING_POLICY_FILTERS.md)** -- **[GS_AUDITING_POLICY_PRIVILEGES](GS_AUDITING_POLICY_PRIVILEGES.md)** -- **[GS_CLIENT_GLOBAL_KEYS](GS_CLIENT_GLOBAL_KEYS.md)** -- **[GS_CLIENT_GLOBAL_KEYS_ARGS](GS_CLIENT_GLOBAL_KEYS_ARGS.md)** -- **[GS_COLUMN_KEYS](GS_COLUMN_KEYS.md)** -- **[GS_COLUMN_KEYS_ARGS](GS_COLUMN_KEYS_ARGS.md)** -- **[GS_DB_PRIVILEGE](GS_DB_PRIVILEGE.md)** -- **[GS_ENCRYPTED_COLUMNS](GS_ENCRYPTED_COLUMNS.md)** -- **[GS_ENCRYPTED_PROC](GS_ENCRYPTED_PROC.md)** -- **[GS_GLOBAL_CHAIN](GS_GLOBAL_CHAIN.md)** -- **[GS_GLOBAL_CONFIG](GS_GLOBAL_CONFIG.md)** -- **[GS_MASKING_POLICY](GS_MASKING_POLICY.md)** -- **[GS_MASKING_POLICY_ACTIONS](GS_MASKING_POLICY_ACTIONS.md)** -- **[GS_MASKING_POLICY_FILTERS](GS_MASKING_POLICY_FILTERS.md)** -- **[GS_MATVIEW](GS_MATVIEW.md)** -- **[GS_MATVIEW_DEPENDENCY](GS_MATVIEW_DEPENDENCY.md)** -- **[GS_MODEL_WAREHOUSE](GS_MODEL_WAREHOUSE.md)** -- **[GS_OPT_MODEL](GS_OPT_MODEL.md)** -- **[GS_PACKAGE](GS_PACKAGE.md)** -- **[GS_POLICY_LABEL](GS_POLICY_LABEL.md)** -- **[GS_RECYCLEBIN](GS_RECYCLEBIN.md)** -- **[GS_TXN_SNAPSHOT](GS_TXN_SNAPSHOT.md)** -- **[GS_UID](GS_UID.md)** -- **[GS_WLM_EC_OPERATOR_INFO](GS_WLM_EC_OPERATOR_INFO.md)** -- **[GS_WLM_INSTANCE_HISTORY](GS_WLM_INSTANCE_HISTORY.md)** -- **[GS_WLM_OPERATOR_INFO](GS_WLM_OPERATOR_INFO.md)** -- **[GS_WLM_PLAN_ENCODING_TABLE](GS_WLM_PLAN_ENCODING_TABLE.md)** -- **[GS_WLM_PLAN_OPERATOR_INFO](GS_WLM_PLAN_OPERATOR_INFO.md)** -- **[GS_WLM_SESSION_QUERY_INFO_ALL](GS_WLM_SESSION_QUERY_INFO_ALL.md)** -- **[GS_WLM_USER_RESOURCE_HISTORY](GS_WLM_USER_RESOURCE_HISTORY.md)** -- **[PG_AGGREGATE](PG_AGGREGATE.md)** -- **[PG_AM](PG_AM.md)** -- **[PG_AMOP](PG_AMOP.md)** -- **[PG_AMPROC](PG_AMPROC.md)** -- **[PG_APP_WORKLOADGROUP_MAPPING](PG_APP_WORKLOADGROUP_MAPPING.md)** -- **[PG_ATTRDEF](PG_ATTRDEF.md)** -- **[PG_ATTRIBUTE](PG_ATTRIBUTE.md)** -- **[PG_AUTHID](PG_AUTHID.md)** -- **[PG_AUTH_HISTORY](PG_AUTH_HISTORY.md)** -- **[PG_AUTH_MEMBERS](PG_AUTH_MEMBERS.md)** -- **[PG_CAST](PG_CAST.md)** -- **[PG_CLASS](PG_CLASS.md)** -- **[PG_COLLATION](PG_COLLATION.md)** -- **[PG_CONSTRAINT](PG_CONSTRAINT.md)** -- **[PG_CONVERSION](PG_CONVERSION.md)** -- **[PG_DATABASE](PG_DATABASE.md)** -- **[PG_DB_ROLE_SETTING](PG_DB_ROLE_SETTING.md)** -- **[PG_DEFAULT_ACL](PG_DEFAULT_ACL.md)** -- **[PG_DEPEND](PG_DEPEND.md)** -- **[PG_DESCRIPTION](PG_DESCRIPTION.md)** -- **[PG_DIRECTORY](PG_DIRECTORY.md)** -- **[PG_ENUM](PG_ENUM.md)** -- 
**[PG_EVENT_TRIGGER](PG_EVENT_TRIGGER.md)** -- **[PG_EXTENSION](PG_EXTENSION.md)** -- **[PG_EXTENSION_DATA_SOURCE](PG_EXTENSION_DATA_SOURCE.md)** -- **[PG_FOREIGN_DATA_WRAPPER](PG_FOREIGN_DATA_WRAPPER.md)** -- **[PG_FOREIGN_SERVER](PG_FOREIGN_SERVER.md)** -- **[PG_FOREIGN_TABLE](PG_FOREIGN_TABLE.md)** -- **[PG_HASHBUCKET](PG_HASHBUCKET.md)** -- **[PG_INDEX](PG_INDEX.md)** -- **[PG_INHERITS](PG_INHERITS.md)** -- **[PG_JOB](PG_JOB.md)** -- **[PG_JOB_PROC](PG_JOB_PROC.md)** -- **[PG_LANGUAGE](PG_LANGUAGE.md)** -- **[PG_LARGEOBJECT](PG_LARGEOBJECT.md)** -- **[PG_LARGEOBJECT_METADATA](PG_LARGEOBJECT_METADATA.md)** -- **[PG_NAMESPACE](PG_NAMESPACE.md)** -- **[PG_OBJECT](PG_OBJECT.md)** -- **[PG_OPCLASS](PG_OPCLASS.md)** -- **[PG_OPERATOR](PG_OPERATOR.md)** -- **[PG_OPFAMILY](PG_OPFAMILY.md)** -- **[PG_PARTITION](PG_PARTITION.md)** -- **[PG_PLTEMPLATE](PG_PLTEMPLATE.md)** -- **[PG_PROC](PG_PROC.md)** -- **[PG_PUBLICATION](PG_PUBLICATION.md)** -- **[PG_PUBLICATION_REL](PG_PUBLICATION_REL.md)** -- **[PG_RANGE](PG_RANGE.md)** -- **[PG_REPLICATION_ORIGIN](PG_REPLICATION_ORIGIN.md)** -- **[PG_RESOURCE_POOL](PG_RESOURCE_POOL.md)** -- **[PG_REWRITE](PG_REWRITE.md)** -- **[PG_RLSPOLICY](PG_RLSPOLICY.md)** -- **[PG_SECLABEL](PG_SECLABEL.md)** -- **[PG_SET](PG_SET.md)** -- **[PG_SHDEPEND](PG_SHDEPEND.md)** -- **[PG_SHDESCRIPTION](PG_SHDESCRIPTION.md)** -- **[PG_SHSECLABEL](PG_SHSECLABEL.md)** -- **[PG_STATISTIC](PG_STATISTIC.md)** -- **[PG_STATISTIC_EXT](PG_STATISTIC_EXT.md)** -- **[PG_SUBSCRIPTION](PG_SUBSCRIPTION.md)** -- **[PG_SUBSCRIPTION_REL](PG_SUBSCRIPTION_REL.md)** -- **[PG_SYNONYM](PG_SYNONYM.md)** -- **[PG_TABLESPACE](PG_TABLESPACE.md)** -- **[PG_TRIGGER](PG_TRIGGER.md)** -- **[PG_TS_CONFIG](PG_TS_CONFIG.md)** -- **[PG_TS_CONFIG_MAP](PG_TS_CONFIG_MAP.md)** -- **[PG_TS_DICT](PG_TS_DICT.md)** -- **[PG_TS_PARSER](PG_TS_PARSER.md)** -- **[PG_TS_TEMPLATE](PG_TS_TEMPLATE.md)** -- **[PG_TYPE](PG_TYPE.md)** -- **[PG_USER_MAPPING](PG_USER_MAPPING.md)** -- **[PG_USER_STATUS](PG_USER_STATUS.md)** -- **[PG_WORKLOAD_GROUP](PG_WORKLOAD_GROUP.md)** -- **[PGXC_CLASS](PGXC_CLASS.md)** -- **[PGXC_GROUP](PGXC_GROUP.md)** -- **[PGXC_NODE](PGXC_NODE.md)** -- **[PGXC_SLICE](PGXC_SLICE.md)** -- **[PLAN_TABLE_DATA](PLAN_TABLE_DATA.md)** +--- +title: System Catalogs +summary: System Catalogs +author: zhang cuiping +date: 2023-04-07 +--- + +# System Catalogs + +- **[GS_ASP](./GS_ASP.md)** +- **[GS_AUDITING_POLICY](GS_AUDITING_POLICY.md)** +- **[GS_AUDITING_POLICY_ACCESS](GS_AUDITING_POLICY_ACCESS.md)** +- **[GS_AUDITING_POLICY_FILTERS](GS_AUDITING_POLICY_FILTERS.md)** +- **[GS_AUDITING_POLICY_PRIVILEGES](GS_AUDITING_POLICY_PRIVILEGES.md)** +- **[GS_CLIENT_GLOBAL_KEYS](GS_CLIENT_GLOBAL_KEYS.md)** +- **[GS_CLIENT_GLOBAL_KEYS_ARGS](GS_CLIENT_GLOBAL_KEYS_ARGS.md)** +- **[GS_COLUMN_KEYS](GS_COLUMN_KEYS.md)** +- **[GS_COLUMN_KEYS_ARGS](GS_COLUMN_KEYS_ARGS.md)** +- **[GS_DB_PRIVILEGE](GS_DB_PRIVILEGE.md)** +- **[GS_ENCRYPTED_COLUMNS](GS_ENCRYPTED_COLUMNS.md)** +- **[GS_ENCRYPTED_PROC](GS_ENCRYPTED_PROC.md)** +- **[GS_GLOBAL_CHAIN](GS_GLOBAL_CHAIN.md)** +- **[GS_GLOBAL_CONFIG](GS_GLOBAL_CONFIG.md)** +- **[GS_MASKING_POLICY](GS_MASKING_POLICY.md)** +- **[GS_MASKING_POLICY_ACTIONS](GS_MASKING_POLICY_ACTIONS.md)** +- **[GS_MASKING_POLICY_FILTERS](GS_MASKING_POLICY_FILTERS.md)** +- **[GS_MATVIEW](GS_MATVIEW.md)** +- **[GS_MATVIEW_DEPENDENCY](GS_MATVIEW_DEPENDENCY.md)** +- **[GS_MODEL_WAREHOUSE](GS_MODEL_WAREHOUSE.md)** +- **[GS_OPT_MODEL](GS_OPT_MODEL.md)** +- **[GS_PACKAGE](GS_PACKAGE.md)** +- 
**[GS_POLICY_LABEL](GS_POLICY_LABEL.md)** +- **[GS_RECYCLEBIN](GS_RECYCLEBIN.md)** +- **[GS_TXN_SNAPSHOT](GS_TXN_SNAPSHOT.md)** +- **[GS_UID](GS_UID.md)** +- **[GS_WLM_EC_OPERATOR_INFO](GS_WLM_EC_OPERATOR_INFO.md)** +- **[GS_WLM_INSTANCE_HISTORY](GS_WLM_INSTANCE_HISTORY.md)** +- **[GS_WLM_OPERATOR_INFO](GS_WLM_OPERATOR_INFO.md)** +- **[GS_WLM_PLAN_ENCODING_TABLE](GS_WLM_PLAN_ENCODING_TABLE.md)** +- **[GS_WLM_PLAN_OPERATOR_INFO](GS_WLM_PLAN_OPERATOR_INFO.md)** +- **[GS_WLM_SESSION_QUERY_INFO_ALL](GS_WLM_SESSION_QUERY_INFO_ALL.md)** +- **[GS_WLM_USER_RESOURCE_HISTORY](GS_WLM_USER_RESOURCE_HISTORY.md)** +- **[PG_AGGREGATE](PG_AGGREGATE.md)** +- **[PG_AM](PG_AM.md)** +- **[PG_AMOP](PG_AMOP.md)** +- **[PG_AMPROC](PG_AMPROC.md)** +- **[PG_APP_WORKLOADGROUP_MAPPING](PG_APP_WORKLOADGROUP_MAPPING.md)** +- **[PG_ATTRDEF](PG_ATTRDEF.md)** +- **[PG_ATTRIBUTE](PG_ATTRIBUTE.md)** +- **[PG_AUTHID](PG_AUTHID.md)** +- **[PG_AUTH_HISTORY](PG_AUTH_HISTORY.md)** +- **[PG_AUTH_MEMBERS](PG_AUTH_MEMBERS.md)** +- **[PG_CAST](PG_CAST.md)** +- **[PG_CLASS](PG_CLASS.md)** +- **[PG_COLLATION](PG_COLLATION.md)** +- **[PG_CONSTRAINT](PG_CONSTRAINT.md)** +- **[PG_CONVERSION](PG_CONVERSION.md)** +- **[PG_DATABASE](PG_DATABASE.md)** +- **[PG_DB_ROLE_SETTING](PG_DB_ROLE_SETTING.md)** +- **[PG_DEFAULT_ACL](PG_DEFAULT_ACL.md)** +- **[PG_DEPEND](PG_DEPEND.md)** +- **[PG_DESCRIPTION](PG_DESCRIPTION.md)** +- **[PG_DIRECTORY](PG_DIRECTORY.md)** +- **[PG_ENUM](PG_ENUM.md)** +- **[PG_EVENT_TRIGGER](PG_EVENT_TRIGGER.md)** +- **[PG_EXTENSION](PG_EXTENSION.md)** +- **[PG_EXTENSION_DATA_SOURCE](PG_EXTENSION_DATA_SOURCE.md)** +- **[PG_FOREIGN_DATA_WRAPPER](PG_FOREIGN_DATA_WRAPPER.md)** +- **[PG_FOREIGN_SERVER](PG_FOREIGN_SERVER.md)** +- **[PG_FOREIGN_TABLE](PG_FOREIGN_TABLE.md)** +- **[PG_HASHBUCKET](PG_HASHBUCKET.md)** +- **[PG_INDEX](PG_INDEX.md)** +- **[PG_INHERITS](PG_INHERITS.md)** +- **[PG_JOB](PG_JOB.md)** +- **[PG_JOB_PROC](PG_JOB_PROC.md)** +- **[PG_LANGUAGE](PG_LANGUAGE.md)** +- **[PG_LARGEOBJECT](PG_LARGEOBJECT.md)** +- **[PG_LARGEOBJECT_METADATA](PG_LARGEOBJECT_METADATA.md)** +- **[PG_NAMESPACE](PG_NAMESPACE.md)** +- **[PG_OBJECT](PG_OBJECT.md)** +- **[PG_OPCLASS](PG_OPCLASS.md)** +- **[PG_OPERATOR](PG_OPERATOR.md)** +- **[PG_OPFAMILY](PG_OPFAMILY.md)** +- **[PG_PARTITION](PG_PARTITION.md)** +- **[PG_PLTEMPLATE](PG_PLTEMPLATE.md)** +- **[PG_PROC](PG_PROC.md)** +- **[PG_PUBLICATION](PG_PUBLICATION.md)** +- **[PG_PUBLICATION_REL](PG_PUBLICATION_REL.md)** +- **[PG_RANGE](PG_RANGE.md)** +- **[PG_REPLICATION_ORIGIN](PG_REPLICATION_ORIGIN.md)** +- **[PG_RESOURCE_POOL](PG_RESOURCE_POOL.md)** +- **[PG_REWRITE](PG_REWRITE.md)** +- **[PG_RLSPOLICY](PG_RLSPOLICY.md)** +- **[PG_SECLABEL](PG_SECLABEL.md)** +- **[PG_SET](PG_SET.md)** +- **[PG_SHDEPEND](PG_SHDEPEND.md)** +- **[PG_SHDESCRIPTION](PG_SHDESCRIPTION.md)** +- **[PG_SHSECLABEL](PG_SHSECLABEL.md)** +- **[PG_STATISTIC](PG_STATISTIC.md)** +- **[PG_STATISTIC_EXT](PG_STATISTIC_EXT.md)** +- **[PG_SUBSCRIPTION](PG_SUBSCRIPTION.md)** +- **[PG_SUBSCRIPTION_REL](PG_SUBSCRIPTION_REL.md)** +- **[PG_SYNONYM](PG_SYNONYM.md)** +- **[PG_TABLESPACE](PG_TABLESPACE.md)** +- **[PG_TRIGGER](PG_TRIGGER.md)** +- **[PG_TS_CONFIG](PG_TS_CONFIG.md)** +- **[PG_TS_CONFIG_MAP](PG_TS_CONFIG_MAP.md)** +- **[PG_TS_DICT](PG_TS_DICT.md)** +- **[PG_TS_PARSER](PG_TS_PARSER.md)** +- **[PG_TS_TEMPLATE](PG_TS_TEMPLATE.md)** +- **[PG_TYPE](PG_TYPE.md)** +- **[PG_USER_MAPPING](PG_USER_MAPPING.md)** +- **[PG_USER_STATUS](PG_USER_STATUS.md)** +- **[PG_WORKLOAD_GROUP](PG_WORKLOAD_GROUP.md)** +- 
**[PGXC_CLASS](PGXC_CLASS.md)** +- **[PGXC_GROUP](PGXC_GROUP.md)** +- **[PGXC_NODE](PGXC_NODE.md)** +- **[PGXC_SLICE](PGXC_SLICE.md)** +- **[PLAN_TABLE_DATA](PLAN_TABLE_DATA.md)** - **[STATEMENT_HISTORY](STATEMENT_HISTORY.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/GS_SHARED_MEMORY_DETAIL.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/GS_SHARED_MEMORY_DETAIL.md new file mode 100644 index 00000000..3831e750 --- /dev/null +++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/GS_SHARED_MEMORY_DETAIL.md @@ -0,0 +1,21 @@ +--- +title: GS_SHARED_MEMORY_DETAIL +summary: GS_SHARED_MEMORY_DETAIL +author: Guo Huan +date: 2021-04-19 +--- + +# GS_SHARED_MEMORY_DETAIL + +Records usage information for all generated shared memory contexts for the current node. + +**Table 1** GS_SHARED_MEMORY_DETAIL fields + +| Name | Type | Description | +| :---------- | :------- | :------------------------------------------ | +| contextname | text | The name of the memory context. | +| level | smallint | The level of the memory context. | +| parent | text | Parent memory context. | +| totalsize | bigint | Total shared memory size (in bytes). | +| freesize | bigint | Remaining size of shared memory (in bytes). | +| usedsize | bigint | Shared memory used size (in bytes). | diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/IOS_STATUS.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/IOS_STATUS.md new file mode 100644 index 00000000..e1e56a18 --- /dev/null +++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/IOS_STATUS.md @@ -0,0 +1,27 @@ +--- +title: IOS_STATUS +summary: IOS_STATUS +author: Guo Huan +date: 2023-12-20 +--- + +# IOS_STATUS + +The IOS_STATUS view is used to display the performance status of the I/O thread pool responsible for read-ahead for a recent period of time, including IOSCtl dispatch requests, I/O latency/bandwidth, queue backlog, and other metrics. When the main query thread I/O latency is very high or the cache hit rate is low, users or developers can visualize the performance of the read-ahead thread pool to help locate the problem. 
+ +**Table 1** IOS_STATUS fields + +| Name | Type | Description | +| :----------------------------- | :--- | :----------------------------------------------------------- | +| ios_worker_num | Int4 | Current number of ios_worker threads in the I/O thread pool | +| io_requests | Int8 | Total number of pre-read requests received by the thread pool | +| io_dispatched | Int8 | Total number of pre-read requests dispatched by the thread pool to ios_worker | +| avg_io_size_blks | Int4 | Average number of 8K pages included in each pre-read request | +| avg_io_request_latency_history | Int4 | Average total latency of all historical pre-read requests (total time from entry into the IOSCtl queue to completion of ios_worker processing), in microseconds | +| avg_io_latency_history | Int4 | Historical average I/O latency (time for I/O to read the disk) of all pre-read requests, in microseconds | +| avg_io_request_latency_ps | Int4 | Average total latency of all pre-read requests in the last 1 second, in microseconds | +| avg_io_latency_ps | Int4 | Average I/O latency of all pre-read requests in the last 1 second, in microseconds | +| blks_read_ps | Int4 | Total number of 8K pages read in the last 1 second | +| io_read_ps | Int4 | Total number of disk reads in the last 1 second | +| io_queue_depth | Int4 | Current queue depth of IOSCtl | +| ring_buff_mem | Int4 | Current size of the memory that manages the pre-read ring buffer data structure, in bytes | diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/PATCH_INFORMATION_TABLE.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/PATCH_INFORMATION_TABLE.md new file mode 100644 index 00000000..cd7b5fc0 --- /dev/null +++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/PATCH_INFORMATION_TABLE.md @@ -0,0 +1,20 @@ +--- +title: PATCH_INFORMATION_TABLE +summary: PATCH_INFORMATION_TABLE +author: Guo Huan +date: 2024-03-28 +--- + +# PATCH_INFORMATION_TABLE + +PATCH_INFORMATION_TABLE records information about the patches installed on the instance and the corresponding bugs fixed. + +**Table 1** PATCH_INFORMATION_TABLE fields + +| Name | Type | Description | +| :---------------- | :----------------------- | :----------------------------------------------------------- | +| patch_version | text | Patch version number. Version numbers increase by one, without skipping. | +| bugid | text | Bug ID, limited to 20 characters in length. It can be composed of English letters, numbers, and English symbols. | +| patch_build_time | timestamp with time zone | Patch build time | +| patch_commit_time | timestamp with time zone | Patch commit time | +| bugdesc | text | Bug description. Chinese characters and Chinese symbols are currently not supported.
Only English letters, numbers and symbols are supported | diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/system-views.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/system-views.md index 8c734877..8a8b8fa6 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/system-views.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/system-views/system-views.md @@ -1,127 +1,130 @@ ---- -title: System Views -summary: System Views -author: zhang cuiping -date: 2023-04-07 ---- - -# System Views - -- **[GET_GLOBAL_PREPARED_XACTS (Discarded)](GET_GLOBAL_PREPARED_XACTS.md)** -- **[GS_ASYNC_SUBMIT_SESSIONS_STATUS](GS_ASYNC_SUBMIT_SESSIONS_STATUS.md)** -- **[GS_AUDITING](GS_AUDITING.md)** -- **[GS_AUDITING_ACCESS](GS_AUDITING_ACCESS.md)** -- **[GS_AUDITING_PRIVILEGE](GS_AUDITING_PRIVILEGE.md)** -- **[GS_CLUSTER_RESOURCE_INFO](GS_CLUSTER_RESOURCE_INFO.md)** -- **[GS_COMPRESSION](GS_COMPRESSION.md)** -- **[GS_DB_PRIVILEGES](GS_DB_PRIVILEGES.md)** -- **[GS_FILE_STAT](GS_FILE_STAT.md)** -- **[GS_GSC_MEMORY_DETAIL](GS_GSC_MEMORY_DETAIL.md)** -- **[GS_INSTANCE_TIME](GS_INSTANCE_TIME.md)** -- **[GS_LABELS](GS_LABELS.md)** -- **[GS_LSC_MEMORY_DETAIL](GS_LSC_MEMORY_DETAIL.md)** -- **[GS_MASKING](GS_MASKING.md)** -- **[GS_MATVIEWS](GS_MATVIEWS.md)** -- **[GS_OS_RUN_INFO](GS_OS_RUN_INFO.md)** -- **[GS_REDO_STAT](GS_REDO_STAT.md)** -- **[GS_SESSION_CPU_STATISTICS](GS_SESSION_CPU_STATISTICS.md)** -- **[GS_SESSION_MEMORY](GS_SESSION_MEMORY.md)** -- **[GS_SESSION_MEMORY_CONTEXT](GS_SESSION_MEMORY_CONTEXT.md)** -- **[GS_SESSION_MEMORY_DETAIL](GS_SESSION_MEMORY_DETAIL.md)** -- **[GS_SESSION_MEMORY_STATISTICS](GS_SESSION_MEMORY_STATISTICS.md)** -- **[GS_SESSION_STAT](GS_SESSION_STAT.md)** -- **[GS_SESSION_TIME](GS_SESSION_TIME.md)** -- **[GS_SQL_COUNT](GS_SQL_COUNT.md)** -- **[GS_STAT_SESSION_CU](GS_STAT_SESSION_CU.md)** -- **[GS_THREAD_MEMORY_CONTEXT](GS_THREAD_MEMORY_CONTEXT.md)** -- **[GS_TOTAL_MEMORY_DETAIL](GS_TOTAL_MEMORY_DETAIL.md)** -- **[GS_WLM_CGROUP_INFO](GS_WLM_CGROUP_INFO.md)** -- **[GS_WLM_EC_OPERATOR_STATISTICS](GS_WLM_EC_OPERATOR_STATISTICS.md)** -- **[GS_WLM_OPERATOR_HISTORY](GS_WLM_OPERATOR_HISTORY.md)** -- **[GS_WLM_OPERATOR_STATISTICS](GS_WLM_OPERATOR_STATISTICS.md)** -- **[GS_WLM_PLAN_OPERATOR_HISTORY](GS_WLM_PLAN_OPERATOR_HISTORY.md)** -- **[GS_WLM_REBUILD_USER_RESOURCE_POOL](GS_WLM_REBUILD_USER_RESOURCE_POOL.md)** -- **[GS_WLM_RESOURCE_POOL](GS_WLM_RESOURCE_POOL.md)** -- **[GS_WLM_SESSION_HISTORY](GS_WLM_SESSION_HISTORY.md)** -- **[GS_WLM_SESSION_INFO](GS_WLM_SESSION_INFO.md)** -- **[GS_WLM_SESSION_INFO_ALL](GS_WLM_SESSION_INFO_ALL.md)** -- **[GS_WLM_SESSION_STATISTICS](GS_WLM_SESSION_STATISTICS.md)** -- **[GS_WLM_USER_INFO](GS_WLM_USER_INFO.md)** -- **[MPP_TABLES](MPP_TABLES.md)** -- **[PG_AVAILABLE_EXTENSION_VERSIONS](PG_AVAILABLE_EXTENSION_VERSIONS.md)** -- **[PG_AVAILABLE_EXTENSION](PG_AVAILABLE_EXTENSIONS.md)** -- **[PG_CURSORS](PG_CURSORS.md)** -- **[PG_COMM_DELAY](PG_COMM_DELAY.md)** -- **[PG_COMM_RECV_STREAM](PG_COMM_RECV_STREAM.md)** -- **[PG_COMM_SEND_STREAM](PG_COMM_SEND_STREAM.md)** -- **[PG_COMM_STATUS](PG_COMM_STATUS.md)** -- **[PG_CONTROL_GROUP_CONFIG](PG_CONTROL_GROUP_CONFIG.md)** -- **[PG_EXT_STATS](PG_EXT_STATS.md)** -- **[PG_GET_INVALID_BACKENDS](PG_GET_INVALID_BACKENDS.md)** -- **[PG_GET_SENDERS_CATCHUP_TIME](PG_GET_SENDERS_CATCHUP_TIME.md)** -- **[PG_GROUP](PG_GROUP.md)** -- 
**[PG_GTT_RELSTATS](PG_GTT_RELSTATS.md)** -- **[PG_GTT_STATS](PG_GTT_STATS.md)** -- **[PG_GTT_ATTACHED_PIDS](PG_GTT_ATTACHED_PIDS.md)** -- **[PG_INDEXES](PG_INDEXES.md)** -- **[PG_LOCKS](PG_LOCKS.md)** -- **[PG_NODE_ENV](PG_NODE_ENV.md)** -- **[PG_OS_THREADS](PG_OS_THREADS.md)** -- **[PG_PREPARED_STATEMENTS](PG_PREPARED_STATEMENTS.md)** -- **[PG_PREPARED_XACTS](PG_PREPARED_XACTS.md)** -- **[PG_PUBLICATION_TABLES](PG_PUBLICATION_TABLES.md)** -- **[PG_REPLICATION_ORIGIN_STATUS](PG_REPLICATION_ORIGIN_STATUS.md)** -- **[PG_REPLICATION_SLOTS](PG_REPLICATION_SLOTS.md)** -- **[PG_RLSPOLICIES](PG_RLSPOLICIES.md)** -- **[PG_ROLES](PG_ROLES.md)** -- **[PG_RULES](PG_RULES.md)** -- **[PG_RUNNING_XACTS](PG_RUNNING_XACTS.md)** -- **[PG_SECLABELS](PG_SECLABELS.md)** -- **[PG_SESSION_IOSTAT](PG_SESSION_IOSTAT.md)** -- **[PG_SESSION_WLMSTAT](PG_SESSION_WLMSTAT.md)** -- **[PG_SETTINGS](PG_SETTINGS.md)** -- **[PG_SHADOW](PG_SHADOW.md)** -- **[PG_STATS](PG_STATS.md)** -- **[PG_STAT_ACTIVITY](PG_STAT_ACTIVITY.md)** -- **[PG_STAT_ACTIVITY_NG](PG_STAT_ACTIVITY_NG.md)** -- **[PG_STAT_ALL_INDEXES](PG_STAT_ALL_INDEXES.md)** -- **[PG_STAT_ALL_TABLES](PG_STAT_ALL_TABLES.md)** -- **[PG_STAT_BAD_BLOCK](PG_STAT_BAD_BLOCK.md)** -- **[PG_STAT_BGWRITER](PG_STAT_BGWRITER.md)** -- **[PG_STAT_DATABASE](PG_STAT_DATABASE.md)** -- **[PG_STAT_DATABASE_CONFLICTS](PG_STAT_DATABASE_CONFLICTS.md)** -- **[PG_STAT_USER_FUNCTIONS](PG_STAT_USER_FUNCTIONS.md)** -- **[PG_STAT_USER_INDEXES](PG_STAT_USER_INDEXES.md)** -- **[PG_STAT_USER_TABLES](PG_STAT_USER_TABLES.md)** -- **[PG_STAT_REPLICATION](PG_STAT_REPLICATION.md)** -- **[PG_STAT_SUBSCRIPTION](PG_STAT_SUBSCRIPTION.md)** -- **[PG_STAT_SYS_INDEXES](PG_STAT_SYS_INDEXES.md)** -- **[PG_STAT_SYS_TABLES](PG_STAT_SYS_TABLES.md)** -- **[PG_STAT_XACT_ALL_TABLES](PG_STAT_XACT_ALL_TABLES.md)** -- **[PG_STAT_XACT_SYS_TABLES](PG_STAT_XACT_SYS_TABLES.md)** -- **[PG_STAT_XACT_USER_FUNCTIONS](PG_STAT_XACT_USER_FUNCTIONS.md)** -- **[PG_STAT_XACT_USER_TABLES](PG_STAT_XACT_USER_TABLES.md)** -- **[PG_STATIO_ALL_INDEXES](PG_STATIO_ALL_INDEXES.md)** -- **[PG_STATIO_ALL_SEQUENCES](PG_STATIO_ALL_SEQUENCES.md)** -- **[PG_STATIO_ALL_TABLES](PG_STATIO_ALL_TABLES.md)** -- **[PG_STATIO_SYS_INDEXES](PG_STATIO_SYS_INDEXES.md)** -- **[PG_STATIO_SYS_SEQUENCES](PG_STATIO_SYS_SEQUENCES.md)** -- **[PG_STATIO_SYS_TABLES](PG_STATIO_SYS_TABLES.md)** -- **[PG_STATIO_USER_INDEXES](PG_STATIO_USER_INDEXES.md)** -- **[PG_STATIO_USER_SEQUENCES](PG_STATIO_USER_SEQUENCES.md)** -- **[PG_STATIO_USER_TABLES](PG_STATIO_USER_TABLES.md)** -- **[PG_TABLES](PG_TABLES.md)** -- **[PG_TDE_INFO](PG_TDE_INFO.md)** -- **[PG_THREAD_WAIT_STATUS](PG_THREAD_WAIT_STATUS.md)** -- **[PG_TIMEZONE_ABBREVS](PG_TIMEZONE_ABBREVS.md)** -- **[PG_TIMEZONE_NAMES](PG_TIMEZONE_NAMES.md)** -- **[PG_TOTAL_MEMORY_DETAIL](PG_TOTAL_MEMORY_DETAIL.md)** -- **[PG_TOTAL_USER_RESOURCE_INFO](PG_TOTAL_USER_RESOURCE_INFO.md)** -- **[PG_TOTAL_USER_RESOURCE_INFO_OID](PG_TOTAL_USER_RESOURCE_INFO_OID.md)** -- **[PG_USER](PG_USER.md)** -- **[PG_USER_MAPPINGS](PG_USER_MAPPINGS.md)** -- **[PG_VIEWS](PG_VIEWS.md)** -- **[PG_VARIABLE_INFO](PG_VARIABLE_INFO.md)** -- **[PG_WLM_STATISTICS](PG_WLM_STATISTICS.md)** -- **[PGXC_PREPARED_XACTS](PGXC_PREPARED_XACTS.md)** -- **[PLAN_TABLE](PLAN_TABLE.md)** \ No newline at end of file +--- +title: System Views +summary: System Views +author: zhang cuiping +date: 2023-04-07 +--- + +# System Views + +- **[GET_GLOBAL_PREPARED_XACTS (Discarded)](GET_GLOBAL_PREPARED_XACTS.md)** +- 
**[GS_ASYNC_SUBMIT_SESSIONS_STATUS](GS_ASYNC_SUBMIT_SESSIONS_STATUS.md)** +- **[GS_AUDITING](GS_AUDITING.md)** +- **[GS_AUDITING_ACCESS](GS_AUDITING_ACCESS.md)** +- **[GS_AUDITING_PRIVILEGE](GS_AUDITING_PRIVILEGE.md)** +- **[GS_CLUSTER_RESOURCE_INFO](GS_CLUSTER_RESOURCE_INFO.md)** +- **[GS_COMPRESSION](GS_COMPRESSION.md)** +- **[GS_DB_PRIVILEGES](GS_DB_PRIVILEGES.md)** +- **[GS_FILE_STAT](GS_FILE_STAT.md)** +- **[GS_GSC_MEMORY_DETAIL](GS_GSC_MEMORY_DETAIL.md)** +- **[GS_INSTANCE_TIME](GS_INSTANCE_TIME.md)** +- **[GS_LABELS](GS_LABELS.md)** +- **[GS_LSC_MEMORY_DETAIL](GS_LSC_MEMORY_DETAIL.md)** +- **[GS_MASKING](GS_MASKING.md)** +- **[GS_MATVIEWS](GS_MATVIEWS.md)** +- **[GS_OS_RUN_INFO](GS_OS_RUN_INFO.md)** +- **[GS_REDO_STAT](GS_REDO_STAT.md)** +- **[GS_SESSION_CPU_STATISTICS](GS_SESSION_CPU_STATISTICS.md)** +- **[GS_SESSION_MEMORY](GS_SESSION_MEMORY.md)** +- **[GS_SESSION_MEMORY_CONTEXT](GS_SESSION_MEMORY_CONTEXT.md)** +- **[GS_SESSION_MEMORY_DETAIL](GS_SESSION_MEMORY_DETAIL.md)** +- **[GS_SESSION_MEMORY_STATISTICS](GS_SESSION_MEMORY_STATISTICS.md)** +- **[GS_SESSION_STAT](GS_SESSION_STAT.md)** +- **[GS_SESSION_TIME](GS_SESSION_TIME.md)** +- **[GS_SHARED_MEMORY_DETAIL](./GS_SHARED_MEMORY_DETAIL.md)** +- **[GS_SQL_COUNT](GS_SQL_COUNT.md)** +- **[GS_STAT_SESSION_CU](GS_STAT_SESSION_CU.md)** +- **[GS_THREAD_MEMORY_CONTEXT](GS_THREAD_MEMORY_CONTEXT.md)** +- **[GS_TOTAL_MEMORY_DETAIL](GS_TOTAL_MEMORY_DETAIL.md)** +- **[GS_WLM_CGROUP_INFO](GS_WLM_CGROUP_INFO.md)** +- **[GS_WLM_EC_OPERATOR_STATISTICS](GS_WLM_EC_OPERATOR_STATISTICS.md)** +- **[GS_WLM_OPERATOR_HISTORY](GS_WLM_OPERATOR_HISTORY.md)** +- **[GS_WLM_OPERATOR_STATISTICS](GS_WLM_OPERATOR_STATISTICS.md)** +- **[GS_WLM_PLAN_OPERATOR_HISTORY](GS_WLM_PLAN_OPERATOR_HISTORY.md)** +- **[GS_WLM_REBUILD_USER_RESOURCE_POOL](GS_WLM_REBUILD_USER_RESOURCE_POOL.md)** +- **[GS_WLM_RESOURCE_POOL](GS_WLM_RESOURCE_POOL.md)** +- **[GS_WLM_SESSION_HISTORY](GS_WLM_SESSION_HISTORY.md)** +- **[GS_WLM_SESSION_INFO](GS_WLM_SESSION_INFO.md)** +- **[GS_WLM_SESSION_INFO_ALL](GS_WLM_SESSION_INFO_ALL.md)** +- **[GS_WLM_SESSION_STATISTICS](GS_WLM_SESSION_STATISTICS.md)** +- **[GS_WLM_USER_INFO](GS_WLM_USER_INFO.md)** +- **[IOS_STATUS](IOS_STATUS.md)** +- **[MPP_TABLES](MPP_TABLES.md)** +- **[PG_AVAILABLE_EXTENSION_VERSIONS](PG_AVAILABLE_EXTENSION_VERSIONS.md)** +- **[PG_AVAILABLE_EXTENSION](PG_AVAILABLE_EXTENSIONS.md)** +- **[PG_CURSORS](PG_CURSORS.md)** +- **[PG_COMM_DELAY](PG_COMM_DELAY.md)** +- **[PG_COMM_RECV_STREAM](PG_COMM_RECV_STREAM.md)** +- **[PG_COMM_SEND_STREAM](PG_COMM_SEND_STREAM.md)** +- **[PG_COMM_STATUS](PG_COMM_STATUS.md)** +- **[PG_CONTROL_GROUP_CONFIG](PG_CONTROL_GROUP_CONFIG.md)** +- **[PG_EXT_STATS](PG_EXT_STATS.md)** +- **[PG_GET_INVALID_BACKENDS](PG_GET_INVALID_BACKENDS.md)** +- **[PG_GET_SENDERS_CATCHUP_TIME](PG_GET_SENDERS_CATCHUP_TIME.md)** +- **[PG_GROUP](PG_GROUP.md)** +- **[PG_GTT_RELSTATS](PG_GTT_RELSTATS.md)** +- **[PG_GTT_STATS](PG_GTT_STATS.md)** +- **[PG_GTT_ATTACHED_PIDS](PG_GTT_ATTACHED_PIDS.md)** +- **[PG_INDEXES](PG_INDEXES.md)** +- **[PG_LOCKS](PG_LOCKS.md)** +- **[PG_NODE_ENV](PG_NODE_ENV.md)** +- **[PG_OS_THREADS](PG_OS_THREADS.md)** +- **[PG_PREPARED_STATEMENTS](PG_PREPARED_STATEMENTS.md)** +- **[PG_PREPARED_XACTS](PG_PREPARED_XACTS.md)** +- **[PG_PUBLICATION_TABLES](PG_PUBLICATION_TABLES.md)** +- **[PG_REPLICATION_ORIGIN_STATUS](PG_REPLICATION_ORIGIN_STATUS.md)** +- **[PG_REPLICATION_SLOTS](PG_REPLICATION_SLOTS.md)** +- **[PG_RLSPOLICIES](PG_RLSPOLICIES.md)** +- **[PG_ROLES](PG_ROLES.md)** +- **[PG_RULES](PG_RULES.md)** +- 
**[PG_RUNNING_XACTS](PG_RUNNING_XACTS.md)** +- **[PG_SECLABELS](PG_SECLABELS.md)** +- **[PG_SESSION_IOSTAT](PG_SESSION_IOSTAT.md)** +- **[PG_SESSION_WLMSTAT](PG_SESSION_WLMSTAT.md)** +- **[PG_SETTINGS](PG_SETTINGS.md)** +- **[PG_SHADOW](PG_SHADOW.md)** +- **[PG_STATS](PG_STATS.md)** +- **[PG_STAT_ACTIVITY](PG_STAT_ACTIVITY.md)** +- **[PG_STAT_ACTIVITY_NG](PG_STAT_ACTIVITY_NG.md)** +- **[PG_STAT_ALL_INDEXES](PG_STAT_ALL_INDEXES.md)** +- **[PG_STAT_ALL_TABLES](PG_STAT_ALL_TABLES.md)** +- **[PG_STAT_BAD_BLOCK](PG_STAT_BAD_BLOCK.md)** +- **[PG_STAT_BGWRITER](PG_STAT_BGWRITER.md)** +- **[PG_STAT_DATABASE](PG_STAT_DATABASE.md)** +- **[PG_STAT_DATABASE_CONFLICTS](PG_STAT_DATABASE_CONFLICTS.md)** +- **[PG_STAT_USER_FUNCTIONS](PG_STAT_USER_FUNCTIONS.md)** +- **[PG_STAT_USER_INDEXES](PG_STAT_USER_INDEXES.md)** +- **[PG_STAT_USER_TABLES](PG_STAT_USER_TABLES.md)** +- **[PG_STAT_REPLICATION](PG_STAT_REPLICATION.md)** +- **[PG_STAT_SUBSCRIPTION](PG_STAT_SUBSCRIPTION.md)** +- **[PG_STAT_SYS_INDEXES](PG_STAT_SYS_INDEXES.md)** +- **[PG_STAT_SYS_TABLES](PG_STAT_SYS_TABLES.md)** +- **[PG_STAT_XACT_ALL_TABLES](PG_STAT_XACT_ALL_TABLES.md)** +- **[PG_STAT_XACT_SYS_TABLES](PG_STAT_XACT_SYS_TABLES.md)** +- **[PG_STAT_XACT_USER_FUNCTIONS](PG_STAT_XACT_USER_FUNCTIONS.md)** +- **[PG_STAT_XACT_USER_TABLES](PG_STAT_XACT_USER_TABLES.md)** +- **[PG_STATIO_ALL_INDEXES](PG_STATIO_ALL_INDEXES.md)** +- **[PG_STATIO_ALL_SEQUENCES](PG_STATIO_ALL_SEQUENCES.md)** +- **[PG_STATIO_ALL_TABLES](PG_STATIO_ALL_TABLES.md)** +- **[PG_STATIO_SYS_INDEXES](PG_STATIO_SYS_INDEXES.md)** +- **[PG_STATIO_SYS_SEQUENCES](PG_STATIO_SYS_SEQUENCES.md)** +- **[PG_STATIO_SYS_TABLES](PG_STATIO_SYS_TABLES.md)** +- **[PG_STATIO_USER_INDEXES](PG_STATIO_USER_INDEXES.md)** +- **[PG_STATIO_USER_SEQUENCES](PG_STATIO_USER_SEQUENCES.md)** +- **[PG_STATIO_USER_TABLES](PG_STATIO_USER_TABLES.md)** +- **[PG_TABLES](PG_TABLES.md)** +- **[PG_TDE_INFO](PG_TDE_INFO.md)** +- **[PG_THREAD_WAIT_STATUS](PG_THREAD_WAIT_STATUS.md)** +- **[PG_TIMEZONE_ABBREVS](PG_TIMEZONE_ABBREVS.md)** +- **[PG_TIMEZONE_NAMES](PG_TIMEZONE_NAMES.md)** +- **[PG_TOTAL_MEMORY_DETAIL](PG_TOTAL_MEMORY_DETAIL.md)** +- **[PG_TOTAL_USER_RESOURCE_INFO](PG_TOTAL_USER_RESOURCE_INFO.md)** +- **[PG_TOTAL_USER_RESOURCE_INFO_OID](PG_TOTAL_USER_RESOURCE_INFO_OID.md)** +- **[PG_USER](PG_USER.md)** +- **[PG_USER_MAPPINGS](PG_USER_MAPPINGS.md)** +- **[PG_VIEWS](PG_VIEWS.md)** +- **[PG_VARIABLE_INFO](PG_VARIABLE_INFO.md)** +- **[PG_WLM_STATISTICS](PG_WLM_STATISTICS.md)** +- **[PGXC_PREPARED_XACTS](PGXC_PREPARED_XACTS.md)** +- **[PLAN_TABLE](PLAN_TABLE.md)** +- **[PATCH_INFORMATION_TABLE](PATCH_INFORMATION_TABLE.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/viewing-system-catalogs.md b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/viewing-system-catalogs.md index 72ddcb22..6c830c02 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/viewing-system-catalogs.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/system-catalogs-and-system-views/viewing-system-catalogs.md @@ -1,132 +1,132 @@ ---- -title: Querying a System Catalog -summary: Querying a System Catalog -author: zhang cuiping -date: 2023-04-07 ---- - -# Querying a System Catalog - -In addition to the created tables, a database contains many system catalogs. These system catalogs contain MogDB installation information and information about various queries and processes in MogDB. 
You can collect information about the database by querying system catalogs. - -In [System Catalogs and System Views](./system-catalogs-and-system-views.md), the description about each table indicates whether the table is visible to all users or only the initial user. To query tables that are visible only to the initial user, log in as the user. - -MogDB provides the following types of system catalogs and views: - -- System catalogs and views inherited from PG - - These system catalogs and views have the prefix **PG**. - -- New system catalogs and views of MogDB - - These system catalogs and views have the prefix **GS**. - -## Querying Database Tables - -For example, you can run the following command to query the **PG_TABLES** system catalog for all tables in the **public** schema: - -```sql -SELECT distinct(tablename) FROM pg_tables WHERE SCHEMANAME = 'public'; -``` - -Information similar to the following is displayed: - -```markdown - tablename -------------------- - err_hr_staffs - test - err_hr_staffs_ft3 - web_returns_p1 - mig_seq_table - films4 -(6 rows) -``` - -## Viewing Database Users - -You can run the **PG_USER** command to view the list of all users in the database, and view the user ID (**USESYSID**) and permissions. - -```sql -SELECT * FROM pg_user; - usename | usesysid | usecreatedb | usesuper | usecatupd | userepl | passwd | valbegin | - valuntil | respool | parent | spacelimit | useconfig | nodegroup | tempspacelimit | -spillspacelimit ----------+----------+-------------+----------+-----------+---------+----------+----------+ -----------+--------------+--------+------------+-----------+-----------+----------------+- ----------------- - omm | 10 | t | t | t | t | ******** | | - | default_pool | 0 | | | | | - joe | 16806 | f | f | f | f | ******** | | - | default_pool | 0 | | | | | -(2 rows) -``` - -## Viewing and Stopping the Running Query Statements - -You can view the running query statements in the [PG_STAT_ACTIVITY](./system-views/PG_STAT_ACTIVITY.md) view. You can use the following methods: - -1. Set the parameter **track_activities** to **on**. - - ```vbnet - SET track_activities = on; - ``` - - The database collects the running information about active queries only if the parameter is set to **on**. - -2. View the running query statements. Run the following command to view the database names, users performing queries, query status, and the corresponding PIDs which are connected to the running query statements: - - ```sql - SELECT datname, usename, state,pid FROM pg_stat_activity; - ``` - - ```sql - datname | usename | state | pid - ----------+---------+--------+----------------- - postgres | Ruby | active | 140298793514752 - postgres | Ruby | active | 140298718004992 - postgres | Ruby | idle | 140298650908416 - postgres | Ruby | idle | 140298625742592 - postgres | omm | active | 140298575406848 - (5 rows) - ``` - - If the **state** column is **idle**, the connection is idle and requires a user to enter a command. - - To identify only active query statements, run the following command: - - ```sql - SELECT datname, usename, state FROM pg_stat_activity WHERE state != 'idle'; - ``` - -3. To cancel queries that have been running for a long time, use the **PG_TERMINATE_BACKEND** function to end sessions based on the thread ID. 
- - ```scss - SELECT PG_TERMINATE_BACKEND(139834759993104); - ``` - - If information similar to the following is displayed, the session is successfully terminated: - - ```markdown - PG_TERMINATE_BACKEND - ---------------------- - t - (1 row) - ``` - - If information similar to the following is displayed, a user has terminated the current session: - - ```vbnet - FATAL: terminating connection due to administrator command - FATAL: terminating connection due to administrator command - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: - > - > If the **PG_TERMINATE_BACKEND** function is used to terminate the backend threads of the current session, the **gsql** client will be reconnected automatically rather than be logged out. The message “The connection to the server was lost. Attempting reset: Succeeded.” is returned. - > - > ```vbnet - > FATAL: terminating connection due to administrator command - > FATAL: terminating connection due to administrator command - > The connection to the server was lost. Attempting reset: Succeeded. +--- +title: Querying a System Catalog +summary: Querying a System Catalog +author: zhang cuiping +date: 2023-04-07 +--- + +# Querying a System Catalog + +In addition to the created tables, a database contains many system catalogs. These system catalogs contain MogDB installation information and information about various queries and processes in MogDB. You can collect information about the database by querying system catalogs. + +In [System Catalogs and System Views](./system-catalogs-and-system-views.md), the description about each table indicates whether the table is visible to all users or only the initial user. To query tables that are visible only to the initial user, log in as the user. + +MogDB provides the following types of system catalogs and views: + +- System catalogs and views inherited from PG + + These system catalogs and views have the prefix **PG**. + +- New system catalogs and views of MogDB + + These system catalogs and views have the prefix **GS**. + +## Querying Database Tables + +For example, you can run the following command to query the **PG_TABLES** system catalog for all tables in the **public** schema: + +```sql +SELECT distinct(tablename) FROM pg_tables WHERE SCHEMANAME = 'public'; +``` + +Information similar to the following is displayed: + +```markdown + tablename +------------------- + err_hr_staffs + test + err_hr_staffs_ft3 + web_returns_p1 + mig_seq_table + films4 +(6 rows) +``` + +## Viewing Database Users + +You can run the **PG_USER** command to view the list of all users in the database, and view the user ID (**USESYSID**) and permissions. + +```sql +SELECT * FROM pg_user; + usename | usesysid | usecreatedb | usesuper | usecatupd | userepl | passwd | valbegin | + valuntil | respool | parent | spacelimit | useconfig | nodegroup | tempspacelimit | +spillspacelimit +---------+----------+-------------+----------+-----------+---------+----------+----------+ +----------+--------------+--------+------------+-----------+-----------+----------------+- +---------------- + omm | 10 | t | t | t | t | ******** | | + | default_pool | 0 | | | | | + joe | 16806 | f | f | f | f | ******** | | + | default_pool | 0 | | | | | +(2 rows) +``` + +## Viewing and Stopping the Running Query Statements + +You can view the running query statements in the [PG_STAT_ACTIVITY](./system-views/PG_STAT_ACTIVITY.md) view. You can use the following methods: + +1. Set the parameter **track_activities** to **on**. 
+ + ```vbnet + SET track_activities = on; + ``` + + The database collects the running information about active queries only if the parameter is set to **on**. + +2. View the running query statements. Run the following command to view the database names, users performing queries, query status, and the corresponding PIDs which are connected to the running query statements: + + ```sql + SELECT datname, usename, state,pid FROM pg_stat_activity; + ``` + + ```sql + datname | usename | state | pid + ----------+---------+--------+----------------- + postgres | Ruby | active | 140298793514752 + postgres | Ruby | active | 140298718004992 + postgres | Ruby | idle | 140298650908416 + postgres | Ruby | idle | 140298625742592 + postgres | omm | active | 140298575406848 + (5 rows) + ``` + + If the **state** column is **idle**, the connection is idle and requires a user to enter a command. + + To identify only active query statements, run the following command: + + ```sql + SELECT datname, usename, state FROM pg_stat_activity WHERE state != 'idle'; + ``` + +3. To cancel queries that have been running for a long time, use the **PG_TERMINATE_BACKEND** function to end sessions based on the thread ID. + + ```scss + SELECT PG_TERMINATE_BACKEND(139834759993104); + ``` + + If information similar to the following is displayed, the session is successfully terminated: + + ```markdown + PG_TERMINATE_BACKEND + ---------------------- + t + (1 row) + ``` + + If information similar to the following is displayed, a user has terminated the current session: + + ```vbnet + FATAL: terminating connection due to administrator command + FATAL: terminating connection due to administrator command + ``` + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: + > + > If the **PG_TERMINATE_BACKEND** function is used to terminate the backend threads of the current session, the **gsql** client will be reconnected automatically rather than be logged out. The message “The connection to the server was lost. Attempting reset: Succeeded.” is returned. + > + > ```vbnet + > FATAL: terminating connection due to administrator command + > FATAL: terminating connection due to administrator command + > The connection to the server was lost. Attempting reset: Succeeded. 
> ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/tool-reference/client-tool/gsql/client-tool-gsql.md b/product/en/docs-mogdb/v5.0/reference-guide/tool-reference/client-tool/gsql/client-tool-gsql.md index 5d5707e1..cee06dd2 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/tool-reference/client-tool/gsql/client-tool-gsql.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/tool-reference/client-tool/gsql/client-tool-gsql.md @@ -14,4 +14,5 @@ date: 2022-06-09 - **[Obtaining Help Information](obtaining-help-information.md)** - **[Command Reference](command-reference.md)** - **[Meta-Command Reference](meta-command-reference.md)** -- **[FAQs](gsql-faq.md)** \ No newline at end of file +- **[FAQs](gsql-faq.md)** +- **[gsql Release Note](gsql-release-notes.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/tool-reference/client-tool/gsql/gsql-release-notes.md b/product/en/docs-mogdb/v5.0/reference-guide/tool-reference/client-tool/gsql/gsql-release-notes.md new file mode 100644 index 00000000..34fdce13 --- /dev/null +++ b/product/en/docs-mogdb/v5.0/reference-guide/tool-reference/client-tool/gsql/gsql-release-notes.md @@ -0,0 +1,14 @@ +--- +title: gsql release note +summary: gsql release note +author: 齐永江 郭欢 +date: 2024-04-02 +--- + +# gsql release note + +## Version 5.0.0.1 (2024-03-29) + +### Add + +- We now support the AIX-7.2.0.0_power model. diff --git a/product/en/docs-mogdb/v5.0/reference-guide/tool-reference/server-tools/gs_check.md b/product/en/docs-mogdb/v5.0/reference-guide/tool-reference/server-tools/gs_check.md index cdb1f73a..c7b77510 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/tool-reference/server-tools/gs_check.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/tool-reference/server-tools/gs_check.md @@ -348,6 +348,14 @@ date: 2021-06-07 No + + CheckDropCache + + Checks whether there is a dropcache process running on each node. If yes, + this item passes the check. Otherwise, this item fails the check. 
+ + No + CheckFilehandle @@ -1339,7 +1347,7 @@ Check result of a scenario: ```bash [perfadm@SIA1000131072 Check]$ gs_check -e inspect Parsing the check items config file successfully -The below items require root privileges to execute:[CheckBlockdev CheckIOConfigure CheckMTU CheckRXTX CheckMultiQueue CheckFirewall CheckSshdService CheckSshdConfig CheckCrondService CheckMaxProcMemory CheckBootItems CheckFilehandle CheckNICModel] +The below items require root privileges to execute:[CheckBlockdev CheckIOConfigure CheckMTU CheckRXTX CheckMultiQueue CheckFirewall CheckSshdService CheckSshdConfig CheckCrondService CheckMaxProcMemory CheckBootItems CheckFilehandle CheckNICModel CheckDropCache] Please enter root privileges user[root]: Please enter password for user[root]: Check root password connection successfully diff --git a/product/en/docs-mogdb/v5.0/security-guide/security/2-managing-users-and-their-permissions.md b/product/en/docs-mogdb/v5.0/security-guide/security/2-managing-users-and-their-permissions.md index ec1be872..e90a771d 100644 --- a/product/en/docs-mogdb/v5.0/security-guide/security/2-managing-users-and-their-permissions.md +++ b/product/en/docs-mogdb/v5.0/security-guide/security/2-managing-users-and-their-permissions.md @@ -1,1032 +1,1032 @@ ---- -title: Managing Users and Their Permissions -summary: Managing Users and Their Permissions -author: Guo Huan -date: 2021-04-26 ---- - -# Managing Users and Their Permissions - -## Default Permission Mechanism - -A user who creates an object is the owner of this object. By default, Separation of Duties is disabled after database installation. A database system administrator has the same permissions as object owners. After an object is created, only the object owner or system administrator can query, modify, and delete the object, and grant permissions for the object to other users through GRANT by default. - -To enable another user to use the object, grant required permissions to the user or the role that contains the user. - -MogDB supports the following permissions: SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, CREATE, CONNECT, EXECUTE, USAGE, ALTER, DROP, COMMENT, INDEX, and VACUUM. Permission types are associated with object types. For permission details, see GRANT. - -To remove permissions, run **REVOKE**. Object owners have implicit permissions (such as ALTER, DROP, COMMENT, INDEX, VACUUM, GRANT, and REVOKE) on objects. That is, once becoming the owner of an object, the owner is immediately granted the implicit permissions on the object. Object owners can remove their own common permissions, for example, making tables read-only to themselves or others, except the system administrator. - -System catalogs and views are visible to either system administrators or all users. System catalogs and views that require system administrator permissions can be queried only by system administrators. For details, see System Catalogs and System Views. - -The database provides the object isolation feature. If this feature is enabled, users can view only the objects (tables, views, columns, and functions) that they have the permission to access. System administrators are not affected by this feature. For details, see ALTER DATABASE. - -## Administrators - -### Initial Users - -The account automatically generated during MogDB installation is called an initial user. An initial user is the system, monitoring, O&M, and security policy administrator who has the highest-level permissions in the system and can perform all operations. 
This account has the same name as the OS user used for MogDB installation. You need to manually set the password during the installation. After the first login, change the initial user's password in time. - -An initial user bypasses all permission checks. You are advised to use an initial user as a database administrator only for database management other than service running. - -### System Administrators - -A system administrator is an account with the **SYSADMIN** attribute. By default, a database system administrator has the same permissions as object owners but does not have the object permissions in the **dbe_perf** schema. - -To create a system administrator, connect to the database as the initial user or a system administrator and run the **CREATE USER** or **ALTER USER** statement with **SYSADMIN** specified. - -```sql -MogDB=# CREATE USER sysadmin WITH SYSADMIN password "xxxxxxxxx"; -``` - -or - -```sql -MogDB=# ALTER USER joe SYSADMIN; -``` - -To run the **ALTER USER** statement, the user must exist. - -### Monitor Administrators - -A monitoring administrator is an account with the **MONADMIN** attribute that has the privilege to view views and functions in the **dbe_perf** schema and to grant or revoke privileges on objects in the **dbe_perf** schema. - -To create a monitoring administrator, connect to the database as the system administrator and run the **CREATE USER** or **ALTER USER** statement with **MONADMIN** specified. - -```sql -MogDB=# CREATE USER monadmin WITH MONADMIN password "xxxxxxxxx"; -``` - -or - -```sql -MogDB=# ALTER USER joe MONADMIN; -``` - -To run the **ALTER USER** statement, the user must exist. - -### O&M Administrators - -An O&M administrator is an account with the **OPRADMIN** attribute and has permission to perform backup and restoration operations using the Roach tool. - -To create an O&M administrator, connect to the database as the initial user and run the **CREATE USER** or **ALTER USER** statement with **OPRADMIN** specified. - -```sql -MogDB=# CREATE USER opradmin WITH OPRADMIN password "xxxxxxxxx"; -``` - -or - -```sql -MogDB=# ALTER USER joe OPRADMIN; -``` - -To run the **ALTER USER** statement, the user must exist. - -### Security Policy Administrators - -A security policy administrator is an account with the **POLADMIN** attribute and has permission to create resource tags, desensitization policies, and unified audit policies. - -To create a security policy administrator, connect to the database as the system administrator and run the **CREATE USER** or **ALTER USER** statement with **POLADMIN** specified. - -```sql -MogDB=# CREATE USER poladmin WITH POLADMIN password "xxxxxxxxx"; -``` - -or - -```sql -MogDB=# ALTER USER joe POLADMIN; -``` - -To run the **ALTER USER** statement, the user must exist. - -## Separation of Duties - -Descriptions in Default Permission Mechanism and Administrators are about the initial situation after a cluster is created. By default, a system administrator with the **SYSADMIN** attribute has the highest-level permissions. - -In actual service management, you can set separation of duties to prevent system administrators from having excessive centralized permissions, which may cause high risks. Some permissions of the system administrator are transferred to the security administrator and audit administrator, implementing separation of duties among the system administrator, security administrator, and audit administrator.
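For example, under separation of duties, role management and audit duties sit with dedicated accounts rather than the system administrator. A minimal sketch, following the **CREATE USER** pattern shown above (the user names and passwords below are illustrative placeholders):

```sql
-- Security administrator: holds the CREATEROLE attribute and manages users and roles.
MogDB=# CREATE USER secadmin WITH CREATEROLE PASSWORD "xxxxxxxxx";
-- Audit administrator: holds the AUDITADMIN attribute and maintains database audit logs.
MogDB=# CREATE USER auditadm WITH AUDITADMIN PASSWORD "xxxxxxxxx";
```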
- -After separation of duties is enabled, a system administrator does not have the **CREATEROLE** attribute (security administrator) and **AUDITADMIN** attribute (audit administrator). That is, the system administrator does not have the permissions to create roles and users and the permissions to view and maintain database audit logs. For details about the **CREATEROLE** and **AUDITADMIN** attributes, see CREATE ROLE. - -After separation of duties is enabled, system administrators have the permissions only for the objects owned by them. - -Separation of duties does not take effect for an initial user. Therefore, you are advised to use an initial user as a database administrator only for database management other than service running. - -To enable separation of duties, set **enableSeparationOfDuty** to **on**. - -For details about permission changes before and after enabling separation of duties, see [Table 1](#Table-2.3.1) and [Table 2](#Table-2.3.2). - -**Table 1** Default user permissions - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Object NameInitial User (ID: 10)System AdministratorMonitor AdministratorO&M AdministratorSecurity Policy AdministratorSecurity AdministratorAudit AdministratorCommon User
TablespacesHas all permissions except the one to access private tables.Can create, modify, delete, access, or grant permissions for tablespaces.Cannot create, modify, delete, or grant permissions for tablespaces and can access tablespaces if the access permission is granted.
TablesHas permissions for all tables.Has all permissions for their own tables, but does not have permissions for other users' tables.
IndexesCan create indexes on all tables.Can create indexes on their own tables.
SchemasHas all permissions for all schemas except dbe_perf.Has all permissions for their own schemas and dbe_perf, but does not have permissions for other users' schemas.Has all permissions for their own schemas, but does not have permissions for other users' schemas.
FunctionsHas all permissions for all functions except those in the dbe_perf schema.Has permissions for their own functions and those in the dbe_perf schema, has the call permission for other users' functions in the public schema, but does not have permissions for other users' functions in other schemas.Has permissions for their own functions, has the call permission for other users' functions in the public schema, but does not have permissions for other users' functions in other schemas.
Customized viewsHas all permissions on all views except the dbe_perf schema view.Has permissions for their own views and the dbe_perf schema view, but does not have permissions for other users' views.Has permissions for their own views, but does not have permissions for other users' views.
System catalogs and system viewsHas permissions to query all system catalogs and views.Has permissions to query only some system catalogs and views. For details, see System Catalogs and System Views.
-**Table 2** Changes in permissions after separation of duties - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Object NameInitial User (ID: 10)System AdministratorMonitor AdministratorO&M AdministratorSecurity Policy AdministratorSecurity AdministratorAudit AdministratorCommon User
TablespacesN/A.
Has all permissions except the one to access private tables.
N/AN/A
TablesPermissions reduced
Has all permissions for their own tables and other users' tables in the public schema, but does not have permissions for other users' tables in other schemas.
N/A
IndexesPermissions reduced
Can create indexes for their own tables and other users' tables in the public schema.
N/A
SchemasPermissions reduced
Has all permissions for their own schemas, but does not have permissions for other users' schemas.
N/AN/A
FunctionsPermissions reduced
Has all permissions for their own functions and other users' functions in the public schema, but does not have permissions for other users' functions in other schemas.
N/AN/A
Customized viewsPermissions reduced
Has all permissions for their own views and other users' views in the public schema, but does not have permissions for other users' views in other schemas.
N/AN/A
System catalogs and system viewsN/AN/A
- -> **Notice:** -> -> The PG_STATISTIC and PG_STATISTIC_EXT system catalogs store sensitive information about statistical objects, such as high-frequency values (MCVs). After separation of duties is enabled, the system administrator can still obtain this statistical information by accessing the two system catalogs. - -## Users - -You can use **CREATE USER** and **ALTER USER** to create and manage database users, respectively. MogDB contains one or more named database users and roles that are shared across MogDB. However, these users and roles do not share data. That is, a user can connect to any database, but after the connection succeeds, the user can access only the database declared in the connection request. - -In non-separation-of-duties scenarios, MogDB user accounts can be created and deleted only by a system administrator or a security administrator with the **CREATEROLE** attribute. In separation-of-duties scenarios, a user account can be created only by an initial user or a security administrator. - -When a user logs in, MogDB authenticates the user. A user can own databases and database objects (such as tables), and grant permissions of these objects to other users and roles. In addition to system administrators, users with the **CREATEDB** attribute can create databases and grant permissions on these databases. - -**Adding, Modifying, and Deleting Users** - -- To create a user, use the SQL statement **CREATE USER**. - - For example, create a user **joe** and set the **CREATEDB** attribute for the user. - - ```sql - MogDB=# CREATE USER joe WITH CREATEDB PASSWORD "XXXXXXXXX"; - CREATE ROLE - ``` - -- To create a system administrator, use the **CREATE USER** statement with the **SYSADMIN** parameter. - -- To delete an existing user, use **DROP USER**. - -- To change a user account (for example, rename the user or change the password), use **ALTER USER**. - -- To view a user list, query the **PG_USER** view. - - ```sql - MogDB=# SELECT * FROM pg_user; - ``` - -- To view user attributes, query the system catalog **PG_AUTHID**. - - ```sql - MogDB=# SELECT * FROM pg_authid; - ``` - -**Private Users** - -If multiple service departments use different database user accounts to perform service operations and a database maintenance department at the same level uses database administrator accounts to perform maintenance operations, the service departments may require that database administrators be able to manage (**DROP**, **ALTER**, and **TRUNCATE**) their data but not access (**INSERT**, **DELETE**, **UPDATE**, **SELECT**, and **COPY**) it without specific authorization. That is, the management permissions of database administrators for tables need to be isolated from their access permissions to improve the data security of common users. - -In separation-of-duties mode, a database administrator does not have permissions for the tables in schemas of other users. In this case, database administrators have neither management permissions nor access permissions, which does not meet the requirements of the service departments mentioned above. Therefore, MogDB provides private users to solve the problem. That is, create private users with the **INDEPENDENT** attribute in non-separation-of-duties mode.
- -```sql -MogDB=# CREATE USER user_independent WITH INDEPENDENT IDENTIFIED BY "1234@abc"; -``` - -System administrators and security administrators with the **CREATEROLE** attribute can manage (**DROP**, **ALTER**, and **TRUNCATE**) objects of private users but cannot access (**INSERT**, **DELETE**, **SELECT**, **UPDATE**, **COPY**, **GRANT**, **REVOKE**, and **ALTER OWNER**) the objects before being authorized. - -> **NOTICE:** PG_STATISTIC and PG_STATISTIC_EXT store sensitive information about statistical objects, such as high-frequency MCVs. The system administrator can still access the two system catalogs to obtain the statistics of the tables to which private users belong. - -**Permanent Users** - -MogDB provides the permanent user solution, that is, create a permanent user with the **PERSISTENCE** property. - -```sql -MogDB=# CREATE USER user_persistence WITH persistence IDENTIFIED BY "1234@abc"; -``` - -Only the initial user is allowed to create, modify, and delete permanent users with the **PERSISTENCE** attribute. - -## Roles - -A role is a set of users. After a role is granted to a user through **GRANT**, the user will have all the permissions of the role. It is recommended that roles be used to efficiently grant permissions. For example, you can create different roles of design, development, and maintenance personnel, grant the roles to users, and then grant specific data permissions required by different users. When permissions are granted or revoked at the role level, these changes take effect on all members of the role. - -MogDB provides an implicitly defined group **PUBLIC** that contains all roles. By default, all new users and roles have the permissions of **PUBLIC**. For details about the default permissions of **PUBLIC**, see GRANT. To revoke permissions of **PUBLIC** from a user or role, or re-grant these permissions to them, add the **PUBLIC** keyword in the **REVOKE** or **GRANT** statement. - -To view all roles, query the system catalog **PG_ROLES**. - -```sql -SELECT * FROM PG_ROLES; -``` - -**Adding, Modifying, and Deleting Roles** - -In non-separation-of-duties scenarios, a role can be created, modified, and deleted only by a system administrator or a user with the **CREATEROLE** attribute. In separation-of-duties scenarios, a role can be created, modified, and deleted only by an initial user or a user with the **CREATEROLE** attribute. - -- To create a role, use **CREATE ROLE**. -- To add or delete users in an existing role, use **ALTER ROLE**. -- To delete a role, use **DROP ROLE**. **DROP ROLE** deletes only a role, rather than member users in the role. - -## Built-in roles - -MogDB provides a group of default roles whose names start with **gs_role_**. These roles are provided to access to specific, typically high-privileged operations. You can grant these roles to other users or roles within the database so that they can use specific functions. These roles should be given with great care to ensure that they are used where they are needed. [Table 3](#table1) describes the permissions of built-in roles. - -**Table 3** Permission description of built-in roles - -| Role | Permission | -| :--------------------- | :----------------------------------------------------------- | -| gs_role_copy_files | Permission to run the **copy… to/from filename** command. However, the GUC parameter **enable_copy_server_files** must be set first to enable the function of copying server files. 
| -| gs_role_signal_backend | Permission to call the **pg_cancel_backend**, **pg_terminate_backend**, and **pg_terminate_session** functions to cancel or terminate other sessions. However, this role cannot perform operations on sessions of the initial user or **PERSISTENCE** user. | -| gs_role_tablespace | Permission to create a tablespace. | -| gs_role_replication | Permission to call logical replication functions, such as **kill_snapshot**, **pg_create_logical_replication_slot**, **pg_create_physical_replication_slot**, **pg_drop_replication_slot**, **pg_replication_slot_advance**, **pg_create_physical_replication_slot_extern**, **pg_logical_slot_get_changes**, **pg_logical_slot_peek_changes**, **pg_logical_slot_get_binary_changes**, and **pg_logical_slot_peek_binary_changes**. | -| gs_role_account_lock | Permission to lock and unlock users. However, this role cannot lock or unlock the initial user or users with the **PERSISTENCE** attribute. | -| gs_role_pldebugger | Permission to debug functions in **dbe_pldebugger**. | - -The restrictions on built-in roles are as follows: - -- The role names starting with **gs_role_** are reserved for built-in roles in the database. Do not create users or roles starting with **gs_role_** or rename existing users or roles starting with **gs_role_**. - -- Do not perform **ALTER** or **DROP** operations on built-in roles. - -- By default, built-in roles do not have the **LOGIN** permission and do not have preset passwords. - -- The **gsql** meta-commands **\\du** and **\\dg** do not display information about built-in roles. However, if *pattern* is set to a specific built-in role, the information is displayed. - -- When separation-of-duty is disabled, the initial user, users with the **SYSADMIN** permission, and users with the **ADMIN OPTION** built-in role permission have the permission to perform **GRANT** and **REVOKE** operations on built-in roles. When separation of duty is enabled, the initial user and users with the **ADMIN OPTION** built-in role permission have the permission to perform **GRANT** and **REVOKE** operations on built-in roles. Example: - - ```sql - GRANT gs_role_signal_backend TO user1; - REVOKE gs_role_signal_backend FROM user1; - ``` - -## Schemas - -Schemas allow multiple users to use the same database without interference. In this way, database objects can be organized into logical groups that are easy to manage, and third-party applications can be added to corresponding schemas without causing conflicts. - -Each database has one or more schemas. Each schema contains tables and other types of objects. When a database is initially created, it has a public schema by default, and all users have the usage permission on the schema. Only the system administrator and initial users can create common functions, aggregate functions, stored procedures, and synonym objects in the public schema. Only the initial users can create operators in the public schema. Other users cannot create the preceding five types of objects even if they are granted the create permission. You can group database objects by schema. A schema is similar to an OS directory but cannot be nested. By default, only the initial user can create objects under the pg_catalog schema. - -The same database object name can be used in different schemas of the same database without causing conflicts. For example, both **a_schema** and **b_schema** can contain a table named **mytable**. Users with required permissions can access objects across multiple schemas of the same database. 
- -When you run the **CREATE USER** command to create a user, the system creates a schema with the same name as the user in the database where the command is executed. - -Database objects are generally created in the first schema in a database search path. For details about the first schema and how to change the schema order, see [Search Path](#Search-Path). - -**Creating, Modifying, and Deleting Schemas** - -- To create a schema, use **CREATE SCHEMA**. By default, the initial user and system administrator can create schemas. Other users can create schemas in the database only when they have the CREATE permission on the database. For details about how to grant the permission, see the syntax in GRANT. - -- To change the name or owner of a schema, use **ALTER SCHEMA**. The schema owner can change the schema. - -- To delete a schema and its objects, use **DROP SCHEMA**. Schema owners can delete schemas. - -- To create a table in a schema, use the **schema_name.table_name** format to specify the table. If *schema_name* is not specified, the table will be created in the first schema in search path. - -- To view the owner of a schema, perform the following join query on the system catalogs **PG_NAMESPACE** and **PG_USER**. Replace **schema_name** in the statement with the name of the schema to be queried. - - ```sql - MogDB=# SELECT s.nspname,u.usename AS nspowner FROM pg_namespace s, pg_user u WHERE nspname='schema_name' AND s.nspowner = u.usesysid; - ``` - -- To view a list of all schemas, query the system catalog **PG_NAMESPACE**. - - ```sql - MogDB=# SELECT * FROM pg_namespace; - ``` - -- To view a list of tables in a schema, query the system catalog **PG_TABLES**. For example, the following query will return a table list from **PG_CATALOG** in the schema. - - ```sql - MogDB=# SELECT distinct(tablename),schemaname from pg_tables where schemaname = 'pg_catalog'; - ``` - -**Search Path ** - -A search path is defined in the **search_path** parameter. The parameter value is a list of schema names separated by commas (,). If no target schema is specified during object creation, the object will be added to the first schema listed in the search path. If there are objects with the same name across different schemas and no schema is specified for an object query, the object will be returned from the first schema containing the object in the search path. - -- To view the current search path, use **SHOW**. - - ```sql - MogDB=# SHOW SEARCH_PATH; - search_path - ---------------- - "$user",public - (1 row) - ``` - - The default value of **search_path** is `"$user",public.` **$user** indicates the name of the schema with the same name as the current session user. If the schema does not exist, **$user** will be ignored. By default, after a user connects to a database that has schemas with the same name, objects will be added to all the schemas. If there are no such schemas, objects will be added to only the **public** schema. - -- To change the default schema of the current session, run the **SET** statement. - - Run the following command to set **search_path** to **myschema** and **public** (**myschema** will be searched first): - - ```sql - MogDB=# SET SEARCH_PATH TO myschema, public; - SET - ``` - -## Setting User Permissions - -- To grant permissions for an object to a user, see GRANT. - - When permissions for a table or view in a schema are granted to a user or role, the **USAGE** permission of the schema must be granted together. 
Otherwise, the user or role can only see these objects but cannot access them. - - In the following example, permissions for the schema **tpcds** are first granted to user **joe**, and then the **SELECT** permission for the **tpcds.web_returns** table is also granted. - - ```sql - MogDB=# GRANT USAGE ON SCHEMA tpcds TO joe; - MogDB=# GRANT SELECT ON TABLE tpcds.web_returns to joe; - ``` - -- Grant a role to a user to allow the user to inherit the object permissions of the role. - - 1. Create a role. - - Create a role **lily** and grant the system permission **CREATEDB** to the role. - - ```sql - MogDB=# CREATE ROLE lily WITH CREATEDB PASSWORD "XXXXXXXXX"; - ``` - - 2. Grant object permissions to the role by using **GRANT**. - - For example, first grant permissions for the schema **tpcds** to the role **lily**, and then grant the **SELECT** permission of the **tpcds.web_returns** table to **lily**. - - ```sql - MogDB=# GRANT USAGE ON SCHEMA tpcds TO lily; - MogDB=# GRANT SELECT ON TABLE tpcds.web_returns to lily; - ``` - - 3. Grant the role permissions to a user. - - ```sql - MogDB=# GRANT lily to joe; - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > When the permissions of a role are granted to a user, the attributes of the role are not transferred together. - -- To revoke user permissions, use **REVOKE**. - -## Row-Level Access Control - -The row-level access control feature enables database access control to be accurate to each row of data tables. In this way, the same SQL query may return different results for different users. - -You can create a row-level access control policy for a data table. The policy defines an expression that takes effect only for specific database users and SQL operations. When a database user accesses the data table, if a SQL statement meets the specified row-level access control policies of the data table, the expressions that meet the specified condition will be combined by using **AND** or **OR** based on the attribute type (**PERMISSIVE** | **RESTRICTIVE**) and applied to the execution plan in the query optimization phase. - -Row-level access control is used to control the visibility of row-level data in tables. By predefining filters for data tables, the expressions that meet the specified condition can be applied to execution plans in the query optimization phase, which will affect the final execution result. Currently, the SQL statements that can be affected include **SELECT**, **UPDATE**, and **DELETE**. - -Scenario 1: A table summarizes the data of different users. Users can view only their own data. - -```sql ---Create users alice, bob, and peter. -MogDB=# CREATE ROLE alice PASSWORD 'Enmo@123'; -MogDB=# CREATE ROLE bob PASSWORD 'Enmo@123'; -MogDB=# CREATE ROLE peter PASSWORD 'Enmo@123'; - ---Create the all_data table that contains user information. -MogDB=# CREATE TABLE all_data(id int, role varchar(100), data varchar(100)); - ---Insert data into the data table. -MogDB=# INSERT INTO all_data VALUES(1, 'alice', 'alice data'); -MogDB=# INSERT INTO all_data VALUES(2, 'bob', 'bob data'); -MogDB=# INSERT INTO all_data VALUES(3, 'peter', 'peter data'); - ---Grant the read permission for the all_data table to users alice, bob, and peter. -MogDB=# GRANT SELECT ON all_data TO alice, bob, peter; - ---Enable row-level access control. -MogDB=# ALTER TABLE all_data ENABLE ROW LEVEL SECURITY; - ---Create a row-level access control policy to specify that the current user can view only their own data. 
-MogDB=# CREATE ROW LEVEL SECURITY POLICY all_data_rls ON all_data USING(role = CURRENT_USER); - ---View table details. -MogDB=# \d+ all_data - Table "public.all_data" - Column | Type | Modifiers | Storage | Stats target | Description ---------+------------------------+-----------+----------+--------------+------------- - id | integer | | plain | | - role | character varying(100) | | extended | | - data | character varying(100) | | extended | | -Row Level Security Policies: - POLICY "all_data_rls" - USING (((role)::name = "current_user"())) -Has OIDs: no -Location Nodes: ALL DATANODES -Options: orientation=row, compression=no, enable_rowsecurity=true - ---Switch to user alice and run SELECT * FROM public.all_data. -MogDB=# SELECT * FROM public.all_data; - id | role | data -----+-------+------------ - 1 | alice | alice data -(1 row) - -MogDB=# EXPLAIN(COSTS OFF) SELECT * FROM public.all_data; - QUERY PLAN ----------------------------------------------------------------- - Streaming (type: GATHER) - Node/s: All datanodes - -> Seq Scan on all_data - Filter: ((role)::name = 'alice'::name) - Notice: This query is influenced by row level security feature -(5 rows) - ---Switch to user peter and run SELECT * FROM public.all_data. -MogDB=# SELECT * FROM public.all_data; - id | role | data -----+-------+------------ - 3 | peter | peter data -(1 row) - -MogDB=# EXPLAIN(COSTS OFF) SELECT * FROM public.all_data; - QUERY PLAN ----------------------------------------------------------------- - Streaming (type: GATHER) - Node/s: All datanodes - -> Seq Scan on all_data - Filter: ((role)::name = 'peter'::name) - Notice: This query is influenced by row level security feature -(5 rows) -``` - ->**Notice:** PG_STATISTIC system table and PG_STATISTIC_EXT system table store some sensitive information of statistical objects, such as high frequency value MCV. After the separation of powers is carried out, the system administrator can still obtain the information in the statistical information by accessing the two system tables. - -## Setting Security Policies - -### Setting Account Security Policies - -**Background** - -For data security purposes, MogDB provides a series of security measures, such as automatically locking and unlocking accounts, manually locking and unlocking abnormal accounts, and deleting accounts that are no longer used. - -**Automatically Locking and Unlocking Accounts** - -- If the number of incorrect password attempts (**failed_login_attempts**) of an account reaches the upper limit (**10** by default), the system automatically locks the account. Smaller parameter values result in higher account security. However, if the value of this parameter is set too small, inconvenience may occur. - -- If the time during which a user is locked exceeds the preset value (**password_lock_time**, one day by default), the system automatically unlocks the user. Larger parameter values bring higher account security. However, if the value of this parameter is set too large, inconvenience may occur. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - The integral part of the **password_lock_time** value indicates the number of days and its decimal part can be converted into hours, minutes, and seconds. - > - If the **failed_login_attempts** parameter is set to **0**, an account is never locked due to incorrect password attempts. If the **password_lock_time** parameter is set to **0**, an account is quickly unlocked after it is locked due to incorrect password attempts. 
Therefore, only when both parameters are set to positive values, the following operations can be performed: password failure check, account locking, and account unlocking. - > - The default values of the two parameters meet the security requirements. You can change the parameter values as needed for higher security. You are advised to retain the default values. - -Configure the **failed_login_attempts** parameter. - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to connect to the database: - - ```bash - gsql -d mogdb -p 8000 - ``` - - **mogdb** is the name of the database to be connected, and **8000** is the port number of the database primary node. - -3. View the current value. - - ```sql - MogDB=# SHOW failed_login_attempts; - failed_login_attempts - ----------------------- - 10 - (1 row) - ``` - - If the command output is not **10**, run the **\q** command to exit the database. - -4. Run the following command to set the parameter to its default value **10**: - - ```bash - gs_guc reload -D /mogdb/data/dbnode -c "failed_login_attempts=10" - ``` - -Configure the **password_lock_time** parameter. - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to connect to the database: - - ```bash - gsql -d mogdb -p 8000 - ``` - - **mogdb** is the name of the database to be connected, and **8000** is the port number of the database primary node. - -3. View the current value. - - ```sql - MogDB=# SHOW password_lock_time; - password_lock_time - ----------------------- - 1 - (1 row) - ``` - - If the command output is not **1**, run the **\q** command to exit the database. - -4. Run the following command to set the parameter to its default value **1**: - - ```bash - gs_guc reload -N all -I all -c "password_lock_time=1" - ``` - -**Manually Locking and Unlocking Accounts** - -Once detecting that an account is stolen or the account is used to access the database without being authorized, administrators can manually lock the account. - -Administrators can manually unlock the account if the account becomes normal again. - -For details about how to create a user, see Users. To manually lock and unlock user **joe**, run commands in the following format: - -- To manually lock the account, run the following command: - - ```sql - MogDB=# ALTER USER joe ACCOUNT LOCK; - ALTER ROLE - ``` - -- To manually unlock the account, run the following command: - - ```sql - MogDB=# ALTER USER joe ACCOUNT UNLOCK; - ALTER ROLE - ``` - -**Deleting Accounts That Are No Longer Used** - -Administrators can delete an account that is no longer used. This operation cannot be rolled back. - -When an account to be deleted is in the active state, it is deleted after the session is disconnected. - -For example, if you want to delete account **joe**, run the following command: - -```sql -MogDB=# DROP USER joe CASCADE; -DROP ROLE -``` - -### Setting the Validity Period of an Account - -**Precautions** - -When creating a user, you need to specify the validity period of the user, including the start time and end time. - -To enable a user not within the validity period to use its account, set a new validity period. - -**Procedure** - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to connect to the database: - - ```bash - gsql -d mogdb -p 8000 - ``` - - **mogdb** is the name of the database to be connected, and **8000** is the port number of the database primary node. - -3. 
Run the following command to create a user and specify the start time and end time: - - ```sql - CREATE USER joe WITH PASSWORD 'XXXXXXXXX' VALID BEGIN '2015-10-10 08:00:00' VALID UNTIL '2016-10-10 08:00:00'; - ``` - - The user is created if the following information is displayed: - - ```sql - CREATE ROLE - ``` - -4. If the user is not within the specified validity period, run the following command to set the start time and end time of a new validity period. - - ```sql - ALTER USER joe WITH VALID BEGIN '2016-11-10 08:00:00' VALID UNTIL '2017-11-10 08:00:00'; - ``` - - The start time and end time of the new validity period is set successfully if the following information is displayed: - - ```sql - ALTER ROLE - ``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - If **VALID BEGIN** and **VALID UNTIL** are not specified in the **CREATE ROLE** syntax, the start time and end time of a user's operation are not limited. -> - If **VALID BEGIN** and **VALID UNTIL** are not specified in the **ALTER ROLE** syntax, the start time and end time of a user's operation are not modified and the original settings are used. - -### Setting Password Security Policies - -**Procedure** - -User passwords are stored in the system catalog **pg_authid**. To prevent password leakage, MogDB encrypts user passwords before storing them. The encryption algorithm is determined by the configuration parameter **password_encryption_type**. - -- If parameter **password_encryption_type** is set to **0**, passwords are encrypted using MD5. MD5 is not recommended because it is insecure. -- If parameter **password_encryption_type** is set to **1**, passwords are encrypted using SHA-256 and MD5. MD5 is not recommended because it is insecure. -- If parameter **password_encryption_type** is set to **2**, passwords are encrypted using SHA-256. This is the default configuration. -- If parameter **password_encryption_type** is set to **3**, passwords are encrypted using sm3. This is the default configuration. - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to connect to the database: - - ```bash - gsql -d postgres -p 8000 - ``` - - **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. - -3. View the configured encryption algorithm. - - ```sql - MogDB=# SHOW password_encryption_type; - password_encryption_type - -------------------------- - 2 - (1 row) - ``` - - If the command output is **0** or **1**, run the **\q** command to exit the database. - -4. Set **gs_guc reload -Z coordinator -D** using a secure encryption algorithm: - - ```bash - gs_guc reload -N all -I all -c "password_encryption_type=2" - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** When running **CREATE USER/ROLE** to create a database user, do not specify the properties of **UNENCRYPTED** to prevent password leakage. By doing so, only the password of the newly created user can be encrypted and stored. - -5. Configure password security parameters. - - - Password complexity - - You need to specify a password when initializing a database, creating a user, or modifying a user. The password must meet the complexity check rules (see password_policy). Otherwise, you are prompted to enter the password again. - - - If parameter **password_policy** is set to **1**, the default password complexity rule is used to check passwords. 
- - If parameter **password_policy** is set to **0**, the password complexity rule is not used. However, the password cannot be empty and must contain only valid characters, including uppercase letters (A–Z), lowercase letters (a–z), digits (0–9), and special characters. You are not advised to set this parameter to **0** because this operation poses security risks. Even if the setting is required, you must set **password_policy** to **0** on all MogDB nodes. - - Configure the **password_policy** parameter. - - a. Run the following command to connect to the database: - - ```bash - gsql -d postgres -p 8000 - ``` - - **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. - - b. View the current value. - - ```sql - MogDB=# SHOW password_policy; - password_policy - --------------------- - 1 - (1 row) - ``` - - If the command output is not **1**, run the **\q** command to exit the database. - - c. Run the following command to set the parameter to its default value **1**: - - ```bash - gs_guc reload -N all -I all -c "password_policy=1" - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note:** - > - > The password complexity requirements are as follows: - > - > - Minimum number of uppercase letters (A-Z) (**password_min_uppercase**) - > - > - Minimum number of lowercase letters (a-z) (**password_min_lowercase**) - > - > - Minimum number of digits (0-9) (**password_min_digital**) - > - > - Minimum number of special characters (**password_min_special**) ([Table 4](#Table-2.9.3.1) lists special characters.) - > - > - Minimum password length (**password_min_length**) - > - > - Maximum password length (**password_max_length**) - > - > - A password must contain at least three types of the characters (uppercase letters, lowercase letters, digits, and special characters). - > - > - A password is case insensitive and cannot be the username or the username spelled backwards. - > - > - A new password cannot be the current password and the current password spelled backwards. - > - > - A password cannot be a weak password. - > - > - Weak passwords are easy to crack. The definition of weak passwords may vary with users or user groups. Users can define their own weak passwords. - > - > - Passwords in the weak password dictionary are stored in the **gs_global_config** system catalog. When a user is created or modified, the password set by the user is compared with that stored in the weak password dictionary. If the password is matched, a message is displayed, indicating that the password is weak and password setting fails. - > - > - The weak password dictionary is empty by default. You can add or delete weak passwords using the following syntax: - > - > ```sql - > MogDB=# CREATE WEAK PASSWORD DICTIONARY WITH VALUES ('password1'), ('password2'); - > MogDB=# DROP WEAK PASSWORD DICTIONARY; - > ``` - - - Password reuse - - An old password can be reused only when it meets the requirements on reuse days (**password_reuse_time**) and reuse times (**password_reuse_max**). [Table 5](#Table-2.9.3.2) lists the parameter configurations. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The default values of the **password_reuse_time** and **password_reuse_max** parameters are **60** and **0**, respectively. Large parameter values ensure high security, but they may also cause operation inconvenience. The default values meet security standards. 
You can keep them or change the values as needed to improve the security level. - - Configure the **password_reuse_time** parameter. - - a. Run the following command to connect to the database: - - ```bash - gsql -d postgres -p 8000 - ``` - - **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. - - b. View the current value. - - ```sql - MogDB=# SHOW password_reuse_time; - password_reuse_time - --------------------- - 60 - (1 row) - ``` - - If the command output is not **60**, run the **\q** command to exit the database. - - c. Run the following command to set the parameter to its default value **60**: - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** You are not advised to set the parameter to **0**. This value is valid only when **password_reuse_time** for all MogDB nodes is set to **0**. - - ```bash - gs_guc reload -N all -I all -c "password_reuse_time=60" - ``` - - Configure the **password_reuse_max** parameter. - - a. Run the following command to connect to the database: - - ```bash - gsql -d postgres -p 8000 - ``` - - **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. - - b. View the current value. - - ```sql - MogDB=# SHOW password_reuse_max; - password_reuse_max - -------------------- - 0 - (1 row) - ``` - - If the command output is not **0**, run the **\q** command to exit the database. - - c. Run the following command to set the parameter to its default value **0**: - - ```bash - gs_guc reload -N all -I all -c "password_reuse_max = 0" - ``` - - - Password validity period - - A validity period (**password_effect_time**) is set for each database user password. If the password is about to expire (**password_notify_time**), the system displays a message to remind the user to change it upon login. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Considering the usage and service continuity of a database, the database still allows a user to log in after the password expires. A password change notification is displayed every time the user logs in to the database until the password is changed. - - Configure the **password_effect_time** parameter. - - a. Run the following command to connect to the database: - - ```bash - gsql -d postgres -p 8000 - ``` - - **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. - - b. View the current value. - - ```sql - MogDB=# SHOW password_effect_time; - password_effect_time - ---------------------- - 90 - (1 row) - ``` - - If the command output is not **90**, run the **\q** command to exit the database. - - c. Run the following command to set the parameter to **90** (**0** is not recommended): - - ```bash - gs_guc reload -N all -I all -c "password_effect_time = 90" - ``` - - Configure the **password_notify_time** parameter. - - a. Run the following command to connect to the database: - - ```bash - gsql -d postgres -p 8000 - ``` - - **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. - - b. View the current value. - - ```sql - MogDB=# SHOW password_notify_time; - password_notify_time - ---------------------- - 7 - (1 row) - ``` - - c. 
If **7** is not displayed, run the following command to set the parameter to **7** (**0** is not recommended): - - ```bash - gs_guc reload -N all -I all -c "password_notify_time = 7" - ``` - - - Password change - - - During database installation, an OS user with the same name as the initial user is created. The password of the OS user needs to be periodically changed for account security. - - To change the password of user **user1**, run the following command: - - ``` - passwd user1 - ``` - - Change the password as prompted. - - - System administrators and common users need to periodically change their passwords to prevent the accounts from being stolen. - - For example, to change the password of user **user1**, connect to the database as the system administrator and run the following commands: - - ```sql - MogDB=# ALTER USER user1 IDENTIFIED BY "1234@abc" REPLACE "5678@def"; - ALTER ROLE - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** **1234@abc** and **5678@def** represent the new password and the original password of user **user1**, respectively. If the new password does not have the required complexity, the change will not take effect. - - - Administrators can change their own and common users' passwords. If common users forget their passwords, they can ask administrators to change the passwords. - - To change the password of user **joe**, run the following command: - - ```sql - MogDB=# ALTER USER joe IDENTIFIED BY "xxxxxxxx"; - ALTER ROLE - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - System administrators are not allowed to change passwords for each other. - > - A system administrator can change the password of a common user without being required to provide the common user's old password. - > - A system administrator can change their own password but is required to provide the old password. - - - Password verification - - Password verification is required when you set the user or role in the current session. If the entered password is inconsistent with the stored password of the user, an error is reported. - - If user **joe** needs to be set, run the following command: - - ```sql - MogDB=# SET ROLE joe PASSWORD "abc@1234"; - ERROR: Invalid username/password,set role denied. - ``` - - **Table 4** Special characters - - | No. | Character | No. | Character | No. | Character | No. | Character | - | :--- | :-------- | :--- | :--------- | :--- | :-------- | :--- | :-------- | - | 1 | ~ | 9 | * | 17 | \| | 25 | < | - | 2 | ! | 10 | ( | 18 | [ | 26 | . | - | 3 | @ | 11 | ) | 19 | { | 27 | > | - | 4 | # | 12 | - | 20 | } | 28 | / | - | 5 | $ | 13 | _ | 21 | ] | 29 | ? | - | 6 | % | 14 | = | 22 | ; | - | - | - | 7 | ^ | 15 | + | 23 | : | - | - | - | 8 | & | 16 | </p> | 24 | , | - | - | - - **Table 5** Parameter description for reuse days and reuse times - - | Parameter | Value Range | Description | - | :----------------------------------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | - | Number of days during which a password cannot be reused (**password_reuse_time**) | Positive number or **0**. The integral part of a positive number indicates the number of days and its decimal part can be converted into hours, minutes, and seconds.
The default value is **60**. The integral part of a positive number indicates the number of days and its decimal part can be converted into hours, minutes, and seconds. | - If the parameter value is changed to a smaller one, new passwords will be checked based on the new parameter value.
- If the parameter value is changed to a larger one (for example, changed from **a** to **b**), the historical passwords before **b** days probably can be reused because these historical passwords may have been deleted. New passwords will be checked based on the new parameter value.
NOTE:
The absolute time is used. Historical passwords are recorded using absolute time and unaffected by local time changes. | - | Number of consecutive times that a password cannot be reused (**password_reuse_max**) | Positive integer or 0. The integral part of a positive number indicates the number of days and its decimal part can be converted into hours, minutes, and seconds.
The value **0** indicates that the number of consecutive times that a password cannot be reused is not checked. The integral part of a positive number indicates the number of days and its decimal part can be converted into hours, minutes, and seconds. | - If the parameter value is changed to a smaller one, new passwords will be checked based on the new parameter value.
- If the parameter value is changed to a larger one (for example, changed from **a** to **b**), the historical passwords before the last **b** passwords probably can be reused because these historical passwords may have been deleted. New passwords will be checked based on the new parameter value. | - -6. Set user password expiration. - - When creating a user, a user with the **CREATEROLE** permission can force the user password to expire. After logging in to the database for the first time, a new user can perform query operations only after changing the password. The command format is as follows: - - ```sql - MogDB=# CREATE USER joe PASSWORD "abc@1234" EXPIRED; - CREATE ROLE - ``` - - A user with the **CREATEROLE** permission can force a user password to expire or force a user to change the forcibly expired password. The command format is as follows: - - ```sql - MogDB=# ALTER USER joe PASSWORD EXPIRED; - ALTER ROLE - ``` - - ```sql - MogDB=# ALTER USER joe PASSWORD "abc@2345" EXPIRED; - ALTER ROLE - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - After a user whose password is invalid logs in to the database, the system prompts the user to change the password when the user performs a simple or extended query. The user can then execute statements after changing the password. - > - Only initial users, system administrators (with the **sysadmin** permission), and users who have the permission to create users (with the **CREATEROLE** permission) can invalidate user passwords. System administrators can invalidate their own passwords or the passwords of other system administrators. The password of initial users cannot be invalidated. +--- +title: Managing Users and Their Permissions +summary: Managing Users and Their Permissions +author: Guo Huan +date: 2021-04-26 +--- + +# Managing Users and Their Permissions + +## Default Permission Mechanism + +A user who creates an object is the owner of this object. By default, Separation of Duties is disabled after database installation. A database system administrator has the same permissions as object owners. After an object is created, only the object owner or system administrator can query, modify, and delete the object, and grant permissions for the object to other users through GRANT by default. + +To enable another user to use the object, grant required permissions to the user or the role that contains the user. + +MogDB supports the following permissions: SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, CREATE, CONNECT, EXECUTE, USAGE, ALTER, DROP, COMMENT, INDEX, and VACUUM. Permission types are associated with object types. For permission details, see GRANT. + +To remove permissions, run **REVOKE**. Object owners have implicit permissions (such as ALTER, DROP, COMMENT, INDEX, VACUUM, GRANT, and REVOKE) on objects. That is, once becoming the owner of an object, the owner is immediately granted the implicit permissions on the object. Object owners can remove their own common permissions, for example, making tables read-only to themselves or others, except the system administrator. + +System catalogs and views are visible to either system administrators or all users. System catalogs and views that require system administrator permissions can be queried only by system administrators. For details, see System Catalogs and System Views. + +The database provides the object isolation feature. 
If this feature is enabled, users can view only the objects (tables, views, columns, and functions) that they have the permission to access. System administrators are not affected by this feature. For details, see ALTER DATABASE.

## Administrators

### Initial Users

The account automatically generated during MogDB installation is called the initial user. The initial user acts as the system, monitoring, O&M, and security policy administrator; it has the highest-level permissions in the system and can perform all operations. This account has the same name as the OS user used for MogDB installation. You need to manually set the password during the installation. After the first login, change the initial user's password promptly.

The initial user bypasses all permission checks. You are advised to use the initial user as a database administrator only for database management rather than for running services.

### System Administrators

A system administrator is an account with the **SYSADMIN** attribute. By default, a database system administrator has the same permissions as object owners but does not have permissions for objects in the **dbe_perf** schema.

To create a system administrator, connect to the database as the initial user or a system administrator and run the **CREATE USER** or **ALTER USER** statement with **SYSADMIN** specified.

```sql
MogDB=# CREATE USER sysadmin WITH SYSADMIN password "xxxxxxxxx";
```

or

```sql
MogDB=# ALTER USER joe SYSADMIN;
```

To run the **ALTER USER** statement, the user must already exist.

### Monitor Administrators

A monitor administrator is an account with the **MONADMIN** attribute. It has the permission to view the views and functions in the **dbe_perf** schema and to grant or revoke permissions on objects in that schema.

To create a monitor administrator, connect to the database as the system administrator and run the **CREATE USER** or **ALTER USER** statement with **MONADMIN** specified.

```sql
MogDB=# CREATE USER monadmin WITH MONADMIN password "xxxxxxxxx";
```

or

```sql
MogDB=# ALTER USER joe MONADMIN;
```

To run the **ALTER USER** statement, the user must already exist.

### O&M Administrators

An O&M administrator is an account with the **OPRADMIN** attribute. It has the permission to perform backup and restoration using the Roach tool.

To create an O&M administrator, connect to the database as the initial user and run the **CREATE USER** or **ALTER USER** statement with **OPRADMIN** specified.

```sql
MogDB=# CREATE USER opradmin WITH OPRADMIN password "xxxxxxxxx";
```

or

```sql
MogDB=# ALTER USER joe OPRADMIN;
```

To run the **ALTER USER** statement, the user must already exist.

### Security Policy Administrators

A security policy administrator is an account with the **POLADMIN** attribute. It has the permission to create resource labels, data masking policies, and unified audit policies.

To create a security policy administrator, connect to the database as the system administrator and run the **CREATE USER** or **ALTER USER** statement with **POLADMIN** specified.

```sql
MogDB=# CREATE USER poladmin WITH POLADMIN password "xxxxxxxxx";
```

or

```sql
MogDB=# ALTER USER joe POLADMIN;
```

To run the **ALTER USER** statement, the user must already exist.
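Whether a given account holds these administrator attributes can be checked in **PG_AUTHID**. The following is a minimal sketch; the attribute column names (**rolsystemadmin**, **rolmonitoradmin**, **roloperatoradmin**, **rolpolicyadmin**) are assumptions based on common openGauss-derived catalog layouts, so verify them against the **PG_AUTHID** definition of your release.

```sql
-- Check which administrator attributes user joe holds
-- (column names assumed; confirm against your PG_AUTHID definition).
MogDB=# SELECT rolname, rolsystemadmin, rolmonitoradmin, roloperatoradmin, rolpolicyadmin FROM pg_authid WHERE rolname = 'joe';
```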
## Separation of Duties

Descriptions in Default Permission Mechanism and Administrators apply to the initial situation after a cluster is created. By default, a system administrator with the **SYSADMIN** attribute has the highest-level permissions.

In actual service management, you can enable separation of duties to prevent system administrators from holding excessively centralized permissions, which poses high risks. Some permissions of the system administrator are transferred to the security administrator and audit administrator, implementing separation of duties among the system administrator, security administrator, and audit administrator.

After separation of duties is enabled, a system administrator does not have the **CREATEROLE** attribute (security administrator) or the **AUDITADMIN** attribute (audit administrator). That is, the system administrator cannot create roles and users, and cannot view or maintain database audit logs. For details about the **CREATEROLE** and **AUDITADMIN** attributes, see CREATE ROLE.

After separation of duties is enabled, system administrators have permissions only for the objects owned by them.

Separation of duties does not take effect for the initial user. Therefore, you are advised to use the initial user as a database administrator only for database management rather than for running services.

To enable separation of duties, set **enableSeparationOfDuty** to **on**.

For details about permission changes before and after enabling separation of duties, see [Table 1](#Table-2.3.1) and [Table 2](#Table-2.3.2).

**Table 1** Default user permissions

| Object Name | Initial User (ID: 10) | System Administrator | Monitor Administrator | O&M Administrator, Security Policy Administrator, Security Administrator, Audit Administrator, and Common User |
| :--- | :--- | :--- | :--- | :--- |
| Tablespaces | Has all permissions except the one to access private tables. | Can create, modify, delete, access, or grant permissions for tablespaces. | Cannot create, modify, delete, or grant permissions for tablespaces; can access tablespaces if the access permission is granted. | Cannot create, modify, delete, or grant permissions for tablespaces; can access tablespaces if the access permission is granted. |
| Tables | Has all permissions except the one to access private tables. | Has permissions for all tables. | Has all permissions for their own tables, but does not have permissions for other users' tables. | Has all permissions for their own tables, but does not have permissions for other users' tables. |
| Indexes | Has all permissions except the one to access private tables. | Can create indexes on all tables. | Can create indexes on their own tables. | Can create indexes on their own tables. |
| Schemas | Has all permissions except the one to access private tables. | Has all permissions for all schemas except dbe_perf. | Has all permissions for their own schemas and dbe_perf, but does not have permissions for other users' schemas. | Has all permissions for their own schemas, but does not have permissions for other users' schemas. |
| Functions | Has all permissions except the one to access private tables. | Has all permissions for all functions except those in the dbe_perf schema. | Has permissions for their own functions and those in the dbe_perf schema, has the call permission for other users' functions in the public schema, but does not have permissions for other users' functions in other schemas. | Has permissions for their own functions, has the call permission for other users' functions in the public schema, but does not have permissions for other users' functions in other schemas. |
| Customized views | Has all permissions except the one to access private tables. | Has all permissions on all views except the dbe_perf schema view. | Has permissions for their own views and the dbe_perf schema view, but does not have permissions for other users' views. | Has permissions for their own views, but does not have permissions for other users' views. |
| System catalogs and system views | Has all permissions except the one to access private tables. | Has permissions to query all system catalogs and views. | Has permissions to query only some system catalogs and views. For details, see System Catalogs and System Views. | Has permissions to query only some system catalogs and views. For details, see System Catalogs and System Views. |
**Table 2** Changes in permissions after separation of duties

| Object Name | Initial User (ID: 10) | System Administrator | Monitor Administrator, O&M Administrator, Security Policy Administrator, Security Administrator, Audit Administrator, and Common User |
| :--- | :--- | :--- | :--- |
| Tablespaces | N/A. Has all permissions except the one to access private tables. | N/A | N/A |
| Tables | N/A. Has all permissions except the one to access private tables. | Permissions reduced. Has all permissions for their own tables and other users' tables in the public schema, but does not have permissions for other users' tables in other schemas. | N/A |
| Indexes | N/A. Has all permissions except the one to access private tables. | Permissions reduced. Can create indexes for their own tables and other users' tables in the public schema. | N/A |
| Schemas | N/A. Has all permissions except the one to access private tables. | Permissions reduced. Has all permissions for their own schemas, but does not have permissions for other users' schemas. | N/A |
| Functions | N/A. Has all permissions except the one to access private tables. | Permissions reduced. Has all permissions for their own functions and other users' functions in the public schema, but does not have permissions for other users' functions in other schemas. | N/A |
| Customized views | N/A. Has all permissions except the one to access private tables. | Permissions reduced. Has all permissions for their own views and other users' views in the public schema, but does not have permissions for other users' views in other schemas. | N/A |
| System catalogs and system views | N/A. Has all permissions except the one to access private tables. | N/A | N/A |
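To confirm which of the two tables applies to a running instance, you can read the **enableSeparationOfDuty** parameter mentioned above. This is a minimal sketch assuming the parameter is exposed as an ordinary GUC and is therefore readable with **SHOW**:

```sql
-- off: the default model in Table 1 applies; on: the changes in Table 2 apply.
MogDB=# SHOW enableSeparationOfDuty;
```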
> **Notice:**
>
> The PG_STATISTIC and PG_STATISTIC_EXT system catalogs store sensitive information about statistical objects, such as high-frequency values (MCVs). After separation of duties is enabled, the system administrator can still obtain the statistics by accessing these two system catalogs.

## Users

You can use **CREATE USER** and **ALTER USER** to create and manage database users, respectively. MogDB contains one or more named database users and roles that are shared across MogDB. However, these users and roles do not share data. That is, a user can connect to any database, but after the connection is successful, the user can access only the database declared in the connection request.

In non-separation-of-duties scenarios, MogDB user accounts can be created and deleted only by a system administrator or a security administrator with the **CREATEROLE** attribute. In separation-of-duties scenarios, a user account can be created only by an initial user or a security administrator.

When a user logs in, MogDB authenticates the user. A user can own databases and database objects (such as tables), and grant permissions of these objects to other users and roles. In addition to system administrators, users with the **CREATEDB** attribute can create databases and grant permissions on these databases.

**Adding, Modifying, and Deleting Users**

- To create a user, use the SQL statement **CREATE USER**.

  For example, create a user **joe** and set the **CREATEDB** attribute for the user.

  ```sql
  MogDB=# CREATE USER joe WITH CREATEDB PASSWORD "XXXXXXXXX";
  CREATE ROLE
  ```

- To create a system administrator, use the **CREATE USER** statement with the **SYSADMIN** parameter.

- To delete an existing user, use **DROP USER**.

- To change a user account (for example, rename the user or change the password), use **ALTER USER** (a short example follows this list).

- To view a user list, query the **PG_USER** view.

  ```sql
  MogDB=# SELECT * FROM pg_user;
  ```

- To view user attributes, query the system catalog **PG_AUTHID**.

  ```sql
  MogDB=# SELECT * FROM pg_authid;
  ```
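As a brief illustration of the **ALTER USER** bullet above, the following sketch renames a user and then restores the original name; the intermediate name **joe2** is only an example.

```sql
-- Rename user joe (the name joe2 is hypothetical), then rename it back.
MogDB=# ALTER USER joe RENAME TO joe2;
MogDB=# ALTER USER joe2 RENAME TO joe;
```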
**Private Users**

If multiple service departments use different database user accounts to perform service operations, and a database maintenance department at the same level uses database administrator accounts to perform maintenance operations, the service departments may require that database administrators be able to manage (**DROP**, **ALTER**, and **TRUNCATE**) their data but not access (**INSERT**, **DELETE**, **UPDATE**, **SELECT**, and **COPY**) it without specific authorization. That is, the management permissions of database administrators for tables need to be isolated from their access permissions to improve the data security of common users.

In separation-of-duties mode, a database administrator does not have permissions for the tables in schemas of other users. In this case, database administrators have neither management permissions nor access permissions, which does not meet the requirements of the service departments mentioned above. Therefore, MogDB provides private users to solve the problem. That is, create private users with the **INDEPENDENT** attribute in non-separation-of-duties mode.

```sql
MogDB=# CREATE USER user_independent WITH INDEPENDENT IDENTIFIED BY "1234@abc";
```

System administrators and security administrators with the **CREATEROLE** attribute can manage (**DROP**, **ALTER**, and **TRUNCATE**) objects of private users but cannot access (**INSERT**, **DELETE**, **SELECT**, **UPDATE**, **COPY**, **GRANT**, **REVOKE**, and **ALTER OWNER**) the objects before being authorized.

> **NOTICE:** PG_STATISTIC and PG_STATISTIC_EXT store sensitive information about statistical objects, such as high-frequency MCVs. The system administrator can still access the two system catalogs to obtain the statistics of the tables to which private users belong.

**Permanent Users**

MogDB provides the permanent user solution, that is, create a permanent user with the **PERSISTENCE** attribute.

```sql
MogDB=# CREATE USER user_persistence WITH PERSISTENCE IDENTIFIED BY "1234@abc";
```

Only the initial user is allowed to create, modify, and delete permanent users with the **PERSISTENCE** attribute.

## Roles

A role is a set of users. After a role is granted to a user through **GRANT**, the user has all the permissions of the role. It is recommended that roles be used to grant permissions efficiently. For example, you can create different roles for design, development, and maintenance personnel, grant the roles to users, and then grant the specific data permissions required by different users. When permissions are granted or revoked at the role level, the changes take effect on all members of the role.

MogDB provides an implicitly defined group **PUBLIC** that contains all roles. By default, all new users and roles have the permissions of **PUBLIC**. For details about the default permissions of **PUBLIC**, see GRANT. To revoke permissions of **PUBLIC** from a user or role, or re-grant these permissions to them, add the **PUBLIC** keyword in the **REVOKE** or **GRANT** statement.

To view all roles, query the system catalog **PG_ROLES**.

```sql
SELECT * FROM PG_ROLES;
```

**Adding, Modifying, and Deleting Roles**

In non-separation-of-duties scenarios, a role can be created, modified, and deleted only by a system administrator or a user with the **CREATEROLE** attribute. In separation-of-duties scenarios, a role can be created, modified, and deleted only by an initial user or a user with the **CREATEROLE** attribute.

- To create a role, use **CREATE ROLE** (see the sketch after this list).
- To add or delete users in an existing role, use **ALTER ROLE**.
- To delete a role, use **DROP ROLE**. **DROP ROLE** deletes only the role, not the member users in the role.
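The sketch below walks through the role lifecycle described in this list. The role name **manager** is hypothetical; the password placeholder follows the style of the other examples in this document.

```sql
MogDB=# CREATE ROLE manager PASSWORD "XXXXXXXXX"; -- create the role
MogDB=# GRANT manager TO joe;                     -- joe inherits manager's object permissions
MogDB=# REVOKE manager FROM joe;                  -- withdraw the membership again
MogDB=# DROP ROLE manager;                        -- deletes the role, not its former members
```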
## Built-in Roles

MogDB provides a group of default roles whose names start with **gs_role_**. These roles provide access to specific, typically high-privileged operations. You can grant these roles to other users or roles within the database so that they can use specific functions. Grant these roles with great care to ensure that they are used only where they are needed. [Table 3](#table1) describes the permissions of built-in roles.

**Table 3** Permission description of built-in roles

| Role | Permission |
| :--------------------- | :----------------------------------------------------------- |
| gs_role_copy_files | Permission to run the **copy… to/from filename** command. However, the GUC parameter **enable_copy_server_files** must be set first to enable the function of copying server files. |
| gs_role_signal_backend | Permission to call the **pg_cancel_backend**, **pg_terminate_backend**, and **pg_terminate_session** functions to cancel or terminate other sessions. However, this role cannot perform operations on sessions of the initial user or a **PERSISTENCE** user. |
| gs_role_tablespace | Permission to create a tablespace. |
| gs_role_replication | Permission to call logical replication functions, such as **kill_snapshot**, **pg_create_logical_replication_slot**, **pg_create_physical_replication_slot**, **pg_drop_replication_slot**, **pg_replication_slot_advance**, **pg_create_physical_replication_slot_extern**, **pg_logical_slot_get_changes**, **pg_logical_slot_peek_changes**, **pg_logical_slot_get_binary_changes**, and **pg_logical_slot_peek_binary_changes**. |
| gs_role_account_lock | Permission to lock and unlock users. However, this role cannot lock or unlock the initial user or users with the **PERSISTENCE** attribute. |
| gs_role_pldebugger | Permission to debug functions in **dbe_pldebugger**. |

The restrictions on built-in roles are as follows:

- Role names starting with **gs_role_** are reserved for built-in roles in the database. Do not create users or roles whose names start with **gs_role_**, and do not rename existing users or roles to names starting with **gs_role_**.

- Do not perform **ALTER** or **DROP** operations on built-in roles.

- By default, built-in roles do not have the **LOGIN** permission and do not have preset passwords.

- The **gsql** meta-commands **\\du** and **\\dg** do not display information about built-in roles. However, if *pattern* is set to a specific built-in role, the information is displayed.

- When separation of duties is disabled, the initial user, users with the **SYSADMIN** permission, and users with the **ADMIN OPTION** permission on a built-in role can perform **GRANT** and **REVOKE** operations on that built-in role. When separation of duties is enabled, only the initial user and users with the **ADMIN OPTION** permission on a built-in role can perform **GRANT** and **REVOKE** operations on it. Example:

  ```sql
  GRANT gs_role_signal_backend TO user1;
  REVOKE gs_role_signal_backend FROM user1;
  ```

## Schemas

Schemas allow multiple users to use the same database without interference. In this way, database objects can be organized into logical groups that are easy to manage, and third-party applications can be added to corresponding schemas without causing conflicts.

Each database has one or more schemas. Each schema contains tables and other types of objects. When a database is initially created, it has a public schema by default, and all users have the usage permission on the schema. Only the system administrator and initial users can create common functions, aggregate functions, stored procedures, and synonym objects in the public schema. Only the initial users can create operators in the public schema. Other users cannot create the preceding five types of objects even if they are granted the create permission. You can group database objects by schema. A schema is similar to an OS directory but cannot be nested. By default, only the initial user can create objects in the pg_catalog schema.

The same database object name can be used in different schemas of the same database without causing conflicts. For example, both **a_schema** and **b_schema** can contain a table named **mytable**, as shown in the sketch below. Users with required permissions can access objects across multiple schemas of the same database.
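A minimal sketch of the same-name scenario just described, reusing the **a_schema**, **b_schema**, and **mytable** names from the paragraph above:

```sql
-- Two schemas in one database can each contain a table named mytable.
MogDB=# CREATE SCHEMA a_schema;
MogDB=# CREATE SCHEMA b_schema;
MogDB=# CREATE TABLE a_schema.mytable(id int);
MogDB=# CREATE TABLE b_schema.mytable(id int);
MogDB=# SELECT * FROM b_schema.mytable;   -- schema-qualified access avoids ambiguity
```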
+
+When you run the **CREATE USER** command to create a user, the system creates a schema with the same name as the user in the database where the command is executed.
+
+Database objects are generally created in the first schema in a database search path. For details about the first schema and how to change the schema order, see [Search Path](#Search-Path).
+
+**Creating, Modifying, and Deleting Schemas**
+
+- To create a schema, use **CREATE SCHEMA**. By default, the initial user and system administrators can create schemas. Other users can create schemas in a database only when they have the **CREATE** permission on that database. For details about how to grant the permission, see the syntax in GRANT.
+
+- To change the name or owner of a schema, use **ALTER SCHEMA**. Only the schema owner can make such changes.
+
+- To delete a schema and its objects, use **DROP SCHEMA**. Only the schema owner can delete a schema.
+
+- To create a table in a schema, use the **schema_name.table_name** format to specify the table. If *schema_name* is not specified, the table will be created in the first schema in the search path.
+
+- To view the owner of a schema, perform the following join query on the system catalogs **PG_NAMESPACE** and **PG_USER**. Replace **schema_name** in the statement with the name of the schema to be queried.
+
+  ```sql
+  MogDB=# SELECT s.nspname,u.usename AS nspowner FROM pg_namespace s, pg_user u WHERE nspname='schema_name' AND s.nspowner = u.usesysid;
+  ```
+
+- To view a list of all schemas, query the system catalog **PG_NAMESPACE**.
+
+  ```sql
+  MogDB=# SELECT * FROM pg_namespace;
+  ```
+
+- To view a list of tables in a schema, query the system catalog **PG_TABLES**. For example, the following query returns the list of tables in the **pg_catalog** schema.
+
+  ```sql
+  MogDB=# SELECT distinct(tablename),schemaname from pg_tables where schemaname = 'pg_catalog';
+  ```
+
+**Search Path**
+
+A search path is defined in the **search_path** parameter. The parameter value is a list of schema names separated by commas (,). If no target schema is specified during object creation, the object will be added to the first schema listed in the search path. If there are objects with the same name across different schemas and no schema is specified for an object query, the object will be returned from the first schema in the search path that contains an object with that name.
+
+- To view the current search path, use **SHOW**.
+
+  ```sql
+  MogDB=# SHOW SEARCH_PATH;
+   search_path
+  ----------------
+   "$user",public
+  (1 row)
+  ```
+
+  The default value of **search_path** is `"$user",public`. **$user** indicates the name of the schema with the same name as the current session user. If the schema does not exist, **$user** will be ignored. By default, after a user connects to a database, new objects are created in the schema whose name is the same as the user name, if such a schema exists. If no such schema exists, new objects are created in the **public** schema.
+
+- To change the default schema of the current session, run the **SET** statement.
+
+  Run the following command to set **search_path** to **myschema** and **public** (**myschema** will be searched first):
+
+  ```sql
+  MogDB=# SET SEARCH_PATH TO myschema, public;
+  SET
+  ```
+
+## Setting User Permissions
+
+- To grant permissions for an object to a user, see GRANT.
+
+  When permissions for a table or view in a schema are granted to a user or role, the **USAGE** permission on the schema must be granted together. 
Otherwise, the user or role can only see these objects but cannot access them. + + In the following example, permissions for the schema **tpcds** are first granted to user **joe**, and then the **SELECT** permission for the **tpcds.web_returns** table is also granted. + + ```sql + MogDB=# GRANT USAGE ON SCHEMA tpcds TO joe; + MogDB=# GRANT SELECT ON TABLE tpcds.web_returns to joe; + ``` + +- Grant a role to a user to allow the user to inherit the object permissions of the role. + + 1. Create a role. + + Create a role **lily** and grant the system permission **CREATEDB** to the role. + + ```sql + MogDB=# CREATE ROLE lily WITH CREATEDB PASSWORD "XXXXXXXXX"; + ``` + + 2. Grant object permissions to the role by using **GRANT**. + + For example, first grant permissions for the schema **tpcds** to the role **lily**, and then grant the **SELECT** permission of the **tpcds.web_returns** table to **lily**. + + ```sql + MogDB=# GRANT USAGE ON SCHEMA tpcds TO lily; + MogDB=# GRANT SELECT ON TABLE tpcds.web_returns to lily; + ``` + + 3. Grant the role permissions to a user. + + ```sql + MogDB=# GRANT lily to joe; + ``` + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** + > When the permissions of a role are granted to a user, the attributes of the role are not transferred together. + +- To revoke user permissions, use **REVOKE**. + +## Row-Level Access Control + +The row-level access control feature enables database access control to be accurate to each row of data tables. In this way, the same SQL query may return different results for different users. + +You can create a row-level access control policy for a data table. The policy defines an expression that takes effect only for specific database users and SQL operations. When a database user accesses the data table, if a SQL statement meets the specified row-level access control policies of the data table, the expressions that meet the specified condition will be combined by using **AND** or **OR** based on the attribute type (**PERMISSIVE** | **RESTRICTIVE**) and applied to the execution plan in the query optimization phase. + +Row-level access control is used to control the visibility of row-level data in tables. By predefining filters for data tables, the expressions that meet the specified condition can be applied to execution plans in the query optimization phase, which will affect the final execution result. Currently, the SQL statements that can be affected include **SELECT**, **UPDATE**, and **DELETE**. + +Scenario 1: A table summarizes the data of different users. Users can view only their own data. + +```sql +--Create users alice, bob, and peter. +MogDB=# CREATE ROLE alice PASSWORD 'Enmo@123'; +MogDB=# CREATE ROLE bob PASSWORD 'Enmo@123'; +MogDB=# CREATE ROLE peter PASSWORD 'Enmo@123'; + +--Create the all_data table that contains user information. +MogDB=# CREATE TABLE all_data(id int, role varchar(100), data varchar(100)); + +--Insert data into the data table. +MogDB=# INSERT INTO all_data VALUES(1, 'alice', 'alice data'); +MogDB=# INSERT INTO all_data VALUES(2, 'bob', 'bob data'); +MogDB=# INSERT INTO all_data VALUES(3, 'peter', 'peter data'); + +--Grant the read permission for the all_data table to users alice, bob, and peter. +MogDB=# GRANT SELECT ON all_data TO alice, bob, peter; + +--Enable row-level access control. +MogDB=# ALTER TABLE all_data ENABLE ROW LEVEL SECURITY; + +--Create a row-level access control policy to specify that the current user can view only their own data. 
+MogDB=# CREATE ROW LEVEL SECURITY POLICY all_data_rls ON all_data USING(role = CURRENT_USER);
+
+--View table details.
+MogDB=# \d+ all_data
+                               Table "public.all_data"
+ Column |          Type          | Modifiers | Storage  | Stats target | Description
+--------+------------------------+-----------+----------+--------------+-------------
+ id     | integer                |           | plain    |              |
+ role   | character varying(100) |           | extended |              |
+ data   | character varying(100) |           | extended |              |
+Row Level Security Policies:
+    POLICY "all_data_rls"
+      USING (((role)::name = "current_user"()))
+Has OIDs: no
+Location Nodes: ALL DATANODES
+Options: orientation=row, compression=no, enable_rowsecurity=true
+
+--Switch to user alice and run SELECT * FROM public.all_data.
+MogDB=# SELECT * FROM public.all_data;
+ id | role  |    data
+----+-------+------------
+  1 | alice | alice data
+(1 row)
+
+MogDB=# EXPLAIN(COSTS OFF) SELECT * FROM public.all_data;
+                           QUERY PLAN
+----------------------------------------------------------------
+ Streaming (type: GATHER)
+   Node/s: All datanodes
+   ->  Seq Scan on all_data
+         Filter: ((role)::name = 'alice'::name)
+ Notice: This query is influenced by row level security feature
+(5 rows)
+
+--Switch to user peter and run SELECT * FROM public.all_data.
+MogDB=# SELECT * FROM public.all_data;
+ id | role  |    data
+----+-------+------------
+  3 | peter | peter data
+(1 row)
+
+MogDB=# EXPLAIN(COSTS OFF) SELECT * FROM public.all_data;
+                           QUERY PLAN
+----------------------------------------------------------------
+ Streaming (type: GATHER)
+   Node/s: All datanodes
+   ->  Seq Scan on all_data
+         Filter: ((role)::name = 'peter'::name)
+ Notice: This query is influenced by row level security feature
+(5 rows)
+```
+
+> **Notice:** The **PG_STATISTIC** and **PG_STATISTIC_EXT** system catalogs store some sensitive information about the objects for which statistics are collected, such as most common values (MCVs). Even after separation of duties is enabled, the system administrator can still obtain this statistical information by accessing these two system catalogs.
+
+## Setting Security Policies
+
+### Setting Account Security Policies
+
+**Background**
+
+For data security purposes, MogDB provides a series of security measures, such as automatically locking and unlocking accounts, manually locking and unlocking abnormal accounts, and deleting accounts that are no longer used.
+
+**Automatically Locking and Unlocking Accounts**
+
+- If the number of incorrect password attempts (**failed_login_attempts**) of an account reaches the upper limit (**10** by default), the system automatically locks the account. A smaller parameter value results in higher account security, but an excessively small value may cause inconvenience.
+
+- If the time during which an account is locked exceeds the preset value (**password_lock_time**, one day by default), the system automatically unlocks the account. A larger parameter value brings higher account security, but an excessively large value may cause inconvenience.
+
+  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+  >
+  > - The integral part of the **password_lock_time** value indicates the number of days and its decimal part can be converted into hours, minutes, and seconds.
+  > - If the **failed_login_attempts** parameter is set to **0**, an account is never locked due to incorrect password attempts. If the **password_lock_time** parameter is set to **0**, an account is unlocked immediately after it is locked due to incorrect password attempts. 
Therefore, password failure check, account locking, and account unlocking take effect only when both parameters are set to positive values.
+  > - The default values of the two parameters meet the security requirements. You can change the parameter values as needed for higher security. You are advised to retain the default values.
+
+Configure the **failed_login_attempts** parameter.
+
+1. Log in as the OS user **omm** to the primary node of the database.
+
+2. Run the following command to connect to the database:
+
+   ```bash
+   gsql -d mogdb -p 8000
+   ```
+
+   **mogdb** is the name of the database to be connected, and **8000** is the port number of the database primary node.
+
+3. View the current value.
+
+   ```sql
+   MogDB=# SHOW failed_login_attempts;
+    failed_login_attempts
+   -----------------------
+    10
+   (1 row)
+   ```
+
+   If the command output is not **10**, run the **\q** command to exit the database.
+
+4. Run the following command to set the parameter to its default value **10**:
+
+   ```bash
+   gs_guc reload -D /mogdb/data/dbnode -c "failed_login_attempts=10"
+   ```
+
+Configure the **password_lock_time** parameter.
+
+1. Log in as the OS user **omm** to the primary node of the database.
+
+2. Run the following command to connect to the database:
+
+   ```bash
+   gsql -d mogdb -p 8000
+   ```
+
+   **mogdb** is the name of the database to be connected, and **8000** is the port number of the database primary node.
+
+3. View the current value.
+
+   ```sql
+   MogDB=# SHOW password_lock_time;
+    password_lock_time
+   -----------------------
+    1
+   (1 row)
+   ```
+
+   If the command output is not **1**, run the **\q** command to exit the database.
+
+4. Run the following command to set the parameter to its default value **1**:
+
+   ```bash
+   gs_guc reload -N all -I all -c "password_lock_time=1"
+   ```
+
+**Manually Locking and Unlocking Accounts**
+
+If administrators detect that an account is stolen or used for unauthorized access to the database, they can manually lock the account.
+
+Administrators can manually unlock the account once it returns to normal.
+
+For details about how to create a user, see Users. To manually lock and unlock user **joe**, run commands in the following format:
+
+- To manually lock the account, run the following command:
+
+  ```sql
+  MogDB=# ALTER USER joe ACCOUNT LOCK;
+  ALTER ROLE
+  ```
+
+- To manually unlock the account, run the following command:
+
+  ```sql
+  MogDB=# ALTER USER joe ACCOUNT UNLOCK;
+  ALTER ROLE
+  ```
+
+**Deleting Accounts That Are No Longer Used**
+
+Administrators can delete an account that is no longer used. This operation cannot be rolled back.
+
+If the account to be deleted is in the active state, it is deleted after its session is disconnected.
+
+For example, to delete account **joe**, run the following command:
+
+```sql
+MogDB=# DROP USER joe CASCADE;
+DROP ROLE
+```
+
+### Setting the Validity Period of an Account
+
+**Precautions**
+
+When creating a user, you need to specify the validity period of the user, including the start time and end time.
+
+To allow a user whose account is outside the validity period to use the account again, set a new validity period.
+
+**Procedure**
+
+1. Log in as the OS user **omm** to the primary node of the database.
+
+2. Run the following command to connect to the database:
+
+   ```bash
+   gsql -d mogdb -p 8000
+   ```
+
+   **mogdb** is the name of the database to be connected, and **8000** is the port number of the database primary node.
+
+3. 
Run the following command to create a user and specify the start time and end time of the validity period:
+
+   ```sql
+   CREATE USER joe WITH PASSWORD 'XXXXXXXXX' VALID BEGIN '2015-10-10 08:00:00' VALID UNTIL '2016-10-10 08:00:00';
+   ```
+
+   The user is created if the following information is displayed:
+
+   ```sql
+   CREATE ROLE
+   ```
+
+4. If the user is no longer within the specified validity period, run the following command to set the start time and end time of a new validity period:
+
+   ```sql
+   ALTER USER joe WITH VALID BEGIN '2016-11-10 08:00:00' VALID UNTIL '2017-11-10 08:00:00';
+   ```
+
+   The start time and end time of the new validity period are set successfully if the following information is displayed:
+
+   ```sql
+   ALTER ROLE
+   ```
+
+> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+>
+> - If **VALID BEGIN** and **VALID UNTIL** are not specified in the **CREATE ROLE** syntax, the start time and end time of a user's operations are not limited.
+> - If **VALID BEGIN** and **VALID UNTIL** are not specified in the **ALTER ROLE** syntax, the start time and end time of a user's operations are not modified and the original settings are used.
+
+### Setting Password Security Policies
+
+**Procedure**
+
+User passwords are stored in the system catalog **pg_authid**. To prevent password leakage, MogDB encrypts user passwords before storing them. The encryption algorithm is determined by the configuration parameter **password_encryption_type**.
+
+- If parameter **password_encryption_type** is set to **0**, passwords are encrypted using MD5. MD5 is not recommended because it is insecure.
+- If parameter **password_encryption_type** is set to **1**, passwords are encrypted using SHA-256 and MD5. MD5 is not recommended because it is insecure.
+- If parameter **password_encryption_type** is set to **2**, passwords are encrypted using SHA-256. This is the default configuration.
+- If parameter **password_encryption_type** is set to **3**, passwords are encrypted using SM3.
+
+1. Log in as the OS user **omm** to the primary node of the database.
+
+2. Run the following command to connect to the database:
+
+   ```bash
+   gsql -d postgres -p 8000
+   ```
+
+   **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node.
+
+3. View the configured encryption algorithm.
+
+   ```sql
+   MogDB=# SHOW password_encryption_type;
+    password_encryption_type
+   --------------------------
+    2
+   (1 row)
+   ```
+
+   If the command output is **0** or **1**, run the **\q** command to exit the database.
+
+4. Run the following command to set the parameter to a secure encryption algorithm (SHA-256 in this example):
+
+   ```bash
+   gs_guc reload -N all -I all -c "password_encryption_type=2"
+   ```
+
+   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** When running **CREATE USER/ROLE** to create a database user, do not specify the **UNENCRYPTED** attribute, so that the password of the newly created user is encrypted before being stored. Otherwise, the password may be leaked.
+
+5. Configure password security parameters.
+
+   - Password complexity
+
+     You need to specify a password when initializing a database, creating a user, or modifying a user. The password must meet the complexity check rules (see password_policy). Otherwise, you are prompted to enter the password again.
+
+     - If parameter **password_policy** is set to **1**, the default password complexity rule is used to check passwords (see the sketch below).
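+
+       The following minimal sketch illustrates the rule (the user name **tester** and both passwords are hypothetical examples, and the exact server error text may differ from the comments):
+
+       ```sql
+       -- Rejected by the default complexity check: only lowercase letters,
+       -- which is fewer than the required three character types
+       CREATE USER tester PASSWORD 'abcdefgh';
+       -- Accepted: mixes uppercase letters, lowercase letters, digits, and a special character
+       CREATE USER tester PASSWORD 'Test@1234';
+       ```
+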
+     - If parameter **password_policy** is set to **0**, the password complexity rule is not used. However, the password cannot be empty and must contain only valid characters, including uppercase letters (A–Z), lowercase letters (a–z), digits (0–9), and special characters. You are not advised to set this parameter to **0** because it poses security risks. If you do need to use this setting, make sure that **password_policy** is set to **0** on all MogDB nodes.
+
+     Configure the **password_policy** parameter.
+
+     a. Run the following command to connect to the database:
+
+     ```bash
+     gsql -d postgres -p 8000
+     ```
+
+     **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node.
+
+     b. View the current value.
+
+     ```sql
+     MogDB=# SHOW password_policy;
+      password_policy
+     ---------------------
+      1
+     (1 row)
+     ```
+
+     If the command output is not **1**, run the **\q** command to exit the database.
+
+     c. Run the following command to set the parameter to its default value **1**:
+
+     ```bash
+     gs_guc reload -N all -I all -c "password_policy=1"
+     ```
+
+     > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note:**
+     >
+     > The password complexity requirements are as follows:
+     >
+     > - Minimum number of uppercase letters (A-Z) (**password_min_uppercase**)
+     >
+     > - Minimum number of lowercase letters (a-z) (**password_min_lowercase**)
+     >
+     > - Minimum number of digits (0-9) (**password_min_digital**)
+     >
+     > - Minimum number of special characters (**password_min_special**) ([Table 4](#Table-2.9.3.1) lists special characters.)
+     >
+     > - Minimum password length (**password_min_length**)
+     >
+     > - Maximum password length (**password_max_length**)
+     >
+     > - A password must contain at least three of the four character types (uppercase letters, lowercase letters, digits, and special characters).
+     >
+     > - A password cannot be the same as the user name or the user name spelled backwards; this check is case-insensitive.
+     >
+     > - A new password cannot be the current password or the current password spelled backwards.
+     >
+     > - A password cannot be a weak password.
+     >
+     > - Weak passwords are easy to crack. The definition of weak passwords may vary with users or user groups. Users can define their own weak passwords.
+     >
+     > - Passwords in the weak password dictionary are stored in the **gs_global_config** system catalog. When a user is created or modified, the password set by the user is compared with those stored in the weak password dictionary. If the password is matched, a message is displayed, indicating that the password is weak, and password setting fails.
+     >
+     > - The weak password dictionary is empty by default. You can add or delete weak passwords using the following syntax:
+     >
+     > ```sql
+     > MogDB=# CREATE WEAK PASSWORD DICTIONARY WITH VALUES ('password1'), ('password2');
+     > MogDB=# DROP WEAK PASSWORD DICTIONARY;
+     > ```
+
+   - Password reuse
+
+     An old password can be reused only when it meets the requirements on reuse days (**password_reuse_time**) and reuse times (**password_reuse_max**). [Table 5](#Table-2.9.3.2) lists the parameter configurations.
+
+     > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The default values of the **password_reuse_time** and **password_reuse_max** parameters are **60** and **0**, respectively. Large parameter values ensure high security, but they may also cause operation inconvenience. The default values meet security standards. 
You can keep them or change the values as needed to improve the security level. + + Configure the **password_reuse_time** parameter. + + a. Run the following command to connect to the database: + + ```bash + gsql -d postgres -p 8000 + ``` + + **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. + + b. View the current value. + + ```sql + MogDB=# SHOW password_reuse_time; + password_reuse_time + --------------------- + 60 + (1 row) + ``` + + If the command output is not **60**, run the **\q** command to exit the database. + + c. Run the following command to set the parameter to its default value **60**: + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** You are not advised to set the parameter to **0**. This value is valid only when **password_reuse_time** for all MogDB nodes is set to **0**. + + ```bash + gs_guc reload -N all -I all -c "password_reuse_time=60" + ``` + + Configure the **password_reuse_max** parameter. + + a. Run the following command to connect to the database: + + ```bash + gsql -d postgres -p 8000 + ``` + + **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. + + b. View the current value. + + ```sql + MogDB=# SHOW password_reuse_max; + password_reuse_max + -------------------- + 0 + (1 row) + ``` + + If the command output is not **0**, run the **\q** command to exit the database. + + c. Run the following command to set the parameter to its default value **0**: + + ```bash + gs_guc reload -N all -I all -c "password_reuse_max = 0" + ``` + + - Password validity period + + A validity period (**password_effect_time**) is set for each database user password. If the password is about to expire (**password_notify_time**), the system displays a message to remind the user to change it upon login. + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Considering the usage and service continuity of a database, the database still allows a user to log in after the password expires. A password change notification is displayed every time the user logs in to the database until the password is changed. + + Configure the **password_effect_time** parameter. + + a. Run the following command to connect to the database: + + ```bash + gsql -d postgres -p 8000 + ``` + + **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. + + b. View the current value. + + ```sql + MogDB=# SHOW password_effect_time; + password_effect_time + ---------------------- + 90 + (1 row) + ``` + + If the command output is not **90**, run the **\q** command to exit the database. + + c. Run the following command to set the parameter to **90** (**0** is not recommended): + + ```bash + gs_guc reload -N all -I all -c "password_effect_time = 90" + ``` + + Configure the **password_notify_time** parameter. + + a. Run the following command to connect to the database: + + ```bash + gsql -d postgres -p 8000 + ``` + + **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. + + b. View the current value. + + ```sql + MogDB=# SHOW password_notify_time; + password_notify_time + ---------------------- + 7 + (1 row) + ``` + + c. 
If **7** is not displayed, run the following command to set the parameter to **7** (**0** is not recommended):
+
+     ```bash
+     gs_guc reload -N all -I all -c "password_notify_time = 7"
+     ```
+
+   - Password change
+
+     - During database installation, an OS user with the same name as the initial user is created. The password of the OS user needs to be periodically changed for account security.
+
+       To change the password of user **user1**, run the following command:
+
+       ```bash
+       passwd user1
+       ```
+
+       Change the password as prompted.
+
+     - System administrators and common users need to periodically change their passwords to prevent the accounts from being stolen.
+
+       For example, to change the password of user **user1**, connect to the database as the system administrator and run the following commands:
+
+       ```sql
+       MogDB=# ALTER USER user1 IDENTIFIED BY "1234@abc" REPLACE "5678@def";
+       ALTER ROLE
+       ```
+
+       > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** **1234@abc** and **5678@def** represent the new password and the original password of user **user1**, respectively. If the new password does not have the required complexity, the change will not take effect.
+
+     - Administrators can change their own and common users' passwords. If common users forget their passwords, they can ask administrators to change the passwords.
+
+       To change the password of user **joe**, run the following command:
+
+       ```sql
+       MogDB=# ALTER USER joe IDENTIFIED BY "xxxxxxxx";
+       ALTER ROLE
+       ```
+
+       > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+       >
+       > - System administrators are not allowed to change passwords for each other.
+       > - A system administrator can change the password of a common user without being required to provide the common user's old password.
+       > - A system administrator can change their own password but is required to provide the old password.
+
+   - Password verification
+
+     Password verification is required when you use **SET ROLE** to switch to another user or role in the current session. If the entered password is inconsistent with the stored password of the user, an error is reported.
+
+     For example, to set the current session to user **joe**, run the following command. Because an incorrect password is provided, an error is reported:
+
+     ```sql
+     MogDB=# SET ROLE joe PASSWORD "abc@1234";
+     ERROR: Invalid username/password,set role denied.
+     ```
+
+     **Table 4** Special characters
+
+     | No.  | Character | No.  | Character | No.  | Character | No.  | Character |
+     | :--- | :-------- | :--- | :-------- | :--- | :-------- | :--- | :-------- |
+     | 1    | ~         | 9    | *         | 17   | \|        | 25   | <         |
+     | 2    | !         | 10   | (         | 18   | [         | 26   | .         |
+     | 3    | @         | 11   | )         | 19   | {         | 27   | >         |
+     | 4    | #         | 12   | -         | 20   | }         | 28   | /         |
+     | 5    | $         | 13   | _         | 21   | ]         | 29   | ?         |
+     | 6    | %         | 14   | =         | 22   | ;         | -    | -         |
+     | 7    | ^         | 15   | +         | 23   | :         | -    | -         |
+     | 8    | &         | 16   | \\        | 24   | ,         | -    | -         |
+
+     **Table 5** Parameter description for reuse days and reuse times
+
+     | Parameter | Value Range | Description |
+     | :----------------------------------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- |
+     | Number of days during which a password cannot be reused (**password_reuse_time**) | Positive number or **0**. The integral part of a positive number indicates the number of days and its decimal part can be converted into hours, minutes, and seconds.
The default value is **60**. | - If the parameter value is changed to a smaller one, new passwords will be checked based on the new parameter value.
- If the parameter value is changed to a larger one (for example, from **a** to **b**), the historical passwords used more than **b** days ago can probably be reused, because those history records may have already been deleted. New passwords will be checked based on the new parameter value.
NOTE:
The absolute time is used. Historical passwords are recorded using absolute time and are unaffected by local time changes. | +| Number of consecutive times that a password cannot be reused (**password_reuse_max**) | Positive integer or **0**. The default value is **0**.
The value **0** indicates that the number of consecutive times that a password cannot be reused is not checked. | - If the parameter value is changed to a smaller one, new passwords will be checked based on the new parameter value.
- If the parameter value is changed to a larger one (for example, changed from **a** to **b**), the historical passwords before the last **b** passwords probably can be reused because these historical passwords may have been deleted. New passwords will be checked based on the new parameter value. | + +6. Set user password expiration. + + When creating a user, a user with the **CREATEROLE** permission can force the user password to expire. After logging in to the database for the first time, a new user can perform query operations only after changing the password. The command format is as follows: + + ```sql + MogDB=# CREATE USER joe PASSWORD "abc@1234" EXPIRED; + CREATE ROLE + ``` + + A user with the **CREATEROLE** permission can force a user password to expire or force a user to change the forcibly expired password. The command format is as follows: + + ```sql + MogDB=# ALTER USER joe PASSWORD EXPIRED; + ALTER ROLE + ``` + + ```sql + MogDB=# ALTER USER joe PASSWORD "abc@2345" EXPIRED; + ALTER ROLE + ``` + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** + > + > - After a user whose password is invalid logs in to the database, the system prompts the user to change the password when the user performs a simple or extended query. The user can then execute statements after changing the password. + > - Only initial users, system administrators (with the **sysadmin** permission), and users who have the permission to create users (with the **CREATEROLE** permission) can invalidate user passwords. System administrators can invalidate their own passwords or the passwords of other system administrators. The password of initial users cannot be invalidated. diff --git a/product/en/docs-mogdb/v5.0/security-guide/security/3-configuring-database-audit.md b/product/en/docs-mogdb/v5.0/security-guide/security/3-configuring-database-audit.md index d37410ce..2bb2e316 100644 --- a/product/en/docs-mogdb/v5.0/security-guide/security/3-configuring-database-audit.md +++ b/product/en/docs-mogdb/v5.0/security-guide/security/3-configuring-database-audit.md @@ -1,372 +1,372 @@ ---- -title: Configuring Database Audit -summary: Configuring Database Audit -author: Guo Huan -date: 2021-04-26 ---- - -# Configuring Database Audit - -## Overview - -**Background** - -Database security is essential for a database system. MogDB writes all user operations in the database to audit logs. Database security administrators can use the audit logs to reproduce a series of events that cause faults in the database and identify unauthorized users, unauthorized operations, and the time when these operations are performed. - -You need to know the following about the audit function: - -- The overall audit switch **audit_enabled** supports dynamic loading. After you change the switch status when the database is running, the change takes effect immediately and you do not need to restart the database. Its default value is **on**, indicating that the audit function is enabled. -- In addition to the overall audit switch, each audit item has an independent switch. An audit item is available only after its own switch is turned on. -- The switch of each audit supports dynamic loading. After changing the audit switch status when the database is running, the modification takes effect immediately without restarting the database. - -[Table 1](#Table-3.1.1) describes the audit items supported by MogDB. 
- -**Table 1** Audit items - -| Configuration Item | Description | -| :----------------------------------------------------------- | :----------------------------------------------------------- | -| User login and logout audit | Parameter:**audit_login_logout**
Its default value is **7**, which indicates that the function of user login and logout audit is enabled. **0** indicates that the function of user login and logout audit is disabled. Other values are not recommended. | -| Database startup, stop, recovery, and switchover audit | Parameter:**audit_database_process**
Its default value is **1**, which indicates that the audit of database startup, stop, recovery, and switchover is enabled. | -| User locking and unlocking audit | Parameter:**audit_user_locked**
Its default value is **1**, which indicates that the audit of user locking and unlocking is enabled. | -| Unauthorized access audit | Parameter:**audit_user_violation**
Its default value is **0**, which indicates that the audit of unauthorized access is disabled. | -| Permission granting and revoking audit | Parameter:**audit_grant_revoke**
Its default value is **1**, which indicates that the audit of permission granting and revoking is enabled. | -| Audit of **CREATE**, **ALTER**, and **DROP** operations on database objects | Parameter:**audit_system_object**
Its default value is **12295**, which indicates that the **CREATE**, **ALTER**, and **DROP** operations only on databases, schemas, users, and data sources are audited. | -| Audit of **INSERT**, **UPDATE**, and **DELETE** operations on a specific table | Parameter:**audit_dml_state**
Its default value is **0**, which indicates that the audit of DML operations (except **SELECT**) on a specific table is disabled. | -| **SELECT** operation audit | Parameter:**audit_dml_state_select**
Its default value is **0**, which indicates that the audit of **SELECT** operations is disabled. | -| **COPY** operation audit | Parameter:**audit_copy_exec**
Its default value is **0**, which indicates that the audit of the **COPY** operations is enabled. | -| Execution of stored procedures and customized functions | Parameter:**audit_function_exec**
Its default value is **0**, which indicates that no execution audit logs of stored procedures and customized functions are recorded. | -| **SET** operation audit | Parameter:**audit_set_parameter**
Its default value is **1**, which indicates that the audit of the **SET** operation is enabled. | -| Transaction ID record | Parameter:**audit_xid_info**
Its default value is **0**, which indicates that the function of recording transaction IDs in audit logs is disabled. | - -For details about security-related parameters, see [Table 2](#Table-3.1.2). - -**Table 2** Security-related parameters - -| Parameter | Description | -| :-------------------------- | :----------------------------------------------------------- | -| ssl | Specifies whether the SSL connection is enabled. | -| require_ssl | Specifies whether the server requires the SSL connection. | -| ssl_ciphers | Encryption algorithm list supported by the SSL | -| ssl_cert_file | File containing the SSL server certificate | -| ssl_key_file | File containing the SSL private key | -| ssl_ca_file | File containing CA information | -| ssl_crl_file | File containing CRL information | -| password_policy | Specifies whether to check the password complexity. | -| password_reuse_time | Specifies whether to check the reuse days of a new password. | -| password_reuse_max | Specifies whether to check the reuse times of a new password. | -| password_lock_time | Duration before a locked account is automatically unlocked | -| failed_login_attempts | If the number of consecutive login attempts with incorrect passwords reaches this value, the account is locked. | -| password_encryption_type | Password storage encryption mode | -| password_min_uppercase | Minimum number of uppercase letters in a password | -| password_min_lowercase | Minimum number of lowercase letters in a password | -| password_min_digital | Minimum number of digits in a password | -| password_min_special | Minimum number of special characters in a password | -| password_min_length | Minimum password length
NOTE:
The value of this parameter must be less than or equal to that of **password_max_length**. Otherwise, a password length error message is displayed upon all password-related operations. | -| password_max_length | Maximum password length
NOTE:
The value of this parameter must be greater than or equal to that of **password_min_length**. Otherwise, a password length error message is displayed upon all password-related operations. | -| password_effect_time | Password validity period | -| password_notify_time | Number of days prior to account password expiration that a user is notified | -| audit_enabled | Specifies whether the audit process is enabled or disabled. | -| audit_directory | Audit file storage directory | -| audit_data_format | Audit log file format. Currently, only the binary format is supported. | -| audit_rotation_interval | Time interval of creating an audit log file. If the interval between the creation time of the last audit log file and the current time exceeds the parameter value, the server generates a new audit log file. | -| audit_rotation_size | Maximum capacity of an audit log file. If the total number of messages in an audit log exceeds the value of **audit_rotation_size**, the server will generate a new audit log file. | -| audit_resource_policy | Policy for determining whether audit logs are preferentially stored by space or time. **on** indicates that audit logs are preferentially stored by space. | -| audit_file_remain_time | Minimum duration required for recording audit logs. This parameter is valid only when **audit_resource_policy** is set to **off**. | -| audit_space_limit | Maximum total size of audit log files in a disk | -| audit_file_remain_threshold | Maximum number of audit files in the audit directory | -| audit_login_logout | Specifies whether to audit user logins (including login successes and failures) and logouts. | -| audit_database_process | Specifies whether to audit database startup, stop, switchover, and restoration operations. | -| audit_user_locked | Specifies whether to audit database user locking and unlocking. | -| audit_user_violation | Specifies whether to audit unauthorized access of database users. | -| audit_grant_revoke | Specifies whether to audit user permission granting and reclaiming operations. | -| audit_system_object | Specifies whether to audit the **CREATE**, **DROP**, and **ALTER** operations on database objects. | -| audit_dml_state | Specifies whether to audit the **INSERT**, **UPDATE**, and **DELETE** operations on a specific table. | -| audit_dml_state_select | Specifies whether to audit the **SELECT** operation. | -| audit_copy_exec | Specifies whether to audit the **COPY** operation. | -| audit_function_exec | Specifies whether to record audit information during execution of stored procedures, anonymous blocks, or customized functions (excluding system functions). | -| audit_set_parameter | Specifies whether to audit the **SET** operation. | -| enableSeparationOfDuty | Specifies whether the separation of duties is enabled. | -| session_timeout | If the duration of a connection session exceeds the parameter value, the session is automatically disconnected. | -| auth_iteration_count | Number of iterations during the generation of encrypted information for authentication | - -**Procedure** - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to connect to the database: - - ```bash - gsql -d mogdb -p 8000 - ``` - - **mogdb** is the name of the database to be connected, and **8000** is the port number of the database primary node. 
- - If information similar to the following is displayed, the connection succeeds: - - ```bash - gsql((MogDB x.x.x build f521c606) compiled at 2021-09-16 14:55:22 commit 2935 last mr 6385 release) - Non-SSL connection (SSL connection is recommended when requiring high-security) - Type "help" for help. - - MogDB=# - ``` - -3. Check the status of the overall audit switch. - - 1. Run the **show** command to view the value of **audit_enabled**. - - ```sql - MogDB=# SHOW audit_enabled; - ``` - - If off is displayed, run the **\q** command to exit the database and go to the next step. If on is displayed, no further action is required. - - 2. Run the following command to enable the audit function. The parameter settings take effect immediately. - - ```bash - gs_guc set -N all -I all -c "audit_enabled=on" - ``` - -4. Configure specific audit items. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - After the audit function is enabled, user operations can be recorded into an audit file. - > - The default parameter value of each audit item meets security standards. You can enable audit functions as needed, but system performance may be affected. - - For example, you can enable the audit switch of the **CREATE**, **DROP**, and **ALTER** operations on all database objects. The methods of modifying other configuration items are the same: - - ```bash - gs_guc reload -N all -I all -c "audit_system_object=12295" - ``` - - **audit_system_object** indicates the switch of the audit item, and **12295** indicates the value of the audit switch. - -## Querying Audit Results - -**Prerequisites** - -- Audit has been enabled. -- Audit of required items has been enabled. -- The database is running properly and a series of addition, modification, deletion, and query operations have been executed in the database. Otherwise, no audit result is generated. -- Audit logs are separately recorded on the database nodes. - -**Background** - -- Only users with the **AUDITADMIN** permission can view audit records. For details about database users and how to create users, see [Users](2-managing-users-and-their-permissions.md#users). - -- The SQL function **pg_query_audit** is provided by the database for audit query. Its syntax is as follows: - - ```bash - pg_query_audit(timestamptz startime,timestamptz endtime,audit_log) - ``` - - **startime** and **endtime** indicate the start time and end time of the audit record, respectively. **audit_log** indicates the physical file path of the queried audit logs. If **audit_log** is not specified, the audit log information of the current instance is queried. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > The difference between the values of the **startime** and **endtime** parameters indicates the query period, which can be any value ranging from 00:00:00 of the **startime** parameter to 23:59:59 of the **endtime** parameter. Therefore, the **startime** and **endtime** parameters must be properly set to ensure that the required audit information is displayed. - -**Procedure** - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to connect to the database: - - ```bash - gsql -d postgres -p 8000 - ``` - - **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. - -3. 
Run the following command to query the audit record: - - ```sql - MogDB=# select * from pg_query_audit('2021-03-04 08:00:00','2021-03-04 17:00:00'); - ``` - - The command output is similar to the following: - - ``` - time | type | result | userid | username | database | client_conninfo | object_name | detail_info | node_name | thread_id | local_port | remote_port - ------------------------+----------------+--------+--------+-----------+-----------+-------------------------+-------------------+--------------+-------------------+---------------------------------+------------+------------- - 2021-03-04 08:00:08+08 | login_success | ok | 10 | omm | postgres | gsql@::1 | postgres | login db(postgres) success, SSL=off | dn_6001_6002_6003 | 140477687527168@668131208211425 |17778 | 46946 - ``` - - This audit record indicates that user **omm** logged in to the **postgres** at the time specified by the **time** column. After the host specified by **log_hostname** is started and a client is connected to its IP address, the host name found by reverse DNS resolution is displayed following the at sign (@) in the value of **client_conninfo**. - - > **NOTE:** SSL information is recorded at the end of the audit log **detail_info**. **SSL=on** indicates that the client is connected using SSL, and **SSL=off** indicates that the client is not connected using SSL. - -## Maintaining Audit Logs - -**Prerequisites** - -You have the audit permission. - -**Background** - -- [Table 1](#Table-3.3.1) lists the configuration parameters related to audit logs and the parameter descriptions. - - **Table 1** Configuration parameters of audit logs - - | Parameter | Description | Default Value | - | :-------------------------- | :--------------------------------------------------- | :----------------------------------------------------------- | - | audit_directory | Audit file storage directory | /var/log/mogdb/**User name**/pg_audit | - | audit_resource_policy | Policy for saving audit logs | **on** (indicating that the space configuration policy is used) | - | audit_space_limit | Maximum storage space occupied by audit files | 1GB | - | audit_file_remain_time | Minimum period for storing audit log files | 90 | - | audit_file_remain_threshold | Maximum number of audit files in the audit directory | 1048576 | - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > If **gs_om** is used for MogDB deployment, audit logs are stored in **/var/log/mogdb/Username/pg_audit**. - -- The **pg_delete_audit** function is used to delete audit logs and is an internal SQL function of the database. Its syntax is as follows: - - ```bash - pg_delete_audit(timestamp startime,timestamp endtime) - ``` - - **startime** and **endtime** indicate the audit record start time and end time, respectively. - -- Audit content is commonly recorded to database tables or OS files. [Table 2](#Table-3.3.2) lists the advantages and disadvantages of the two record methods. - - **Table 2** Comparison between the two record methods - - | Mode | Advantage | Disadvantage | - | :----------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | - | Record in tables | Users do not need to maintain audit logs. | Any users having certain permissions to access database objects can access the audit tables. If a user illegally performs operations on the audit tables, the audit records may become inaccurate. 
| - | Record in OS files | This method has higher security because a user with the permission to access the database may not have the permission to access the OS files. | Users need to maintain audit logs. | - - For database security purposes, MogDB adopts the second method to save audit results for reliability. - -**Procedure** - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to connect to the database: - - ```bash - gsql -d mogdb -p 8000 - ``` - - **mogdb** is the name of the database to be connected, and **8000** is the port number of the database primary node. - -3. Select a method to maintain audit logs. - - - Automatic deletion of audit logs - - If the storage space occupied by audit files or the number of audit files reaches the upper limit, the system automatically deletes the oldest audit files and records deletion information to audit logs. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > By default, the allowed maximum storage space that can be occupied by audit files is set to 1024 MB. Users can set the value as required. - - Configure the allowed maximum size of storage space occupied by audit files (**audit_space_limit**). - - a. View the current value. - - ```sql - MogDB=# SHOW audit_space_limit; - audit_space_limit - ------------------- - 1GB - (1 row) - ``` - - If the command output is not 1 GB (1024 MB), run the **\q** command to exit the database. - - b. Run the following command to set the parameter to its default value **1024MB**: - - ```bash - gs_guc reload -N all -I all -c "audit_space_limit=1024MB" - ``` - - Configure the maximum number of audit files (**audit_file_remain_threshold**). - - a. View the current value. - - ```sql - MogDB=# SHOW audit_file_remain_threshold; - audit_file_remain_threshold - ----------------------------- - 1048576 - (1 row) - ``` - - If the command output is not 1048576, run the **\q** command to exit the database. - - b. Run the following command to set the parameter to its default value **1048576**: - - ```bash - gs_guc reload -N all -I all -c "audit_file_remain_threshold=1048576" - ``` - - - Manual backup of audit files - - If the storage space occupied by audit files or the number of audit logs exceeds the threshold specified by the configuration file, the system automatically deletes the oldest audit files. Therefore, you are advised to periodically save important audit logs. - - a. Run the **show** command to view the directory (**audit_directory**) where audit files are saved. - - ```sql - MogDB=# SHOW audit_directory; - ``` - - b. Copy the entire audit directory elsewhere to save it. - - - Manual deletion of audit files - - Run the **pg_delete_audit** command to manually delete the audit records generated during a specified period of time. - - Example: Manually delete the audit records generated from September 20, 2012 to September 21, 2012. - - ```sql - SELECT pg_delete_audit('2012-09-20 00:00:00','2012-09-21 23:59:59'); - ``` - -## Configuring File Permission Security Policies - -**Background** - -During its installation, the database sets permissions for its files, including files (such as log files) generated during the running process. File permissions are set as follows: - -- The permission of program directories in the database is set to **0750**. - -- The permission for data file directories in the database is set to **0700**. 
- - During MogDB deployment, the directory specified by the **tmpMppdbPath** parameter in the XML configuration file is created for storing **.s.PGSQL.\*** files. If the parameter is not specified, the **/tmp/$USER_mppdb** directory is created. The directory and file permission is set to **0700**. - -- The permissions of data files and audit logs of the database, as well as data files generated by other database programs, are set to **0600**. The permission of run logs is equal to or lower than **0640** by default. - -- Common OS users are not allowed to modify or delete database files and log files. - -**Directory and File Permissions of Database Programs** - -[Table 1](#Table-3.4.1) lists some of program directories and file permissions of the installed database. - -**Table 1** Program directories and file permissions - -| File or Directory | Parent Contents | Permissions | -| :----------------------------------------- | :------------------------------------- | :---------- | -| bin | - | 0700 | -| lib | - | 0700 | -| share | - | 0700 | -| data (database node/primary database node) | - | 0700 | -| base | Instance data directory | 0700 | -| global | Instance data directory | 0700 | -| pg_audit | Instance data directory (configurable) | 0700 | -| pg_log | Instance data directory (configurable) | 0700 | -| pg_xlog | Instance data directory | 0700 | -| postgresql.conf | Instance data directory | 0600 | -| pg_hba.conf | Instance data directory | 0600 | -| postmaster.opts | Instance data directory | 0600 | -| pg_ident.conf | Instance data directory | 0600 | -| gs_initdb | bin | 0700 | -| gs_dump | bin | 0700 | -| gs_ctl | bin | 0700 | -| gs_guc | bin | 0700 | -| gsql | bin | 0700 | -| archive_status | pg_xlog | 0700 | -| libpq.so.5.5 | lib | 0600 | - -**Suggestion** - -During the installation, the database automatically sets permissions for its files, including files (such as log files) generated during the running process. The specified permissions meet permission requirements in most scenarios. If you have any special requirements for the related permissions, you are advised to periodically check the permission settings to ensure that the permissions meet the product requirements. +--- +title: Configuring Database Audit +summary: Configuring Database Audit +author: Guo Huan +date: 2021-04-26 +--- + +# Configuring Database Audit + +## Overview + +**Background** + +Database security is essential for a database system. MogDB writes all user operations in the database to audit logs. Database security administrators can use the audit logs to reproduce a series of events that cause faults in the database and identify unauthorized users, unauthorized operations, and the time when these operations are performed. + +You need to know the following about the audit function: + +- The overall audit switch **audit_enabled** supports dynamic loading. After you change the switch status when the database is running, the change takes effect immediately and you do not need to restart the database. Its default value is **on**, indicating that the audit function is enabled. +- In addition to the overall audit switch, each audit item has an independent switch. An audit item is available only after its own switch is turned on. +- The switch of each audit supports dynamic loading. After changing the audit switch status when the database is running, the modification takes effect immediately without restarting the database. + +[Table 1](#Table-3.1.1) describes the audit items supported by MogDB. 
+ +**Table 1** Audit items + +| Configuration Item | Description | +| :----------------------------------------------------------- | :----------------------------------------------------------- | +| User login and logout audit | Parameter:**audit_login_logout**
Its default value is **7**, which indicates that the function of user login and logout audit is enabled. **0** indicates that the function of user login and logout audit is disabled. Other values are not recommended. | +| Database startup, stop, recovery, and switchover audit | Parameter:**audit_database_process**
Its default value is **1**, which indicates that the audit of database startup, stop, recovery, and switchover is enabled. | +| User locking and unlocking audit | Parameter:**audit_user_locked**
Its default value is **1**, which indicates that the audit of user locking and unlocking is enabled. | +| Unauthorized access audit | Parameter:**audit_user_violation**
Its default value is **0**, which indicates that the audit of unauthorized access is disabled. | +| Permission granting and revoking audit | Parameter:**audit_grant_revoke**
Its default value is **1**, which indicates that the audit of permission granting and revoking is enabled. | +| Audit of **CREATE**, **ALTER**, and **DROP** operations on database objects | Parameter:**audit_system_object**
Its default value is **12295**, which indicates that the **CREATE**, **ALTER**, and **DROP** operations only on databases, schemas, users, and data sources are audited. | +| Audit of **INSERT**, **UPDATE**, and **DELETE** operations on a specific table | Parameter:**audit_dml_state**
Its default value is **0**, which indicates that the audit of DML operations (except **SELECT**) on a specific table is disabled. | +| **SELECT** operation audit | Parameter:**audit_dml_state_select**
Its default value is **0**, which indicates that the audit of **SELECT** operations is disabled. | +| **COPY** operation audit | Parameter:**audit_copy_exec**
Its default value is **1**, which indicates that the audit of **COPY** operations is enabled. |
+| Execution of stored procedures and customized functions | Parameter:**audit_function_exec**
Its default value is **0**, which indicates that no execution audit logs of stored procedures and customized functions are recorded. | +| **SET** operation audit | Parameter:**audit_set_parameter**
Its default value is **1**, which indicates that the audit of the **SET** operation is enabled. | +| Transaction ID record | Parameter:**audit_xid_info**
Its default value is **0**, which indicates that the function of recording transaction IDs in audit logs is disabled. |
+
+For details about security-related parameters, see [Table 2](#Table-3.1.2).
+
+**Table 2** Security-related parameters
+
+| Parameter | Description |
+| :-------------------------- | :----------------------------------------------------------- |
+| ssl | Specifies whether the SSL connection is enabled. |
+| require_ssl | Specifies whether the server requires the SSL connection. |
+| ssl_ciphers | List of encryption algorithms supported by SSL |
+| ssl_cert_file | File containing the SSL server certificate |
+| ssl_key_file | File containing the SSL private key |
+| ssl_ca_file | File containing CA information |
+| ssl_crl_file | File containing CRL information |
+| password_policy | Specifies whether to check the password complexity. |
+| password_reuse_time | Specifies whether to check the reuse days of a new password. |
+| password_reuse_max | Specifies whether to check the reuse times of a new password. |
+| password_lock_time | Duration before a locked account is automatically unlocked |
+| failed_login_attempts | If the number of consecutive login attempts with incorrect passwords reaches this value, the account is locked. |
+| password_encryption_type | Password storage encryption mode |
+| password_min_uppercase | Minimum number of uppercase letters in a password |
+| password_min_lowercase | Minimum number of lowercase letters in a password |
+| password_min_digital | Minimum number of digits in a password |
+| password_min_special | Minimum number of special characters in a password |
+| password_min_length | Minimum password length<br>
NOTE:
The value of this parameter must be less than or equal to that of **password_max_length**. Otherwise, a password length error message is displayed upon all password-related operations. | +| password_max_length | Maximum password length
NOTE:
The value of this parameter must be greater than or equal to that of **password_min_length**. Otherwise, a password length error message is displayed upon all password-related operations. |
+| password_effect_time | Password validity period |
+| password_notify_time | Number of days prior to account password expiration that a user is notified |
+| audit_enabled | Specifies whether the audit process is enabled or disabled. |
+| audit_directory | Audit file storage directory |
+| audit_data_format | Audit log file format. Currently, only the binary format is supported. |
+| audit_rotation_interval | Interval at which a new audit log file is created. If the interval between the creation time of the last audit log file and the current time exceeds the parameter value, the server generates a new audit log file. |
+| audit_rotation_size | Maximum size of an audit log file. If the total size of the messages in an audit log exceeds the value of **audit_rotation_size**, the server generates a new audit log file. |
+| audit_resource_policy | Policy for determining whether audit logs are preferentially stored by space or time. **on** indicates that audit logs are preferentially stored by space. |
+| audit_file_remain_time | Minimum duration required for recording audit logs. This parameter is valid only when **audit_resource_policy** is set to **off**. |
+| audit_space_limit | Maximum total size of audit log files on the disk |
+| audit_file_remain_threshold | Maximum number of audit files in the audit directory |
+| audit_login_logout | Specifies whether to audit user logins (including login successes and failures) and logouts. |
+| audit_database_process | Specifies whether to audit database startup, stop, switchover, and restoration operations. |
+| audit_user_locked | Specifies whether to audit database user locking and unlocking. |
+| audit_user_violation | Specifies whether to audit unauthorized access of database users. |
+| audit_grant_revoke | Specifies whether to audit user permission granting and reclaiming operations. |
+| audit_system_object | Specifies whether to audit the **CREATE**, **DROP**, and **ALTER** operations on database objects. |
+| audit_dml_state | Specifies whether to audit the **INSERT**, **UPDATE**, and **DELETE** operations on a specific table. |
+| audit_dml_state_select | Specifies whether to audit the **SELECT** operation. |
+| audit_copy_exec | Specifies whether to audit the **COPY** operation. |
+| audit_function_exec | Specifies whether to record audit information during execution of stored procedures, anonymous blocks, or customized functions (excluding system functions). |
+| audit_set_parameter | Specifies whether to audit the **SET** operation. |
+| enableSeparationOfDuty | Specifies whether the separation of duties is enabled. |
+| session_timeout | If the duration of a connection session exceeds the parameter value, the session is automatically disconnected. |
+| auth_iteration_count | Number of iterations during the generation of encrypted information for authentication |
+
+**Procedure**
+
+1. Log in as the OS user **omm** to the primary node of the database.
+
+2. Run the following command to connect to the database:
+
+    ```bash
+    gsql -d mogdb -p 8000
+    ```
+
+    **mogdb** is the name of the database to be connected, and **8000** is the port number of the database primary node. 
+ + If information similar to the following is displayed, the connection succeeds: + + ```bash + gsql((MogDB x.x.x build f521c606) compiled at 2021-09-16 14:55:22 commit 2935 last mr 6385 release) + Non-SSL connection (SSL connection is recommended when requiring high-security) + Type "help" for help. + + MogDB=# + ``` + +3. Check the status of the overall audit switch. + + 1. Run the **show** command to view the value of **audit_enabled**. + + ```sql + MogDB=# SHOW audit_enabled; + ``` + + If off is displayed, run the **\q** command to exit the database and go to the next step. If on is displayed, no further action is required. + + 2. Run the following command to enable the audit function. The parameter settings take effect immediately. + + ```bash + gs_guc set -N all -I all -c "audit_enabled=on" + ``` + +4. Configure specific audit items. + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** + > + > - After the audit function is enabled, user operations can be recorded into an audit file. + > - The default parameter value of each audit item meets security standards. You can enable audit functions as needed, but system performance may be affected. + + For example, you can enable the audit switch of the **CREATE**, **DROP**, and **ALTER** operations on all database objects. The methods of modifying other configuration items are the same: + + ```bash + gs_guc reload -N all -I all -c "audit_system_object=12295" + ``` + + **audit_system_object** indicates the switch of the audit item, and **12295** indicates the value of the audit switch. + +## Querying Audit Results + +**Prerequisites** + +- Audit has been enabled. +- Audit of required items has been enabled. +- The database is running properly and a series of addition, modification, deletion, and query operations have been executed in the database. Otherwise, no audit result is generated. +- Audit logs are separately recorded on the database nodes. + +**Background** + +- Only users with the **AUDITADMIN** permission can view audit records. For details about database users and how to create users, see [Users](2-managing-users-and-their-permissions.md#users). + +- The SQL function **pg_query_audit** is provided by the database for audit query. Its syntax is as follows: + + ```bash + pg_query_audit(timestamptz startime,timestamptz endtime,audit_log) + ``` + + **startime** and **endtime** indicate the start time and end time of the audit record, respectively. **audit_log** indicates the physical file path of the queried audit logs. If **audit_log** is not specified, the audit log information of the current instance is queried. + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** + > The difference between the values of the **startime** and **endtime** parameters indicates the query period, which can be any value ranging from 00:00:00 of the **startime** parameter to 23:59:59 of the **endtime** parameter. Therefore, the **startime** and **endtime** parameters must be properly set to ensure that the required audit information is displayed. + +**Procedure** + +1. Log in as the OS user **omm** to the primary node of the database. + +2. Run the following command to connect to the database: + + ```bash + gsql -d postgres -p 8000 + ``` + + **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. + +3. 
Run the following command to query the audit record:
+
+    ```sql
+    MogDB=# select * from pg_query_audit('2021-03-04 08:00:00','2021-03-04 17:00:00');
+    ```
+
+    The command output is similar to the following:
+
+    ```
+    time | type | result | userid | username | database | client_conninfo | object_name | detail_info | node_name | thread_id | local_port | remote_port
+    ------------------------+----------------+--------+--------+-----------+-----------+-------------------------+-------------------+--------------+-------------------+---------------------------------+------------+-------------
+    2021-03-04 08:00:08+08 | login_success | ok | 10 | omm | postgres | gsql@::1 | postgres | login db(postgres) success, SSL=off | dn_6001_6002_6003 | 140477687527168@668131208211425 |17778 | 46946
+    ```
+
+    This audit record indicates that user **omm** logged in to the **postgres** database at the time shown in the **time** column. If **log_hostname** is enabled and the client connects through an IP address, the host name found by reverse DNS resolution is displayed following the at sign (@) in the value of **client_conninfo**.
+
+    > **NOTE:** SSL information is recorded at the end of the audit log **detail_info**. **SSL=on** indicates that the client is connected using SSL, and **SSL=off** indicates that the client is not connected using SSL.
+
+## Maintaining Audit Logs
+
+**Prerequisites**
+
+You have the audit permission.
+
+**Background**
+
+- [Table 1](#Table-3.3.1) lists the configuration parameters related to audit logs and the parameter descriptions.
+
+    **Table 1** Configuration parameters of audit logs
+
+    | Parameter | Description | Default Value |
+    | :-------------------------- | :--------------------------------------------------- | :----------------------------------------------------------- |
+    | audit_directory | Audit file storage directory | /var/log/mogdb/**User name**/pg_audit |
+    | audit_resource_policy | Policy for saving audit logs | **on** (indicating that the space configuration policy is used) |
+    | audit_space_limit | Maximum storage space occupied by audit files | 1GB |
+    | audit_file_remain_time | Minimum period for storing audit log files | 90 (days) |
+    | audit_file_remain_threshold | Maximum number of audit files in the audit directory | 1048576 |
+
+    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
+    >
+    > If **gs_om** is used for MogDB deployment, audit logs are stored in **/var/log/mogdb/Username/pg_audit**.
+
+- The **pg_delete_audit** function is used to delete audit logs and is an internal SQL function of the database. Its syntax is as follows:
+
+    ```bash
+    pg_delete_audit(timestamp startime,timestamp endtime)
+    ```
+
+    **startime** and **endtime** indicate the audit record start time and end time, respectively.
+
+- Audit content is commonly recorded to database tables or OS files. [Table 2](#Table-3.3.2) lists the advantages and disadvantages of the two record methods.
+
+    **Table 2** Comparison between the two record methods
+
+    | Mode | Advantage | Disadvantage |
+    | :----------------- | :----------------------------------------------------------- | :----------------------------------------------------------- |
+    | Record in tables | Users do not need to maintain audit logs. | Any user with sufficient permissions on database objects can access the audit tables. If a user illegally performs operations on the audit tables, the audit records may become inaccurate. 
| + | Record in OS files | This method has higher security because a user with the permission to access the database may not have the permission to access the OS files. | Users need to maintain audit logs. | + + For database security purposes, MogDB adopts the second method to save audit results for reliability. + +**Procedure** + +1. Log in as the OS user **omm** to the primary node of the database. + +2. Run the following command to connect to the database: + + ```bash + gsql -d mogdb -p 8000 + ``` + + **mogdb** is the name of the database to be connected, and **8000** is the port number of the database primary node. + +3. Select a method to maintain audit logs. + + - Automatic deletion of audit logs + + If the storage space occupied by audit files or the number of audit files reaches the upper limit, the system automatically deletes the oldest audit files and records deletion information to audit logs. + + > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** + > By default, the allowed maximum storage space that can be occupied by audit files is set to 1024 MB. Users can set the value as required. + + Configure the allowed maximum size of storage space occupied by audit files (**audit_space_limit**). + + a. View the current value. + + ```sql + MogDB=# SHOW audit_space_limit; + audit_space_limit + ------------------- + 1GB + (1 row) + ``` + + If the command output is not 1 GB (1024 MB), run the **\q** command to exit the database. + + b. Run the following command to set the parameter to its default value **1024MB**: + + ```bash + gs_guc reload -N all -I all -c "audit_space_limit=1024MB" + ``` + + Configure the maximum number of audit files (**audit_file_remain_threshold**). + + a. View the current value. + + ```sql + MogDB=# SHOW audit_file_remain_threshold; + audit_file_remain_threshold + ----------------------------- + 1048576 + (1 row) + ``` + + If the command output is not 1048576, run the **\q** command to exit the database. + + b. Run the following command to set the parameter to its default value **1048576**: + + ```bash + gs_guc reload -N all -I all -c "audit_file_remain_threshold=1048576" + ``` + + - Manual backup of audit files + + If the storage space occupied by audit files or the number of audit logs exceeds the threshold specified by the configuration file, the system automatically deletes the oldest audit files. Therefore, you are advised to periodically save important audit logs. + + a. Run the **show** command to view the directory (**audit_directory**) where audit files are saved. + + ```sql + MogDB=# SHOW audit_directory; + ``` + + b. Copy the entire audit directory elsewhere to save it. + + - Manual deletion of audit files + + Run the **pg_delete_audit** command to manually delete the audit records generated during a specified period of time. + + Example: Manually delete the audit records generated from September 20, 2012 to September 21, 2012. + + ```sql + SELECT pg_delete_audit('2012-09-20 00:00:00','2012-09-21 23:59:59'); + ``` + +## Configuring File Permission Security Policies + +**Background** + +During its installation, the database sets permissions for its files, including files (such as log files) generated during the running process. File permissions are set as follows: + +- The permission of program directories in the database is set to **0750**. + +- The permission for data file directories in the database is set to **0700**. 
+
+  During MogDB deployment, the directory specified by the **tmpMppdbPath** parameter in the XML configuration file is created for storing **.s.PGSQL.\*** files. If the parameter is not specified, the **/tmp/$USER_mppdb** directory is created. The directory and file permissions are set to **0700**.
+
+- The permissions of data files and audit logs of the database, as well as data files generated by other database programs, are set to **0600**. The permission of run logs is equal to or lower than **0640** by default.
+
+- Common OS users are not allowed to modify or delete database files and log files.
+
+**Directory and File Permissions of Database Programs**
+
+[Table 1](#Table-3.4.1) lists some of the program directories and file permissions of the installed database.
+
+**Table 1** Program directories and file permissions
+
+| File or Directory | Parent Directory | Permissions |
+| :----------------------------------------- | :------------------------------------- | :---------- |
+| bin | - | 0700 |
+| lib | - | 0700 |
+| share | - | 0700 |
+| data (database node/primary database node) | - | 0700 |
+| base | Instance data directory | 0700 |
+| global | Instance data directory | 0700 |
+| pg_audit | Instance data directory (configurable) | 0700 |
+| pg_log | Instance data directory (configurable) | 0700 |
+| pg_xlog | Instance data directory | 0700 |
+| postgresql.conf | Instance data directory | 0600 |
+| pg_hba.conf | Instance data directory | 0600 |
+| postmaster.opts | Instance data directory | 0600 |
+| pg_ident.conf | Instance data directory | 0600 |
+| gs_initdb | bin | 0700 |
+| gs_dump | bin | 0700 |
+| gs_ctl | bin | 0700 |
+| gs_guc | bin | 0700 |
+| gsql | bin | 0700 |
+| archive_status | pg_xlog | 0700 |
+| libpq.so.5.5 | lib | 0600 |
+
+**Suggestion**
+
+During the installation, the database automatically sets permissions for its files, including files (such as log files) generated while it runs. These permissions meet the requirements of most scenarios. If you have special requirements, you are advised to periodically check the permission settings to ensure that they still meet the product requirements, as in the sketch below.
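+
+A minimal periodic-check sketch, assuming a gs_om-based installation in which the **PGDATA** environment variable points to the instance data directory and **GAUSSLOG** to the run-log directory (both names are assumptions; substitute your actual paths):
+
+```bash
+#!/bin/bash
+# Hypothetical helper: list files that are looser than the policy above.
+
+# Data directory: 0700 directories and 0600 files mean that neither group
+# nor other users may hold any permission bit, so print anything that
+# grants one.
+find "$PGDATA" -perm /g+rwx,o+rwx -ls
+
+# Run logs: up to 0640 is allowed, so flag group write/execute or any
+# access for other users.
+find "$GAUSSLOG" -type f -perm /g+wx,o+rwx -ls
+```
+
+An empty result means the files still match the documented policy; any line printed identifies a file to restore with `chmod` to the values in Table 1.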
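+
+Note also that the **audit_system_object** value used in the configuration procedure earlier on this page is a bitmap rather than an on/off switch: each bit enables DDL auditing for one object type, and the default **12295** is the sum 2^0 + 2^1 + 2^2 + 2^12 + 2^13 of the bits enabled out of the box. The bit-to-object mapping below is taken from the openGauss-derived GUC reference and should be verified against your MogDB version:
+
+```bash
+# Illustrative only: per the GUC reference, bit 0 = database, bit 1 = schema,
+# bit 2 = user, bit 3 = table, bit 12 = data source (verify locally).
+# To also audit CREATE/ALTER/DROP on tables, OR in bit 3 (2^3 = 8):
+# 12295 + 8 = 12303.
+gs_guc reload -N all -I all -c "audit_system_object=12303"
+```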
diff --git a/product/en/docs-mogdb/v5.0/security-guide/security/database-security-management.md b/product/en/docs-mogdb/v5.0/security-guide/security/database-security-management.md index 4783ffc5..a85b73d1 100644 --- a/product/en/docs-mogdb/v5.0/security-guide/security/database-security-management.md +++ b/product/en/docs-mogdb/v5.0/security-guide/security/database-security-management.md @@ -1,15 +1,15 @@ ---- -title: Database Security Management -summary: Database Security Management -author: Guo Huan -date: 2023-05-22 ---- - -# Database Security Management - -+ **[Client Access Authentication](1-client-access-authentication.md)** -+ **[Managing Users and Their Permissions](2-managing-users-and-their-permissions.md)** -+ **[Configuring Database Audit](3-configuring-database-audit.md)** -+ **[Setting Encrypted Equality Query](4-setting-encrypted-equality-query.md)** -+ **[Setting a Ledger Database](5-setting-a-ledger-database.md)** +--- +title: Database Security Management +summary: Database Security Management +author: Guo Huan +date: 2023-05-22 +--- + +# Database Security Management + ++ **[Client Access Authentication](1-client-access-authentication.md)** ++ **[Managing Users and Their Permissions](2-managing-users-and-their-permissions.md)** ++ **[Configuring Database Audit](3-configuring-database-audit.md)** ++ **[Setting Encrypted Equality Query](4-setting-encrypted-equality-query.md)** ++ **[Setting a Ledger Database](5-setting-a-ledger-database.md)** + **[Configuring TDE](6-transparent-data-encryption.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/toc.md b/product/en/docs-mogdb/v5.0/toc.md index dead7552..00189691 100644 --- a/product/en/docs-mogdb/v5.0/toc.md +++ b/product/en/docs-mogdb/v5.0/toc.md @@ -8,6 +8,9 @@ + [MogDB Introduction](/overview.md) + [Comparison Between MogDB and openGauss](/about-mogdb/MogDB-compared-to-openGauss.md) + [MogDB Release Notes](/about-mogdb/mogdb-new-feature/release-note.md) + + [MogDB 5.0.8](/about-mogdb/mogdb-new-feature/5.0.8.md) + + [MogDB 5.0.7](/about-mogdb/mogdb-new-feature/5.0.7.md) + + [MogDB 5.0.6](/about-mogdb/mogdb-new-feature/5.0.6.md) + [MogDB 5.0.5](/about-mogdb/mogdb-new-feature/5.0.5.md) + [MogDB 5.0.4](/about-mogdb/mogdb-new-feature/5.0.4.md) + [MogDB 5.0.3](/about-mogdb/mogdb-new-feature/5.0.3.md) @@ -71,6 +74,9 @@ + [OCK-accelerated Data Transmission](/characteristic-description/high-performance/ock-accelerated-data-transmission.md) + [OCK SCRLock Accelerate Distributed Lock](/characteristic-description/high-performance/ock-scrlock-accelerate-distributed-lock.md) + [Enhancement of WAL Redo Performance](/characteristic-description/high-performance/enhancement-of-wal-redo-performance.md) + + [Enhancement of Dirty Pages Flushing Performance](/characteristic-description/high-performance/enhancement-of-dirty-pages-flushing-performance.md) + + [Sequential Scan Prefetch](/characteristic-description/high-performance/seqscan-prefetch.md) + + [Ustore SMP Parallel Scanning](/characteristic-description/high-performance/ustore-smp.md) + [High Availability (HA)](/characteristic-description/high-availability/high-availability.md) + [Primary/Standby](/characteristic-description/high-availability/1-primary-standby.md) + [Logical Replication](/characteristic-description/high-availability/2-logical-replication.md) @@ -91,6 +97,8 @@ + [Two City and Three Center DR](/characteristic-description/high-availability/17-two-city-three-dc-dr.md) + [CM Cluster Management Component Supporting Two Node 
Deployment](/characteristic-description/high-availability/cm-cluster-management-component-supporting-two-node-deployment.md) + [Query of the Original DDL Statement for a View](/characteristic-description/high-availability/ddl-query-of-view.md) + + [MogDB/CM/PTK Dual Network Segment Support](/characteristic-description/high-availability/cm-dual-network-segment-deployment.md) + + [Enhanced Efficiency of Logical Backup and Restore](/characteristic-description/high-availability/enhanced-efficiency-of-logical-backup-and-restore.md) + [Maintainability](/characteristic-description/maintainability/maintainability.md) + [Workload Diagnosis Report (WDR)](/characteristic-description/maintainability/2-workload-diagnosis-report.md) + [Slow SQL Diagnosis](/characteristic-description/maintainability/3-slow-sql-diagnosis.md) @@ -104,6 +112,8 @@ + [DCF Module Tracing](./characteristic-description/maintainability/dcf-module-tracing.md) + [Error When Writing Illegal Characters](./characteristic-description/maintainability/error-when-writing-illegal-characters.md) + [Support For Pageinspect & Pagehack](./characteristic-description/maintainability/pageinspect-pagehack.md) + + [Autonomous Transaction Management View and Termination](./characteristic-description/maintainability/autonomous-transaction-management.md) + + [Corrupt Files Handling](./characteristic-description/maintainability/corrupt-files-handling.md) + [Compatibility](/characteristic-description/compatibility/compatibility.md) + [Add %rowtype Attribute To The View](/characteristic-description/compatibility/add-rowtype-attribute-to-the-view.md) + [Aggregate Functions Distinct Performance Optimization](/characteristic-description/compatibility/aggregate-functions-distinct-performance-optimization.md) @@ -128,6 +138,14 @@ + [Support PLPGSQL subtype](/characteristic-description/compatibility/support-plpgsql-subtype.md) + [Support Synonym Calls Without Parentheses For Function Without Parameters](/characteristic-description/compatibility/support-synonym-calls-without-parentheses-for-function-without-parameters.md) + [Support For dbms_utility.format_error_backtrace](/characteristic-description/compatibility/format-error-backtrace.md) + + [Support for PIVOT and UNPIVOT Syntax](/characteristic-description/compatibility/pivot-and-unpivot.md) + + [Mod Function Compatibility](/characteristic-description/compatibility/mod-function-float-to-int.md) + + [Support for Nesting of Aggregate Functions](/characteristic-description/compatibility/nesting-of-aggregate-functions.md) + + [ORDER BY/GROUP BY Scenario Expansion](/characteristic-description/compatibility/order-by-group-by-scenario-expansion.md) + + [Support for Modifying Table Log Properties After Table Creation](/characteristic-description/compatibility/modify-table-log-property.md) + + [Support for INSERT ON CONFLICT Clause](/characteristic-description/compatibility/insert-on-conflict.md) + + [Support for AUTHID CURRENT_USER](/characteristic-description/compatibility/authid-current-user.md) + + [Support for Stored Procedure OUT Parameters in PBE Mode](/characteristic-description/compatibility/stored-procedure-out-parameters-in-pbe-mode.md) + [Database Security](/characteristic-description/database-security/database-security.md) + [Access Control Model](/characteristic-description/database-security/1-access-control-model.md) + [Separation of Control and Access Permissions](/characteristic-description/database-security/2-separation-of-control-and-access-permissions.md) @@ -170,6 +188,11 @@ + [BRIN 
Index](/characteristic-description/enterprise-level-features/24-brin-index.md) + [BLOOM Index](/characteristic-description/enterprise-level-features/25-bloom-index.md) + [Event Trigger](/characteristic-description/enterprise-level-features/event-trigger.md) + + [Scrollable Cursor Support for Reverse Retrieval](/characteristic-description/enterprise-level-features/scroll-cursor.md) + + [Support for Pruning Subquery Projection Columns](/characteristic-description/enterprise-level-features/support-for-pruning-subquery-projection-columns.md) + + [Pruning ORDER BY in Subqueries](/characteristic-description/enterprise-level-features/pruning-order-by-in-subqueries.md) + + [Automatic Creation of Indexes Supporting Fuzzy Matching](/characteristic-description/enterprise-level-features/index-support-fuzzy-matching.md) + + [Support for Importing and Exporting Specific Objects](/characteristic-description/enterprise-level-features/import-export-specific-objects.md) + [Application Development Interfaces](/characteristic-description/application-development-interfaces/application-development-interfaces.md) + [Standard SQL](/characteristic-description/application-development-interfaces/1-standard-sql.md) + [Standard Development Interfaces](/characteristic-description/application-development-interfaces/2-standard-development-interfaces.md) @@ -435,6 +458,7 @@ + [SQLSetEnvAttr](/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-21-SQLSetEnvAttr.md) + [SQLSetStmtAttr](/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-22-SQLSetStmtAttr.md) + [Examples](/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-23-Examples.md) + + [ODBC Release Note](/developer-guide/dev/3-development-based-on-odbc/odbc-release-notes.md) + [Development Based on libpq](/developer-guide/dev/4-development-based-on-libpq/development-based-on-libpq.md) + [Dependent Header Files of libpq](/developer-guide/dev/4-development-based-on-libpq/dependent-header-files-of-libpq.md) + [Development Process](/developer-guide/dev/4-development-based-on-libpq/development-process.md) @@ -474,6 +498,7 @@ + [PQgetCancel](/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/1-PQgetCancel.md) + [PQfreeCancel](/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/2-PQfreeCancel.md) + [PQcancel](/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/3-PQcancel.md) + + [libpq Release Note](/developer-guide/dev/4-development-based-on-libpq/libpq-release-notes.md) + [Psycopg2-Based Development](/developer-guide/dev/4.1-psycopg-based-development/1-psycopg-based-development.md) + [Psycopg2 Package](/developer-guide/dev/4.1-psycopg-based-development/2-psycopg-package.md) + [Development Process](/developer-guide/dev/4.1-psycopg-based-development/3.1-development-process.md) @@ -525,6 +550,7 @@ + [Logical Decoding](/developer-guide/logical-replication/logical-decoding/logical-decoding.md) + [Overview](/developer-guide/logical-replication/logical-decoding/1-logical-decoding.md) + [Logical Decoding by SQL Function Interfaces](/developer-guide/logical-replication/logical-decoding/2-logical-decoding-by-sql-function-interfaces.md) + + [Logical Decoding Support for DDL](/developer-guide/logical-replication/logical-decoding/logical-decoding-support-for-DDL.md) + [Publication-Subscription](/developer-guide/logical-replication/publication-subscription/publication-subscription.md) + 
[Publications](/developer-guide/logical-replication/publication-subscription/publications.md) + [Subscriptions](/developer-guide/logical-replication/publication-subscription/subscriptions.md) @@ -708,7 +734,6 @@ + [Setting a Cgroup](./performance-tuning/system-tuning/resource-load-management/resource-management-preparations/setting-control-group.md) + [Creating a Resource Pool](/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/creating-resource-pool.md) + [SQL Optimization](/performance-tuning/sql-tuning/sql-tuning.md) - + [Optimizer]( ./performance-tuning/sql-tuning/sql-tuning-optimizer.md) + [Query Execution Process](/performance-tuning/sql-tuning/query-execution-process.md) + [Introduction to the SQL Execution Plan](/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md) + [Tuning Process](/performance-tuning/sql-tuning/tuning-process.md) @@ -863,6 +888,7 @@ + [GS_SESSION_MEMORY_STATISTICS](./reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_STATISTICS.md) + [GS_SESSION_STAT](./reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_STAT.md) + [GS_SESSION_TIME](./reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_TIME.md) + + [GS_SHARED_MEMORY_DETAIL](./reference-guide/system-catalogs-and-system-views/system-views/GS_SHARED_MEMORY_DETAIL.md) + [GS_SQL_COUNT](./reference-guide/system-catalogs-and-system-views/system-views/GS_SQL_COUNT.md) + [GS_STAT_SESSION_CU](./reference-guide/system-catalogs-and-system-views/system-views/GS_STAT_SESSION_CU.md) + [GS_THREAD_MEMORY_CONTEXT](./reference-guide/system-catalogs-and-system-views/system-views/GS_THREAD_MEMORY_CONTEXT.md) @@ -879,6 +905,7 @@ + [GS_WLM_SESSION_INFO_ALL](./reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_INFO_ALL.md) + [GS_WLM_SESSION_STATISTICS](./reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_STATISTICS.md) + [GS_WLM_USER_INFO](./reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_USER_INFO.md) + + [IOS_STATUS](./reference-guide/system-catalogs-and-system-views/system-views/IOS_STATUS.md) + [MPP_TABLES](./reference-guide/system-catalogs-and-system-views/system-views/MPP_TABLES.md) + [PG_AVAILABLE_EXTENSION_VERSIONS](./reference-guide/system-catalogs-and-system-views/system-views/PG_AVAILABLE_EXTENSION_VERSIONS.md) + [PG_AVAILABLE_EXTENSIONS](./reference-guide/system-catalogs-and-system-views/system-views/PG_AVAILABLE_EXTENSIONS.md) @@ -957,6 +984,7 @@ + [PG_WLM_STATISTICS](./reference-guide/system-catalogs-and-system-views/system-views/PG_WLM_STATISTICS.md) + [PGXC_PREPARED_XACTS](./reference-guide/system-catalogs-and-system-views/system-views/PGXC_PREPARED_XACTS.md) + [PLAN_TABLE](./reference-guide/system-catalogs-and-system-views/system-views/PLAN_TABLE.md) + + [PATCH_INFORMATION_TABLE](./reference-guide/system-catalogs-and-system-views/system-views/PATCH_INFORMATION_TABLE.md) + [Functions and Operators](./reference-guide/functions-and-operators/functions-and-operators.md) + [Logical Operators](./reference-guide/functions-and-operators/logical-operators.md) + [Comparison Operators](./reference-guide/functions-and-operators/comparison-operators.md) @@ -1306,7 +1334,6 @@ + [Testing a Dictionary](/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-dictionary.md) + [Limitations](/reference-guide/sql-reference/full-text-search/limitations.md) + [System 
Operation](/reference-guide/sql-reference/system-operation.md) - + [Controlling Transactions](/reference-guide/sql-reference/controlling-transactions.md) + [DDL Syntax Overview](/reference-guide/sql-reference/ddl-syntax-overview.md) + [DML Syntax Overview](/reference-guide/sql-reference/dml-syntax-overview.md) + [DCL Syntax Overview](/reference-guide/sql-reference/dcl-syntax-overview.md) @@ -1317,6 +1344,7 @@ + [Transaction](./reference-guide/sql-reference/transaction/sql-reference-transaction.md) + [Transaction Management](./reference-guide/sql-reference/transaction/transaction-management.md) + [Transaction Control](./reference-guide/sql-reference/transaction/transaction-control.md) + + [SELECT Auto-Commit Transactions](./reference-guide/sql-reference/transaction/transaction-auto-commit.md) + [Ordinary Table](./reference-guide/sql-reference/ordinary-table.md) + [Partitioned Table](./reference-guide/sql-reference/partition-table.md) + [Index](./reference-guide/sql-reference/sql-reference-index.md) @@ -1325,7 +1353,7 @@ + [Anonymous Block](./reference-guide/sql-reference/sql-reference-anonymous-block.md) + [Trigger](./reference-guide/sql-reference/sql-reference-trigger.md) + [INSERT_RIGHT_REF_DEFAULT_VALUE](./reference-guide/sql-reference/type-base-value.md) - + [Appendix](./reference-guide/sql-reference/appendix/appendix.md) + + [Appendix](./reference-guide/sql-reference/appendix/sql-reference-appendix.md) + [GIN Indexes](/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes.md) + [Introduction](/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes-introduction.md.md) + [Scalability](/reference-guide/sql-reference/appendix/gin-indexes/scalability.md) @@ -1410,8 +1438,12 @@ + [Reserved Parameters](./reference-guide/guc-parameters/reserved-parameters.md) + [AI Features](./reference-guide/guc-parameters/AI-features.md) + [Global SysCache Parameters](./reference-guide/guc-parameters/global-syscache-parameters.md) + + [Multi-Level Cache Management Parameters](./reference-guide/guc-parameters/multi-level-cache-management-parameters.md) + + [Resource Pooling Parameters](./reference-guide/guc-parameters/resource-pooling-parameters.md) + [Parameters Related to Efficient Data Compression Algorithms](./reference-guide/guc-parameters/parameters-related-to-efficient-data-compression-algorithms.md) + [Writer Statement Parameters Supported by Standby Servers](./reference-guide/guc-parameters/writer-statement-parameters-supported-by-standby-server.md) + + [Data Import and Export](./reference-guide/guc-parameters/data-import-export.md) + + [Delimiter](./reference-guide/guc-parameters/delimiter.md) + [Appendix](./reference-guide/guc-parameters/appendix.md) + [Schema](./reference-guide/schema/schema.md) + [Information Schema](./reference-guide/schema/information-schema/information-schema.md) @@ -1433,7 +1465,6 @@ + [Memory](./reference-guide/schema/DBE_PERF/memory/memory-schema.md) + [MEMORY_NODE_DETAIL](./reference-guide/schema/DBE_PERF/memory/MEMORY_NODE_DETAIL.md) + [GLOBAL_MEMORY_NODE_DETAIL](./reference-guide/schema/DBE_PERF/memory/GLOBAL_MEMORY_NODE_DETAIL.md) - + [GS_SHARED_MEMORY_DETAIL](./reference-guide/schema/DBE_PERF/memory/GS_SHARED_MEMORY_DETAIL.md) + [GLOBAL_SHARED_MEMORY_DETAIL](./reference-guide/schema/DBE_PERF/memory/GLOBAL_SHARED_MEMORY_DETAIL.md) + [File](./reference-guide/schema/DBE_PERF/file/file.md) + [FILE_IOSTAT](./reference-guide/schema/DBE_PERF/file/FILE_IOSTAT.md) @@ -1666,6 +1697,7 @@ + [Command 
Reference](./reference-guide/tool-reference/client-tool/gsql/command-reference.md) + [Meta-Command Reference](./reference-guide/tool-reference/client-tool/gsql/meta-command-reference.md) + [FAQs](./reference-guide/tool-reference/client-tool/gsql/gsql-faq.md) + + [gsql Release Note](./reference-guide/tool-reference/client-tool/gsql/gsql-release-notes.md) + [Server Tools](./reference-guide/tool-reference/server-tools/server-tools.md) + [gs_cgroup](./reference-guide/tool-reference/server-tools/gs_cgroup.md) + [gs_check](./reference-guide/tool-reference/server-tools/gs_check.md) diff --git a/product/en/docs-mogdb/v5.0/toc_about.md b/product/en/docs-mogdb/v5.0/toc_about.md index cac48be7..b98a04e7 100644 --- a/product/en/docs-mogdb/v5.0/toc_about.md +++ b/product/en/docs-mogdb/v5.0/toc_about.md @@ -7,13 +7,16 @@ + [MogDB Introduction](/overview.md) + [Comparison Between MogDB and openGauss](/about-mogdb/MogDB-compared-to-openGauss.md) + [MogDB Release Notes](/about-mogdb/mogdb-new-feature/release-note.md) + + [MogDB 5.0.8](/about-mogdb/mogdb-new-feature/5.0.8.md) + + [MogDB 5.0.7](/about-mogdb/mogdb-new-feature/5.0.7.md) + + [MogDB 5.0.6](/about-mogdb/mogdb-new-feature/5.0.6.md) + [MogDB 5.0.5](/about-mogdb/mogdb-new-feature/5.0.5.md) + [MogDB 5.0.4](/about-mogdb/mogdb-new-feature/5.0.4.md) + [MogDB 5.0.3](/about-mogdb/mogdb-new-feature/5.0.3.md) + [MogDB 5.0.2](/about-mogdb/mogdb-new-feature/5.0.2.md) + [MogDB 5.0.1](/about-mogdb/mogdb-new-feature/5.0.1.md) + [MogDB 5.0.0](/about-mogdb/mogdb-new-feature/5.0.0.md) -+ Open Source Components ++ [Open Source Components](/about-mogdb/open-source-components/open-source-components.md) + [Docker-based MogDB](/about-mogdb/open-source-components/2-docker-based-mogdb.md) + [compat-tools](/about-mogdb/open-source-components/compat-tools.md) + [mogdb-monitor](/about-mogdb/open-source-components/mogdb-monitor.md) diff --git a/product/en/docs-mogdb/v5.0/toc_characteristic_description.md b/product/en/docs-mogdb/v5.0/toc_characteristic_description.md index 17d6f1e1..bb5d916c 100644 --- a/product/en/docs-mogdb/v5.0/toc_characteristic_description.md +++ b/product/en/docs-mogdb/v5.0/toc_characteristic_description.md @@ -29,6 +29,9 @@ + [OCK-accelerated Data Transmission](/characteristic-description/high-performance/ock-accelerated-data-transmission.md) + [OCK SCRLock Accelerate Distributed Lock](/characteristic-description/high-performance/ock-scrlock-accelerate-distributed-lock.md) + [Enhancement of WAL Redo Performance](/characteristic-description/high-performance/enhancement-of-wal-redo-performance.md) + + [Enhancement of Dirty Pages Flushing Performance](/characteristic-description/high-performance/enhancement-of-dirty-pages-flushing-performance.md) + + [Sequential Scan Prefetch](/characteristic-description/high-performance/seqscan-prefetch.md) + + [Ustore SMP Parallel Scanning](/characteristic-description/high-performance/ustore-smp.md) + [High Availability (HA)](/characteristic-description/high-availability/high-availability.md) + [Primary/Standby](/characteristic-description/high-availability/1-primary-standby.md) + [Logical Replication](/characteristic-description/high-availability/2-logical-replication.md) @@ -49,6 +52,8 @@ + [Two City and Three Center DR](/characteristic-description/high-availability/17-two-city-three-dc-dr.md) + [CM Cluster Management Component Supporting Two Node Deployment](/characteristic-description/high-availability/cm-cluster-management-component-supporting-two-node-deployment.md) + [Query of the Original DDL Statement for a 
View](/characteristic-description/high-availability/ddl-query-of-view.md) + + [MogDB/CM/PTK Dual Network Segment Support](/characteristic-description/high-availability/cm-dual-network-segment-deployment.md) + + [Enhanced Efficiency of Logical Backup and Restore](/characteristic-description/high-availability/enhanced-efficiency-of-logical-backup-and-restore.md) + [Maintainability](/characteristic-description/maintainability/maintainability.md) + [Workload Diagnosis Report (WDR)](/characteristic-description/maintainability/2-workload-diagnosis-report.md) + [Slow SQL Diagnosis](/characteristic-description/maintainability/3-slow-sql-diagnosis.md) @@ -62,6 +67,8 @@ + [DCF Module Tracing](./characteristic-description/maintainability/dcf-module-tracing.md) + [Error When Writing Illegal Characters](./characteristic-description/maintainability/error-when-writing-illegal-characters.md) + [Support For Pageinspect & Pagehack](./characteristic-description/maintainability/pageinspect-pagehack.md) + + [Autonomous Transaction Management View and Termination](./characteristic-description/maintainability/autonomous-transaction-management.md) + + [Corrupt Files Handling](./characteristic-description/maintainability/corrupt-files-handling.md) + [Compatibility](/characteristic-description/compatibility/compatibility.md) + [Add %rowtype Attribute To The View](/characteristic-description/compatibility/add-rowtype-attribute-to-the-view.md) + [Aggregate Functions Distinct Performance Optimization](/characteristic-description/compatibility/aggregate-functions-distinct-performance-optimization.md) @@ -86,6 +93,14 @@ + [Support PLPGSQL subtype](/characteristic-description/compatibility/support-plpgsql-subtype.md) + [Support Synonym Calls Without Parentheses For Function Without Parameters](/characteristic-description/compatibility/support-synonym-calls-without-parentheses-for-function-without-parameters.md) + [Support For dbms_utility.format_error_backtrace](/characteristic-description/compatibility/format-error-backtrace.md) + + [Support for PIVOT and UNPIVOT Syntax](/characteristic-description/compatibility/pivot-and-unpivot.md) + + [Mod Function Compatibility](/characteristic-description/compatibility/mod-function-float-to-int.md) + + [Support for Nesting of Aggregate Functions](/characteristic-description/compatibility/nesting-of-aggregate-functions.md) + + [ORDER BY/GROUP BY Scenario Expansion](/characteristic-description/compatibility/order-by-group-by-scenario-expansion.md) + + [Support for Modifying Table Log Properties After Table Creation](/characteristic-description/compatibility/modify-table-log-property.md) + + [Support for INSERT ON CONFLICT Clause](/characteristic-description/compatibility/insert-on-conflict.md) + + [Support for AUTHID CURRENT_USER](/characteristic-description/compatibility/authid-current-user.md) + + [Support for Stored Procedure OUT Parameters in PBE Mode](/characteristic-description/compatibility/stored-procedure-out-parameters-in-pbe-mode.md) + [Database Security](/characteristic-description/database-security/database-security.md) + [Access Control Model](/characteristic-description/database-security/1-access-control-model.md) + [Separation of Control and Access Permissions](/characteristic-description/database-security/2-separation-of-control-and-access-permissions.md) @@ -128,6 +143,11 @@ + [BRIN Index](/characteristic-description/enterprise-level-features/24-brin-index.md) + [BLOOM Index](/characteristic-description/enterprise-level-features/25-bloom-index.md) + [Event 
Trigger](/characteristic-description/enterprise-level-features/event-trigger.md) + + [Scrollable Cursor Support for Reverse Retrieval](/characteristic-description/enterprise-level-features/scroll-cursor.md) + + [Support for Pruning Subquery Projection Columns](/characteristic-description/enterprise-level-features/support-for-pruning-subquery-projection-columns.md) + + [Pruning ORDER BY in Subqueries](/characteristic-description/enterprise-level-features/pruning-order-by-in-subqueries.md) + + [Automatic Creation of Indexes Supporting Fuzzy Matching](/characteristic-description/enterprise-level-features/index-support-fuzzy-matching.md) + + [Support for Importing and Exporting Specific Objects](/characteristic-description/enterprise-level-features/import-export-specific-objects.md) + [Application Development Interfaces](/characteristic-description/application-development-interfaces/application-development-interfaces.md) + [Standard SQL](/characteristic-description/application-development-interfaces/1-standard-sql.md) + [Standard Development Interfaces](/characteristic-description/application-development-interfaces/2-standard-development-interfaces.md) diff --git a/product/en/docs-mogdb/v5.0/toc_datatypes-and-sql.md b/product/en/docs-mogdb/v5.0/toc_datatypes-and-sql.md index 0e3b8a25..94c39fde 100644 --- a/product/en/docs-mogdb/v5.0/toc_datatypes-and-sql.md +++ b/product/en/docs-mogdb/v5.0/toc_datatypes-and-sql.md @@ -284,7 +284,6 @@ + [Testing a Dictionary](/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-dictionary.md) + [Limitations](/reference-guide/sql-reference/full-text-search/limitations.md) + [System Operation](/reference-guide/sql-reference/system-operation.md) - + [Controlling Transactions](/reference-guide/sql-reference/controlling-transactions.md) + [DDL Syntax Overview](/reference-guide/sql-reference/ddl-syntax-overview.md) + [DML Syntax Overview](/reference-guide/sql-reference/dml-syntax-overview.md) + [DCL Syntax Overview](/reference-guide/sql-reference/dcl-syntax-overview.md) @@ -295,6 +294,7 @@ + [Transaction](./reference-guide/sql-reference/transaction/sql-reference-transaction.md) + [Transaction Management](./reference-guide/sql-reference/transaction/transaction-management.md) + [Transaction Control](./reference-guide/sql-reference/transaction/transaction-control.md) + + [SELECT Auto-Commit Transactions](./reference-guide/sql-reference/transaction/transaction-auto-commit.md) + [Ordinary Table](./reference-guide/sql-reference/ordinary-table.md) + [Partitioned Table](./reference-guide/sql-reference/partition-table.md) + [Index](./reference-guide/sql-reference/sql-reference-index.md) @@ -303,7 +303,7 @@ + [Anonymous Block](./reference-guide/sql-reference/sql-reference-anonymous-block.md) + [Trigger](./reference-guide/sql-reference/sql-reference-trigger.md) + [INSERT_RIGHT_REF_DEFAULT_VALUE](./reference-guide/sql-reference/type-base-value.md) - + [Appendix](./reference-guide/sql-reference/appendix/appendix.md) + + [Appendix](./reference-guide/sql-reference/appendix/sql-reference-appendix.md) + [GIN Indexes](/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes.md) + [Introduction](/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes-introduction.md.md) + [Scalability](/reference-guide/sql-reference/appendix/gin-indexes/scalability.md) diff --git a/product/en/docs-mogdb/v5.0/toc_dev.md b/product/en/docs-mogdb/v5.0/toc_dev.md index c6a33b74..e72e0d73 100644 --- a/product/en/docs-mogdb/v5.0/toc_dev.md +++ 
b/product/en/docs-mogdb/v5.0/toc_dev.md @@ -46,6 +46,7 @@ + [javax.naming.spi.InitialContextFactory](/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/13-javax-naming-spi-InitialContextFactory.md) + [CopyManager](/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/14-CopyManager.md) + [JDBC-based Common Parameter Reference](/developer-guide/dev/2-development-based-on-jdbc/jdbc-based-common-parameter-reference.md) + + [JDBC Release Note](./developer-guide/dev/2-development-based-on-jdbc/jdbc-release-notes.md) + [Development Based on ODBC](/developer-guide/dev/3-development-based-on-odbc/1-development-based-on-odbc.md) + [ODBC Packages, Dependent Libraries, and Header Files](/developer-guide/dev/3-development-based-on-odbc/2-odbc-packages-dependent-libraries-and-header-files.md) + [Configuring a Data Source in the Linux OS](/developer-guide/dev/3-development-based-on-odbc/3-configuring-a-data-source-in-the-linux-os.md) @@ -76,6 +77,7 @@ + [SQLSetEnvAttr](/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-21-SQLSetEnvAttr.md) + [SQLSetStmtAttr](/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-22-SQLSetStmtAttr.md) + [Examples](/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-23-Examples.md) + + [ODBC Release Note](/developer-guide/dev/3-development-based-on-odbc/odbc-release-notes.md) + [Development Based on libpq](/developer-guide/dev/4-development-based-on-libpq/development-based-on-libpq.md) + [Dependent Header Files of libpq](/developer-guide/dev/4-development-based-on-libpq/dependent-header-files-of-libpq.md) + [Development Process](/developer-guide/dev/4-development-based-on-libpq/development-process.md) @@ -115,6 +117,7 @@ + [PQgetCancel](/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/1-PQgetCancel.md) + [PQfreeCancel](/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/2-PQfreeCancel.md) + [PQcancel](/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/3-PQcancel.md) + + [libpq Release Note](/developer-guide/dev/4-development-based-on-libpq/libpq-release-notes.md) + [Psycopg2-Based Development](/developer-guide/dev/4.1-psycopg-based-development/1-psycopg-based-development.md) + [Psycopg2 Package](/developer-guide/dev/4.1-psycopg-based-development/2-psycopg-package.md) + [Development Process](/developer-guide/dev/4.1-psycopg-based-development/3.1-development-process.md) @@ -166,6 +169,7 @@ + [Logical Decoding](/developer-guide/logical-replication/logical-decoding/logical-decoding.md) + [Overview](/developer-guide/logical-replication/logical-decoding/1-logical-decoding.md) + [Logical Decoding by SQL Function Interfaces](/developer-guide/logical-replication/logical-decoding/2-logical-decoding-by-sql-function-interfaces.md) + + [Logical Decoding Support for DDL](/developer-guide/logical-replication/logical-decoding/logical-decoding-support-for-DDL.md) + [Publication-Subscription](/developer-guide/logical-replication/publication-subscription/publication-subscription.md) + [Publications](/developer-guide/logical-replication/publication-subscription/publications.md) + [Subscriptions](/developer-guide/logical-replication/publication-subscription/subscriptions.md) diff --git a/product/en/docs-mogdb/v5.0/toc_parameters-and-tools.md b/product/en/docs-mogdb/v5.0/toc_parameters-and-tools.md index e3af3e6b..7685fa70 100644 --- a/product/en/docs-mogdb/v5.0/toc_parameters-and-tools.md +++ 
b/product/en/docs-mogdb/v5.0/toc_parameters-and-tools.md @@ -82,8 +82,12 @@ + [Reserved Parameters](./reference-guide/guc-parameters/reserved-parameters.md) + [AI Features](./reference-guide/guc-parameters/AI-features.md) + [Global SysCache Parameters](./reference-guide/guc-parameters/global-syscache-parameters.md) + + [Multi-Level Cache Management Parameters](./reference-guide/guc-parameters/multi-level-cache-management-parameters.md) + + [Resource Pooling Parameters](./reference-guide/guc-parameters/resource-pooling-parameters.md) + [Parameters Related to Efficient Data Compression Algorithms](./reference-guide/guc-parameters/parameters-related-to-efficient-data-compression-algorithms.md) + [Writer Statement Parameters Supported by Standby Servers](./reference-guide/guc-parameters/writer-statement-parameters-supported-by-standby-server.md) + + [Data Import and Export](./reference-guide/guc-parameters/data-import-export.md) + + [Delimiter](./reference-guide/guc-parameters/delimiter.md) + [Appendix](./reference-guide/guc-parameters/appendix.md) + [Schema](./reference-guide/schema/schema.md) + [Information Schema](./reference-guide/schema/information-schema/information-schema.md) @@ -105,7 +109,6 @@ + [Memory](./reference-guide/schema/DBE_PERF/memory/memory-schema.md) + [MEMORY_NODE_DETAIL](./reference-guide/schema/DBE_PERF/memory/MEMORY_NODE_DETAIL.md) + [GLOBAL_MEMORY_NODE_DETAIL](./reference-guide/schema/DBE_PERF/memory/GLOBAL_MEMORY_NODE_DETAIL.md) - + [GS_SHARED_MEMORY_DETAIL](./reference-guide/schema/DBE_PERF/memory/GS_SHARED_MEMORY_DETAIL.md) + [GLOBAL_SHARED_MEMORY_DETAIL](./reference-guide/schema/DBE_PERF/memory/GLOBAL_SHARED_MEMORY_DETAIL.md) + [File](./reference-guide/schema/DBE_PERF/file/file.md) + [FILE_IOSTAT](./reference-guide/schema/DBE_PERF/file/FILE_IOSTAT.md) @@ -338,6 +341,7 @@ + [Command Reference](./reference-guide/tool-reference/client-tool/gsql/command-reference.md) + [Meta-Command Reference](./reference-guide/tool-reference/client-tool/gsql/meta-command-reference.md) + [FAQs](./reference-guide/tool-reference/client-tool/gsql/gsql-faq.md) + + [gsql Release Note](./reference-guide/tool-reference/client-tool/gsql/gsql-release-notes.md) + [Server Tools](./reference-guide/tool-reference/server-tools/server-tools.md) + [gs_cgroup](./reference-guide/tool-reference/server-tools/gs_cgroup.md) + [gs_check](./reference-guide/tool-reference/server-tools/gs_check.md) diff --git a/product/en/docs-mogdb/v5.0/toc_performance.md b/product/en/docs-mogdb/v5.0/toc_performance.md index 58f1f33d..331c4da7 100644 --- a/product/en/docs-mogdb/v5.0/toc_performance.md +++ b/product/en/docs-mogdb/v5.0/toc_performance.md @@ -18,7 +18,6 @@ + [Setting a Cgroup](/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/setting-control-group.md) + [Creating a Resource Pool](/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/creating-resource-pool.md) + [SQL Optimization](/performance-tuning/sql-tuning/sql-tuning.md) - + [Optimizer]( ./performance-tuning/sql-tuning/sql-tuning-optimizer.md) + [Query Execution Process](/performance-tuning/sql-tuning/query-execution-process.md) + [Introduction to the SQL Execution Plan](/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md) + [Tuning Process](/performance-tuning/sql-tuning/tuning-process.md) diff --git a/product/en/docs-mogdb/v5.0/toc_system-catalogs-and-functions.md b/product/en/docs-mogdb/v5.0/toc_system-catalogs-and-functions.md index 7775c36b..bd620e38 
100644 --- a/product/en/docs-mogdb/v5.0/toc_system-catalogs-and-functions.md +++ b/product/en/docs-mogdb/v5.0/toc_system-catalogs-and-functions.md @@ -145,6 +145,7 @@ + [GS_SESSION_MEMORY_STATISTICS](./reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_STATISTICS.md) + [GS_SESSION_STAT](./reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_STAT.md) + [GS_SESSION_TIME](./reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_TIME.md) + + [GS_SHARED_MEMORY_DETAIL](./reference-guide/system-catalogs-and-system-views/system-views/GS_SHARED_MEMORY_DETAIL.md) + [GS_SQL_COUNT](./reference-guide/system-catalogs-and-system-views/system-views/GS_SQL_COUNT.md) + [GS_STAT_SESSION_CU](./reference-guide/system-catalogs-and-system-views/system-views/GS_STAT_SESSION_CU.md) + [GS_THREAD_MEMORY_CONTEXT](./reference-guide/system-catalogs-and-system-views/system-views/GS_THREAD_MEMORY_CONTEXT.md) @@ -161,6 +162,7 @@ + [GS_WLM_SESSION_INFO_ALL](./reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_INFO_ALL.md) + [GS_WLM_SESSION_STATISTICS](./reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_STATISTICS.md) + [GS_WLM_USER_INFO](./reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_USER_INFO.md) + + [IOS_STATUS](./reference-guide/system-catalogs-and-system-views/system-views/IOS_STATUS.md) + [MPP_TABLES](./reference-guide/system-catalogs-and-system-views/system-views/MPP_TABLES.md) + [PG_AVAILABLE_EXTENSION_VERSIONS](./reference-guide/system-catalogs-and-system-views/system-views/PG_AVAILABLE_EXTENSION_VERSIONS.md) + [PG_AVAILABLE_EXTENSIONS](./reference-guide/system-catalogs-and-system-views/system-views/PG_AVAILABLE_EXTENSIONS.md) @@ -239,6 +241,7 @@ + [PG_WLM_STATISTICS](./reference-guide/system-catalogs-and-system-views/system-views/PG_WLM_STATISTICS.md) + [PGXC_PREPARED_XACTS](./reference-guide/system-catalogs-and-system-views/system-views/PGXC_PREPARED_XACTS.md) + [PLAN_TABLE](./reference-guide/system-catalogs-and-system-views/system-views/PLAN_TABLE.md) + + [PATCH_INFORMATION_TABLE](./reference-guide/system-catalogs-and-system-views/system-views/PATCH_INFORMATION_TABLE.md) + [Functions and Operators](./reference-guide/functions-and-operators/functions-and-operators.md) + [Logical Operators](./reference-guide/functions-and-operators/logical-operators.md) + [Comparison Operators](./reference-guide/functions-and-operators/comparison-operators.md) diff --git a/product/en/docs-mogdb/v5.0/upgrade-guide/2-read-before-upgrade.md b/product/en/docs-mogdb/v5.0/upgrade-guide/2-read-before-upgrade.md index 9146d3c6..cbbe75de 100644 --- a/product/en/docs-mogdb/v5.0/upgrade-guide/2-read-before-upgrade.md +++ b/product/en/docs-mogdb/v5.0/upgrade-guide/2-read-before-upgrade.md @@ -69,8 +69,4 @@ MogDB 5.0.6 supports parallel import/export, which relies on the new built-in pl ```sql create extension tidrangescan; -``` - -### Target Version 5.0.8 - -After upgrading the old version of MogDB to version 5.0.8, when using the dolphin extension to do union (set calculation) in the B-compatible database, all text types will be converted to text type (support for multiple union type inference and conversion to ensure that union returns a fixed type of result set). For the old version of the database using the dolphin extension, you need to do the adaptation of the client transformation. 
\ No newline at end of file +``` \ No newline at end of file -- Gitee From 173f49140ebb0da05f846c70f40024b044fe3baf Mon Sep 17 00:00:00 2001 From: spaceoddity91719 Date: Wed, 16 Oct 2024 11:51:06 +0800 Subject: [PATCH 2/4] =?UTF-8?q?fix(mogdb):=E9=83=A8=E5=88=86=E5=86=85?= =?UTF-8?q?=E5=AE=B9=E6=9B=B4=E6=AD=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../plpgsql/1-4-arrays-and-records.md | 87 ---- .../plpgsql/1-4-arrays-and-records.md | 87 ---- ...lysis-multi-metric-correlation-analysis.md | 18 - .../migrating-data/data-check.md | 273 ---------- .../migrating-data/full-migration.md | 117 ----- .../migrating-data/incremental-migration.md | 149 ------ .../migrating-data-from-mysql-to-mogdb.md | 20 - .../migrating-data/quick-mysql-migration.md | 470 ------------------ .../migrating-data/reverse-migration.md | 149 ------ .../plpgsql/1-4-arrays-and-records.md | 87 ---- .../guc-parameters/developer-options.md | 10 +- .../ha-replication/primary-server.md | 4 +- .../optimizer-method-configuration.md | 2 +- .../write-ahead-log/archiving.md | 4 +- product/zh/docs-mogdb/v5.0/toc_performance.md | 22 +- .../plpgsql/1-4-arrays-and-records.md | 87 ---- .../guc-parameters/developer-options.md | 10 +- .../optimizer-method-configuration.md | 2 +- product/zh/docs-mogdb/v6.0/toc_performance.md | 22 +- 19 files changed, 35 insertions(+), 1585 deletions(-) delete mode 100644 product/zh/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-multi-metric-correlation-analysis.md delete mode 100644 product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/data-check.md delete mode 100644 product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/full-migration.md delete mode 100644 product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/incremental-migration.md delete mode 100644 product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/migrating-data-from-mysql-to-mogdb.md delete mode 100644 product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/quick-mysql-migration.md delete mode 100644 product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/reverse-migration.md diff --git a/product/en/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md b/product/en/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md index 3af5831d..9b808883 100644 --- a/product/en/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md +++ b/product/en/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md @@ -621,93 +621,6 @@ The set functions support **multiset union**, **intersect**, **except all**, and ANONYMOUS BLOCK EXECUTE ``` -#### Set-related Functions - -- unnest_table(anynesttable) - - Description: Returns a set of elements in a nest-table. - - Return type: setof anyelement - - Restriction: The tableof type cannot be nested with the tableof type, or the tableof type cannot be nested with other types and then the tableof type. - - Example: - - ```sql - create or replace procedure f1() - as - type t1 is table of int; - v2 t1 := t1(null, 2, 3, 4, null); - tmp int; - cursor c1 is select * from unnest_table(v2); - begin - open c1; - for i in 1 .. 
v2.count loop - fetch c1 into tmp; - if tmp is null then - dbe_output.print_line(i || ': is null'); - else - dbe_output.print_line(i || ': ' || tmp); - end if; - end loop; - close c1; - end; - / - - MogDB=# call f1(); - 1: is null - 2: 2 - 3: 3 - 4: 4 - 5: is null - f1 - ---- - - (1 row) - ``` - -- unnest_table(anyindexbytable) - - Description: Returns the set of elements in an index-by table sorted by index. - - Return type: setof anyelement - - Restriction: The tableof type cannot be nested with the tableof type, or the tableof type cannot be nested with other types and then the tableof type. Only the index by int type is supported. The index by varchar type is not supported. - - Example: - - ```sql - create or replace procedure f1() - as - type t1 is table of int index by int; - v2 t1 := t1(1=>1, -10=>(-10), 6=>6, 4=>null); - tmp int; - cursor c1 is select * from unnest_table(v2); - begin - open c1; - for i in 1 .. v2.count loop - fetch c1 into tmp; - if tmp is null then - dbe_output.print_line(i || ': is null'); - else - dbe_output.print_line(i || ': ' || tmp); - end if; - end loop; - close c1; - end; - / - - MogDB=# call f1(); - 1: -10 - 2: 1 - 3: is null - 4: 6 - f1 - ---- - - (1 row) - ``` - ## record **record Variables** diff --git a/product/en/docs-mogdb/v6.0/developer-guide/plpgsql/1-4-arrays-and-records.md b/product/en/docs-mogdb/v6.0/developer-guide/plpgsql/1-4-arrays-and-records.md index 797dcff6..d9f2572b 100644 --- a/product/en/docs-mogdb/v6.0/developer-guide/plpgsql/1-4-arrays-and-records.md +++ b/product/en/docs-mogdb/v6.0/developer-guide/plpgsql/1-4-arrays-and-records.md @@ -608,93 +608,6 @@ The set functions support **multiset union**, **intersect**, **except all**, and ANONYMOUS BLOCK EXECUTE ``` -#### Set-related Functions - -- unnest_table(anynesttable) - - Description: Returns a set of elements in a nest-table. - - Return type: setof anyelement - - Restriction: The tableof type cannot be nested with the tableof type, or the tableof type cannot be nested with other types and then the tableof type. - - Example: - - ```sql - create or replace procedure f1() - as - type t1 is table of int; - v2 t1 := t1(null, 2, 3, 4, null); - tmp int; - cursor c1 is select * from unnest_table(v2); - begin - open c1; - for i in 1 .. v2.count loop - fetch c1 into tmp; - if tmp is null then - gms_output.put(i || ': is null'); - else - gms_output.put(i || ': ' || tmp); - end if; - end loop; - close c1; - end; - / - - MogDB=# call f1(); - 1: is null - 2: 2 - 3: 3 - 4: 4 - 5: is null - f1 - ---- - - (1 row) - ``` - -- unnest_table(anyindexbytable) - - Description: Returns the set of elements in an index-by table sorted by index. - - Return type: setof anyelement - - Restriction: The tableof type cannot be nested with the tableof type, or the tableof type cannot be nested with other types and then the tableof type. Only the index by int type is supported. The index by varchar type is not supported. - - Example: - - ```sql - create or replace procedure f1() - as - type t1 is table of int index by int; - v2 t1 := t1(1=>1, -10=>(-10), 6=>6, 4=>null); - tmp int; - cursor c1 is select * from unnest_table(v2); - begin - open c1; - for i in 1 .. 
v2.count loop - fetch c1 into tmp; - if tmp is null then - gms_output.put(i || ': is null'); - else - gms_output.put(i || ': ' || tmp); - end if; - end loop; - close c1; - end; - / - - MogDB=# call f1(); - 1: -10 - 2: 1 - 3: is null - 4: 6 - f1 - ---- - - (1 row) - ``` - ## record **record Variables** diff --git a/product/zh/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-multi-metric-correlation-analysis.md b/product/zh/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-multi-metric-correlation-analysis.md deleted file mode 100644 index 77f86a0c..00000000 --- a/product/zh/docs-mogdb/v5.0/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-multi-metric-correlation-analysis.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: 多指标关联分析 -summary: 多指标关联分析 -author: zhang cuiping -date: 2023-04-07 ---- - -# Anomaly-analysis-多指标关联分析 - -- **[概述](anomaly-analysis-overview.md)** - -- **[使用指导](anomaly-analysis-usage-guide.md)** - -- **[获取帮助](anomaly-analysis-obtaining-help-information.md)** - -- **[命令参考](anomaly-analysis-command-reference.md)** - -- **[常见问题处理](anomaly-analysis-troubleshooting.md)** \ No newline at end of file diff --git a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/data-check.md b/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/data-check.md deleted file mode 100644 index e599d09d..00000000 --- a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/data-check.md +++ /dev/null @@ -1,273 +0,0 @@ ---- -title: 数据校验 -summary: 数据校验 -author: Guo Huan -date: 2023-05-29 ---- - -# 数据校验 - -## 功能介绍 - -数据校验工具 gs_datacheck,分为check服务和extract服务。check服务用于数据校验,extract服务用于数据抽取和规整。 - -## 原理介绍 - -全量校验: - -在全量数据迁移完成后,由extract服务对MySQL源端和MogDB目标端数据通过JDBC方式进行数据抽取然后规整计算,并将计算后的中间数据推送到kafka中。最后由check服务提取kafka中的中间数据,构建默克尔树,通过默克尔树比对实现表数据校验且输出校验结果。 - -增量校验: - -由debezium服务侦听源端MySQL数据库的增量数据,到指定topic。再由源端extract服务处理该topic增量数据,触发check增量校验。 - -## 环境准备 - -- ARM+openEuler 20.03 或 X86+CentOS 5.7 -- JDK : JDK11+ -- MYSQL:要求5.7+版本 -- MogDB:MogDB 3.0.0+ - -## 操作步骤 - -全量校验 gs_datacheck 依赖MySQL一键式迁移工具gs_rep_portal,可实现全量迁移的安装、启动、停止、卸载整个过程。 - -- 下载gs_rep_portal - - ```bash - wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/tools/portal/PortalControl-5.0.0.tar.gz - ``` - - 解压,并进入portal对应目录 - - ```bash - tar -zxvf PortalControl-5.0.0.tar.gz - cd portal - ``` - -- 修改gs_rep_portal配置文件 - - 配置文件位于config目录内,数据校验相关的配置文件主要包含如下三个,相关参数含义简要说明如下: - - - application.yml - - ``` - #校验服务配置 修改application.yml文件 - server: - port: 9000 # 为校验服务web端口,默认可不修改 - logging: - config: # absolute_path/config/log4j2.xml 设置校验服务日志路径为config/log4j2.xml文件绝对路径 - spring: - kafka: - bootstrap-servers: localhost:9092 # 为kafka工作地址,默认安装可不修改 - data: - check: - data-path: ./check_result # 校验结果输出地址,默认配置可不修改 - source-uri: http://127.0.0.1:9001 # server.port=9001 源端服务请求地址,默认配置可不修改 - sink-uri: http://127.0.0.1:9002 # server.port=9002 目标端服务请求地址,默认配置可不修改 - auto-delete-topic: 2 #配置是否自动删除Topic,0不删除,1校验全部完成后删除,2表校验完 - 成后删除,默认值为2 - increment-max-diff-count: 10 #增量校验表最大处理差异数,超过则暂停校验,差异降低则重新自动开启增量校验 - core-pool-size: 10 #并发线程数设置,根据当前环境配置,可不修改,默认10,设置为0则系统自动分配 - max-retry-times: 1000 # 心跳等最大尝试次数,默认1000 - retry-interval-times: 10000 # 心跳、进度等最大间隔时间单位毫秒 默认10000 - - # 提供三种过滤规则,分别是表级、行级、列级。规则是以列表集合的形式配置的。 - rules: - enable: false # 过滤规则开关:enable=true启用过滤规则,enable=false关闭过滤规则 - - # 表级过滤规则:通过配置黑白列表来过滤当前数据库表。 - # 
黑白列表配置是互斥的,即不能同时配置黑白列表,如果同时配置黑名单和白名单,则只有白名单才会生效。 - # 黑名单和白名单配置规则必须遵守: - # 配置的name属性必须是white或black,否则规则无效,我们将自动过滤无效规则 - # 如果配置的text属性不不符合正则表达式,或者为空,则该规则无效,将自动丢弃该规则 - # 如果配置的TEXT重复,则规则项将自动筛选重复项。 - table: - # - name: white - # text: ^[a-zA-Z][a-zA-Z_]+$ - # - name: black - # text: ^[a-zA-Z][a-zA-Z_]+$ - - # 行级过滤是通过添加规则来过滤所有表中需要验证的记录。 - # 根据主键对表数据进行升序排序,并根据用户配置的抽取范围来进行数据抽取。 - # 如果表规则和行规则同时配置,则行规则将根据表规则进行适配。 - # 行级规则配置规则 例如:10,100 - # 如果表名为table_name,主键为id,给当前表添加该行级过滤规则,则SQL等效为select * from table_name order by id asc limit 10 , 100 - # 行配置规则必须遵守: - # 如果配置的文本与正则表达式^\d+(\,\d+)不匹配,则该规则无效,将自动筛选 - # 如果配置的名称不不符合正则表达式,或者为空,则该规则无效,将自动筛选 - # 如果配置的名称重复,则规则项将自动筛选重复的名称 - # 行过滤规则配置 - row: - # - name: ^[a-zA-Z][a-zA-Z_]+$ - # text: 10,100 - # - name: ^[a-zA-Z][a-zA-Z_]+$ - # text: 100,100 - # - name: ^[a-zA-Z]+$a-zA-Z_]+$ - # text: 100,300 - # - name: ^[a-zA-Z]+$ - # text: 10a,100 - # - name: ^[a-zA-Z][a-zA-Z0-9_]+$ - # text: 10,100 - - # 列级过滤是通过添加的规则来过滤当前表中需要校验的字段列。列级过滤分为两类:包含规则和排他规则。它们之间是互斥的。 - # 包含规则只校验已配置的字段列表,排他规则不校验已配置字段列表。由于我们进行数据校验时,要求待校验表必须包含主键。因此,如果包含规则中没有配置主键字段,包含规则将自动添加主键列。此外,如果排它规则中配置了主键字段,则排它规则将自动删除主键列 - # 列级规则配置 - # name: 表名称 表名称不能为空,会自动过滤重复表配置 - # text: field1,field2,...field - # attribute: include 表示包含规则 或者 exclude表示排它规则 - column: - # - name: t_test_1 - # text: id,portal_id,func_id,name,width,last_upd_time - # attribute: include - # - name: t_test_2 - # text: id,portal_id,func_id,name - # attribute: include - # - name: t_test_2 - # text: name,height,last_upd_time,last_upd_time - # attribute: include - # - name: t_test_4 - # text: name,height,last_upd_time - # attribute: exclude - ``` - - - application-source.yml - - ``` - 源端服务配置 修改application-source.yml文件 - server: - port: 9001 # 为源端抽取服务web端口,默认可不修改 - logging: - config: # absolute_path/log4j2source.xml 设置校验服务日志路径为config/log4j2source.xml文件绝对路径 - - spring: - check: - server-uri: http://127.0.0.1:9000 # 校验服务请求地址,默认配置可不修改 - max-core-pool-size: 10 # 并发线程数设置,可不修改,默认10,设置为0则系统自动分配 - max-retry-times: 1000 # 最大尝试次数 - retry-interval-times: 10000 # 最大间隔时间单位毫秒 默认10000 - - extract: - schema: test # 当前校验数据schema,mysql 数据库名称 - databaseType: MS # 当前校验数据库类型 mysql MS , opengauss OG - query-dop: 8 # 表JDBC并行查询度,当表数据量超过百万时自动生效,默认为8,最大64 - debezium-enable: false # 是否开启增量配置 - debezium-topic: data_check_avro_inc_topic_w1 # debezium topic - debezium-serializer: AvroSerializer # 序列化类型 StringSerializer or AvroSerializer - debezium-avro-registry: http://localhost:8081 # avro schema 注册地址 - debezium-groupId: debezium-extract-group # debezium topic groupId - debezium-time-period: 1 # 增量校验配置周期(单位分钟): 24 * 60 unit: Min - debezium-num-period: 1000 # 增量校验数量周期,最小值100,默认1000 - - kafka: - bootstrap-servers: localhost:9092 # 为kafka工作地址,默认安装可不修改 - - # 数据源配置,工具默认采用druid数据源,用户可以自定义配置连接池参数,可根据当前校验数据库任务数量(表数量)进行调整 - datasource: - druid: - dataSourceOne: - driver-class-name: com.mysql.cj.jdbc.Driver - url: jdbc:mysql://127.0.0.1:3306/mysql?useSSL=false&useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC&allowPublicKeyRetrieval=true - username: - password: 'xxxx' # - initialSize: 5 # 默认初始连接大小 - minIdle: 10 # 默认最小连接池数量 - maxActive: 20 # 最大连接数 maxActive 大于query-dop 一般为query-dop 的2-3倍 - ``` - - - application-sink.yml - - ``` - # 目标端服务配置 修改application-sink.yml文件 - server: - port: 9002 # 为目标端抽取服务web端口,默认可不修改 - logging: - config: # absolute_path/log4j2sink.xml 设置校验服务日志路径为config/log4j2sink.xml文件绝对路径 - - spring: - check: - server-uri: http://127.0.0.1:9000 # 校验服务请求地址,默认配置可不修改 - max-core-pool-size: 10 # 并发线程数设置,可不修改,默认10,设置为0则系统自动分配 - max-retry-times: 1000 # 最大尝试次数 - retry-interval-times: 
10000 # 最大间隔时间单位毫秒 默认10000 - - extract: - schema: test # 当前校验数据schema,mysql 数据库名称 - databaseType: OG # 当前校验数据库类型 mysql MS , opengauss OG - query-dop: 8 # 表JDBC并行查询度,当表数据量超过百万时自动生效,默认为8,最大64 - debezium-enable: false # debezium相关配置,在sink 端无需配置 - debezium-topic: data_check_avro_inc_topic_w1 # debezium topic - debezium-serializer: AvroSerializer # 序列化类型 StringSerializer or AvroSerializer - debezium-avro-registry: http://localhost:8081 # avro schema 注册地址 - debezium-groupId: debezium-extract-group # debezium topic groupId - debezium-time-period: 1 # 增量校验配置周期(单位分钟): 24 * 60 unit: Min - debezium-num-period: 1000 # 增量校验数量周期,最小值100,默认1000 - - kafka: - bootstrap-servers: localhost:9092 # 为kafka工作地址,默认安装可不修改 - - # 数据源配置,工具默认采用druid数据源,用户可以自定义配置连接池参数,可根据当前校验数据库任务数量(表数量)进行调整 - datasource: - druid: - dataSourceOne: - driver-class-name: org.opengauss.Driver - url: jdbc:opengauss://xxxxx:xxx/xxxx?useSSL=false&useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC - username: - password: 'xxxx' # - initialSize: 5 # 默认初始连接大小 - minIdle: 10 # 默认最小连接池数量 - maxActive: 20 # 最大连接数 maxActive 大于query-dop 一般为query-dop 的2-3倍 - ``` - -- 安装 - - ```bash - # 全量校验 - sh gs_datacheck.sh install full workspace.id - # 增量校验 - sh gs_datacheck.sh install incremental workspace.id - ``` - - 其中workspace.id表示迁移任务id,取值为一个整数,不同的id区分不同的校验任务,不同校验任务可并行启动。full 表示全量校验 ,incremental 表示增量校验 - -- 启动 - - ```bash - # 全量校验 - sh gs_datacheck.sh start full workspace.id - # 增量校验 - sh gs_datacheck.sh start incremental workspace.id - ``` - -- 停止 - - ```bash - # 全量校验 - sh gs_datacheck.sh stop full workspace.id - # 增量校验 - sh gs_datacheck.sh stop incremental workspace.id - ``` - -- 卸载 - - ```bash - # 全量校验 - sh gs_datacheck.sh uninstall full workspace.id - # 增量校验 - sh gs_datacheck.sh uninstall incremental workspace.id - ``` - -## 注意事项 - -- JDK版本要求JDK11+ -- 当前版本仅支持对源端MySQL,目标端MogDB数据校验 -- 当前版本仅支持数据校验,不支持表对象校验 -- MYSQL需要5.7+版本 -- 当前版本不支持地理位置几何图形数据校验 -- 校验工具当前不支持校验中断(网络故障、kill进程等)自动恢复。 -- 数据校验行级过滤规则配置,只支持[offset,count]指定范围内抽取,不支持排除[offset,count]范围之内的数据过滤。 -- 行过滤规则抽取中间范围内数据(例如:[10,100]),如果源端在该范围之前的数据[0,10]发生删除操作,则会导致该表在指定范围内数据发生偏移,从而导致数据校验结果产生差异。此时需要扩大前置下标范围,以及增加相应的抽取数量。即[3,107]。 -- 当对主键的update语句没有通过增量迁移同步到目的端 或 主键同步发生错误的时候,进行数据校验,源端update后的新数据和目标端的旧数据是两条独立的数据,对校验差异进行处理时,会生成两条语句,即对旧数据进行删除,对新数据做插入。此场景会将一条主键update语句拆分为两条语句(insert+delete)来执行,且分解到两个事务中执行,无法保证原子性。 -- 增量校验不支持表级规则 -- 增量校验不支持行级规则 -- 增量校验目前只支持数据增删改校验,暂时不支持表结构(对象)校验(包括多表少表) \ No newline at end of file diff --git a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/full-migration.md b/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/full-migration.md deleted file mode 100644 index 9086a1ed..00000000 --- a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/full-migration.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: 全量迁移 -summary: 全量迁移 -author: Guo Huan -date: 2023-05-29 ---- - -# 全量迁移 - -## 功能介绍 - -全量迁移gs_mysync是一个用Python3编写的将MySQL迁移至MogDB的复制工具,支持初始全量数据的复制功能。gs_mysync通过一次初始化配置,使用只读模式,将MySQL的数据全量拉取到MogDB。支持在同一快照下,表间数据并行迁移。 - -全量迁移支持的功能:支持表及表数据、视图、触发器、自定义函数、存储过程的全量迁移 - -## 特性优势 - -基于sysbench测试模型,2路鲲鹏920 CPU、openEuler操作系统下,MySQL数据库10张表(无主键)单表数据量在500万以上时,gs_mysync使用20并发迁移数据至MogDB,整体全量迁移性能可达300M/s以上。 - -## 环境准备 - -ARM+openEuler 20.03 或 X86+CentOS 5.7 - -## 操作步骤 - -全量迁移gs_mysync依赖MySQL一键式迁移工具gs_rep_portal,可实现全量迁移的安装、启动、停止、卸载整个过程。 - -- 下载gs_rep_portal - - ```bash - wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/tools/portal/PortalControl-5.0.0.tar.gz - ``` - - 解压,并进入portal对应目录 - - 
```bash - tar -zxvf PortalControl-5.0.0.tar.gz - cd portal - ``` - -- 修改gs_rep_portal配置文件 - - 配置文件位于config目录内,全量迁移相关的配置文件主要包含如下两个,相关参数含义简要说明如下: - - - toolspath.properties - - ```bash - # 全量迁移python虚拟环境的路径,可自定义修改 - chameleon.venv.path=/ops/portal/tools/chameleon/ - # 全量迁移用户相关路径 - chameleon.path=~/.pg_chameleon/ - # 全量迁移在线whl包的下载路径 - chameleon.pkg.url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/5.0.0/chameleon/chameleon-5.0.0-py3-none-any.whl - # 全量迁移安装包的路径,可自定义修改 - chameleon.pkg.path=/ops/portal/pkg/chameleon/ - # 全量迁移安装包的名称 - chameleon.pkg.name=chameleon-5.0.0-py3-none-any.whl - ``` - - - migrationConfig.properties - - ```bash - # 用于指定全量迁移是否迁移对象,包括函数、存储过程、触发器、视图,默认为yes;若设置为no,表示不迁移对象 - snapshot.object=yes - # mysql用户名 - mysql.user.name=root - # mysql密码 - mysql.user.password=*** - # mysql数据库ip - mysql.database.host=127.0.0.1 - # mysql数据库端口 - mysql.database.port=3306 - # mysql数据库名称 - mysql.database.name=test123 - # MogDB用户名 - opengauss.user.name=test - # MogDB密码 - opengauss.user.password=*** - # MogDB数据库ip - opengauss.database.host=127.0.0.1 - # MogDB数据库端口 - opengauss.database.port=5432 - # MogDB数据库名称 - opengauss.database.name=test1234 - # MogDB数据库的schema名称 - opengauss.database.schema=test123 - # 全量迁移的安装方式,默认为offline,表示离线安装,需通过参数chameleon.pkg.path指定离线安装包的路径;若设置为online,对应在线安装,在线下载的安装包将存放在参数chameleon.pkg.path指定的路径 - default.install.mysql.full.migration.tools.way=offline - ``` - -- 安装 - - ```bash - sh gs_mysync.sh install workspace.id - ``` - - 其中workspace.id表示迁移任务id,取值为数字和小写字母的组合,不同的id区分不同的迁移任务,不同迁移任务可并行启动。若未设置workspace.id,则使用其默认值1。若使用已存在的workspace.id,并修改其中的配置,请在portal/workspace/${workspace.id}/config/路径下修改对应的配置文件。 - -- 启动 - - ```bash - sh gs_mysync.sh start workspace.id - ``` - -- 停止 - - ```bash - sh gs_mysync.sh stop workspace.id - ``` - -- 卸载 - - ```bash - sh gs_mysync.sh uninstall workspace.id - ``` - -上述安装、启动、停止、卸载命令均不会在后台运行,若需在后台运行,请在命令后添加&符号。 \ No newline at end of file diff --git a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/incremental-migration.md b/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/incremental-migration.md deleted file mode 100644 index 7fc500a2..00000000 --- a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/incremental-migration.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: 增量迁移 -summary: 增量迁移 -author: Guo Huan -date: 2023-05-29 ---- - -# 增量迁移 - -## 功能介绍 - -增量迁移是指将MySQL数据迁移期间(包括全量和增量迁移)产生的增量数据迁移至MogDB端。 - -## 原理简介 - -debezium mysql connector的source端,监控MySQL数据库的binlog日志,并将数据(DDL和DML操作)以AVRO格式写入到kafka;debezium mysql connector的sink端,从kafka读取AVRO格式数据(DDL和DML操作),并组装为事务,在MogDB端按照事务粒度并行回放,从而完成数据(DDL和DML操作)从MySQL在线迁移至MogDB端。由于该方案严格保证事务的顺序性,因此将DDL和DML路由在kafka的一个topic下,且该topic的分区数只能为1(参数num.partitions=1),从而保证source端推送到kafka,和sink端从kafka拉取数据都是严格保序的。 - -## 特性优势 - -- 利用sysbench对MySQL进行压测,2路鲲鹏920 CPU、openEuler操作系统下,针对混合IUD场景,10张表50个线程(insert-30线程,update-10线程,delete-10线程),在线迁移性能可达3w tps。 -- 目标数据库的数据是有序的,且保证事务一致性。 - -## 操作步骤 - -增量迁移gs_replicate依赖MySQL一键式迁移工具gs_rep_portal,可实现增量迁移的安装、启动、停止、卸载整个过程。 - -- 下载gs_rep_portal - - ```bash - wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/tools/portal/PortalControl-5.0.0.tar.gz - ``` - - 解压,并进入portal对应目录 - - ```bash - tar -zxvf PortalControl-5.0.0.tar.gz - cd portal - ``` - -- 修改gs_rep_portal配置文件 - - 配置文件位于config目录内,增量迁移相关的配置文件主要包含如下两个,相关参数含义简要说明如下: - - - toolspath.properties - - ```bash - # 增量迁移依赖包路径,可自定义修改 - debezium.path=/ops/portal/tools/debezium/ - # kafka路径,位于增量迁移依赖包路径下,可自定义修改 - 
kafka.path=/ops/portal/tools/debezium/kafka_2.13-3.2.3/ - # confluent路径,位于增量迁移依赖包路径下,可自定义修改 - confluent.path=/ops/portal/tools/debezium/confluent-5.5.1/ - # connector路径,位于增量迁移依赖包路径下,可自定义修改 - connector.path=/ops/portal/tools/debezium/plugin/ - # debezium mysql connector路径,位于connector路径下,可自定义修改 - connector.mysql.path=/ops/portal/tools/debezium/plugin/debezium-connector-mysql/ - # kafka下载路径 - kafka.pkg.url=https://downloads.apache.org/kafka/3.2.3/kafka_2.13-3.2.3.tgz - # confluent下载路径 - confluent.pkg.url=https://packages.confluent.io/archive/5.5/confluent-community-5.5.1-2.12.zip - # debezium mysql connector下载路径 - connector.mysql.pkg.url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/5.0.0/tools/replicate-mysql2openGauss-5.0.0.tar.gz - # 增量迁移依赖安装包路径,可自定义修改 - debezium.pkg.path=/ops/portal/pkg/debezium/ - # kafka包名 - kafka.pkg.name=kafka_2.13-3.2.3.tgz - # confluent包名 - confluent.pkg.name=confluent-community-5.5.1-2.12.zip - # mysql2openGauss在线复制包名 - connector.mysql.pkg.name=replicate-mysql2openGauss-5.0.0.tar.gz - ``` - - - migrationConfig.properties - - ```bash - # mysql用户名 - mysql.user.name=root - # mysql密码 - mysql.user.password=*** - # mysql数据库ip - mysql.database.host=127.0.0.1 - # mysql数据库端口 - mysql.database.port=3306 - # mysql数据库名称 - mysql.database.name=test123 - # MogDB用户名 - opengauss.user.name=test - # MogDB密码 - opengauss.user.password=*** - # MogDB数据库ip - opengauss.database.host=127.0.0.1 - # MogDB数据库端口 - opengauss.database.port=5432 - # MogDB数据库名称 - opengauss.database.name=test1234 - # MogDB数据库的schema名称 - opengauss.database.schema=test123 - # 在线迁移的安装方式,默认为offline,表示离线安装,需通过参数debezium.pkg.path指定离线依赖安装包的路径;若设置为online,对应在线安装,在线下载的安装包将存放在参数debezium.pkg.path指定的路径 - default.install.mysql.incremental.migration.tools.way=offline - ``` - -- 安装 - - ```bash - sh gs_replicate.sh install mysql-opengauss workspace.id - ``` - - 其中workspace.id表示迁移任务id,取值为数字和小写字母的组合,不同的id区分不同的迁移任务,不同迁移任务可并行启动。若未设置workspace.id,则使用其默认值1。若使用已存在的workspace.id,并修改其中的配置,请在portal/workspace/${workspace.id}/config/路径下修改对应的配置文件。 - -- 启动 - - ```bash - sh gs_replicate.sh start mysql-opengauss workspace.id - ``` - -- 停止 - - ```bash - sh gs_replicate.sh stop mysql-opengauss workspace.id - ``` - -- 卸载 - - ```bash - sh gs_replicate.sh uninstall mysql-opengauss workspace.id - ``` - -上述安装、启动、停止、卸载命令均不会在后台运行,若需在后台运行,请在命令后添加&符号。 - -## 注意事项 - -- 当前支持MySQL IUD操作(insert、update、delete)产生的增量数据迁移至MogDB。 - -- 支持迁移MogDB数据库兼容的MySQL DDL语句,对于不兼容的DDL,迁移时会报错处理(MogDB在完善对DDL的兼容性)。 - -- 为保证事务的顺序和一致性,不支持skip_event、limit_table、skip_table等设置。 - -- MySQL需要5.7及以上版本。 - -- MySQL参数设置要求为:log_bin=ON,binlog_format=ROW,binlog_row_image=FULL,gtid_mode = ON。若gtid_mode为off,则sink端按照事务顺序串行回放,会降低在线迁移性能。 - -- 先进行全量迁移,再进行增量迁移,全量迁移可基于[gs_mysync](https://gitee.com/opengauss/openGauss-tools-chameleon)工具完成。 - -- Kafka中以AVRO格式存储数据,AVRO字段名称[命名规则](https://gitee.com/link?target=https%3A%2F%2Favro.apache.org%2Fdocs%2F1.11.1%2Fspecification%2F%23names)为: - - - 以[A-Za-z_]开头 - - 随后仅包含[A-Za-z0-9_] - - 因此,对于MySQL中的标识符命名,包括表名、列名等,需满足上述命名规范,否则在线迁移会报错。 \ No newline at end of file diff --git a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/migrating-data-from-mysql-to-mogdb.md b/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/migrating-data-from-mysql-to-mogdb.md deleted file mode 100644 index 82e3838c..00000000 --- a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/migrating-data-from-mysql-to-mogdb.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: 迁移MySQL数据库至MogDB 
-summary: 迁移MySQL数据库至MogDB -author: Guo Huan -date: 2023-05-29 ---- - -# 迁移MySQL数据库至MogDB - -## 工具部署架构图 - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/migrating-data-from-mysql-to-mogdb.png) - -当前MogDB支持对MySQL迁移服务,具体包括: - -- **[MySQL一键式迁移](./quick-mysql-migration.md)** -- **[全量迁移](./full-migration.md)** -- **[增量迁移](./incremental-migration.md)** -- **[反向迁移](./reverse-migration.md)** -- **[数据校验](./data-check.md)** \ No newline at end of file diff --git a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/quick-mysql-migration.md b/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/quick-mysql-migration.md deleted file mode 100644 index 41e107ac..00000000 --- a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/quick-mysql-migration.md +++ /dev/null @@ -1,470 +0,0 @@ ---- -title: MySQL一键式迁移 -summary: MySQL一键式迁移 -author: Guo Huan -date: 2023-05-29 ---- - -# MySQL一键式迁移 - -## 功能介绍 - -gs_rep_portal是一个用Java编写的,在linux系统上运行的,集成了全量迁移、增量迁移、反向迁移、数据校验的工具。gs_rep_portal支持以上工具的一键式安装上述工具,设定迁移任务,任务根据用户设定的执行计划顺序的调用相应工具完成每个迁移步骤,并能实时展示每个步骤的状态、进度、异常原因等。 - -## 注意事项 - -- portal在执行增量迁移、反向迁移、增量校验时需要使用curl工具。 -- 同一个迁移计划的增量迁移和反向迁移不会同时开启,如果一个计划中包含了增量迁移和反向迁移,那么需要用户手动停止增量迁移,启动反向迁移。当用户启动反向迁移之后,无法再启动增量迁移。 -- 用户在停止增量迁移之后到启动反向迁移之前,禁止向MogDB进行作业,否则会导致这之间的数据丢失。 -- portal使用的workspace.id只能为小写字母与数字的组合。 -- portal在启动多个计划时,需要保证MySQL数据库实例各不相同,MogDB端数据库各不相同,且同一个MySQL数据库实例和MogDB端数据库的增量迁移和反向迁移不能同时开启。 - -## 默认文件结构 - -使用默认配置安装的portal的文件结构如下。 - -``` -portal/ - config/ - migrationConfig.properties - toolspath.properties - status - currentPlan - input - chameleon/ - config-example.yml - datacheck/ - application-source.yml - application-sink.yml - application.yml - log4j2.xml - log4j2source.xml - log4j2sink.xml - debezium/ - connect-avro-standalone.properties - mysql-sink.properties - mysql-source.properties - opengauss-sink.properties - opengauss-source.properties - logs/ - portal.log - pkg/ - chameleon/ - chameleon-5.0.0-py3-none-any.whl - datacheck/ - openGauss-datachecker-performance-5.0.0.tar.gz - debezium/ - confluent-community-5.5.1-2.12.zip - debezium-connector-mysql-1.8.1.Final-plugin.tar.gz - debezium-connector-opengauss-1.8.1.Final-plugin.tar.gz - kafka_2.13-3.2.3.tgz - tmp/ - tools/ - chameleon/ - datacheck/ - debezium/ - confluent-5.5.1/ - kafka_2.13-3.2.3/ - plugin/ - debezium-connector-mysql/ - debezium-connector-opengauss/ - portal.portId.lock - portalControl-1.0-SNAPSHOT-exec.jar - gs_datacheck.sh - gs_mysync.sh - gs_rep_portal.sh - gs_replicate.sh - README.md -``` - -## 安装教程 - -portal的安装目录默认为/ops/portal,可根据实际需要更换。 - -### 源码安装 - -1. 通过git命令下载源代码,将源代码中的portal文件夹复制到/ops下。 - - ```bash - git clone https://gitee.com/opengauss/openGauss-migration-portal.git - ``` - -2. 使用maven命令编译源代码获得portalControl-1.0-SNAPSHOT-exec.jar,并将jar包放在/ops/portal下。 - - ```bash - mvn clean package -Dmaven.test.skip=true - ``` - - java版本:open JDK11及以上 - - maven版本:3.8.1以上 - -3. 使用一键式脚本启动portal时,请将/ops/portal/shell目录下中的.sh文件提取出来,放在/ops/portal/目录,也就是和jar包同一目录下。 - -### 安装包安装 - -下载链接: - -[https://opengauss.obs.cn-south-1.myhuaweicloud.com/tools/portal/PortalControl-5.0.0.tar.gz](https://opengauss.obs.cn-south-1.myhuaweicloud.com/tools/portal/PortalControl-5.0.0.tar.gz) - -1. 下载gs_rep_portal安装包 - - ```bash - wget -c https://opengauss.obs.cn-south-1.myhuaweicloud.com/tools/portal/PortalControl-5.0.0.tar.gz - ``` - -2. 
解压gs_rep_portal安装包 - - ```bash - tar -zxvf PortalControl-5.0.0.tar.gz - ``` - -## 启动方式 - -使用一键式脚本gs_rep_portal启动portal,通过参数使用portal的各项功能。 - -```bash -sh gs_rep_portal.sh 参数 workspace.id & -``` - -这里的参数为数个单词之间加下划线,比如"start_mysql_full_migration"这种形式,分为安装指令,启动指令,停止指令,卸载指令等,会在下文介绍。 - -portal会在workspace文件夹下创造对应id的文件夹,并将执行任务时的参数和日志等信息存入该文件夹。如果不指定workspace.id,那么workspace.id默认值为1。 - -命令行输入以下指令可以查看帮助(包括使用方式和可用指令): - -```bash -sh gs_rep_portal.sh help & -``` - -参数优先级: workspace下设置的参数 > 公共空间参数。如果使用的workspace.id和之前存在的workspace.id相同的话将沿用之前的workspace里面的参数,如果不同的话,那么portal将从config文件夹中复制一份配置文件到id对应的workspace下面作为这个任务的配置文件。 - -建议每次运行迁移任务时使用不同的workspace.id。 - -### 安装迁移工具 - -迁移功能与对应的迁移工具如下表所示: - -| 迁移功能 | 使用工具 | -| :--------------------------------- | :--------------------------------------------- | -| 全量迁移 | chameleon | -| 增量迁移 | kafka、confluent、debezium-connector-mysql | -| 反向迁移 | kafka、confluent、debezium-connector-opengauss | -| 数据校验(包括全量校验和增量校验) | kafka、confluent、datacheck | - -各工具推荐版本: - -| 工具 | 版本 | -| :--------------------------- | :--------- | -| chameleon | 5.0.0 | -| kafka | 2.13-3.2.3 | -| confluent | 5.5.1 | -| datacheck | 5.0.0 | -| debezium-connector-mysql | 1.8.1 | -| debezium-connector-opengauss | 1.8.1 | - -在/ops/portal/config目录的toolspath.properties文件中修改工具安装路径,其中文件夹要以/结尾: - -| 参数名称 | 参数说明 | -| :--------------------------- | :----------------------------------------------------------- | -| chameleon.venv.path | 变色龙虚拟环境所在位置 | -| chameleon.pkg.path | 变色龙的安装包所在路径 | -| chameleon.pkg.name | 变色龙的安装包名 | -| chameleon.pkg.url | 变色龙的安装包下载链接 | -| debezium.path | debezium+kafka所在路径(默认kafka、confluent、connector都安装在该路径下) | -| kafka.path | kafka所在路径 | -| confluent.path | confluent所在路径 | -| connector.path | connector所在路径 | -| debezium.pkg.path | debezium+kafka安装包所在路径(默认kafka、confluent、connector安装包都在该路径下) | -| kafka.pkg.name | kafka安装包名 | -| kafka.pkg.url | kafka安装包下载链接 | -| confluent.pkg.name | confluent安装包名 | -| confluent.pkg.url | confluent安装包下载链接 | -| connector.mysql.pkg.name | mysql connector安装包名 | -| connector.mysql.pkg.url | mysql connector安装包下载链接 | -| connector.opengauss.pkg.name | opengauss connector安装包名 | -| connector.opengauss.pkg.url | opengauss connector安装包下载链接 | -| datacheck.install.path | datacheck安装路径 | -| datacheck.path | datacheck所在路径 | -| datacheck.pkg.path | datacheck安装包所在路径 | -| datacheck.pkg.name | datacheck安装包名 | -| datacheck.pkg.url | datacheck安装包下载链接 | - -工具的安装支持离线安装和在线安装: - -- 在线安装:将会从指定链接下载安装包到安装包指定位置,并从指定位置获取安装包解压并进行安装。 -- 离线安装:从指定位置获取安装包解压并进行安装。 - -如果输入命令时不指定安装方式,那么portal会根据/ops/portal/config目录的migrationConfig.properties文件中参数决定安装方式: - -| 参数名称 | 参数说明 | -| :---------------------------------------------------- | :---------------------------------------------------- | -| default.install.mysql.full.migration.tools.way | 全量迁移工具默认安装方式:offline为离线,online为在线 | -| default.install.mysql.incremental.migration.tools.way | 增量迁移工具默认安装方式:offline为离线,online为在线 | -| default.install.mysql.datacheck.tools.way | 数据校验工具默认安装方式:offline为离线,online为在线 | -| default.install.mysql.reverse.migration.tools.way | 反向迁移工具默认安装方式:offline为离线,online为在线 | - -使用以下指令可以安装对应的迁移工具,举例: - -```bash -sh gs_rep_portal.sh install_mysql_all_migration_tools 1 & -``` - -在命令行运行这条命令可以安装所有迁移功能用到的迁移工具。 - -### 安装指令 - -| 指令名称 | 指令说明 | -| :------------------------------------------------ | :------------------------------------------------ | -| install_mysql_full_migration_tools_online | 在线安装mysql全量迁移工具 | -| install_mysql_full_migration_tools_offline | 离线安装mysql全量迁移工具 | -| install_mysql_full_migration_tools | 安装mysql全量迁移工具(安装方式由配置文件指定) | -| 
install_mysql_incremental_migration_tools_online | 在线安装mysql增量迁移工具 | -| install_mysql_incremental_migration_tools_offline | 离线安装mysql增量迁移工具 | -| install_mysql_incremental_migration_tools | 安装mysql增量迁移工具(安装方式由配置文件指定) | -| install_mysql_reverse_migration_tools_online | 在线安装mysql反向迁移工具 | -| install_mysql_reverse_migration_tools_offline | 离线安装mysql反向迁移工具 | -| install_mysql_reverse_migration_tools | 安装mysql反向迁移工具(安装方式由配置文件指定) | -| install_mysql_datacheck_tools_online | 在线安装mysql数据校验工具 | -| install_mysql_datacheck_tools_offline | 离线安装mysql数据校验工具 | -| install_mysql_datacheck_tools | 安装mysql数据校验工具(安装方式由配置文件指定) | -| install_mysql_all_migration_tools | 安装mysql迁移工具(各工具安装方式由配置文件指定) | - -### 配置参数 - -用户可以在/ops/portal/config目录的migrationConfig.properties文件中修改迁移所用参数。 - -参数优先级:workspace下设置的参数 > 公共空间参数。如果使用的workspace.id和之前存在的workspace.id相同的话将沿用之前的workspace里面的参数,如果不同的话,那么portal将从config文件夹中复制一份配置文件到id对应的workspace下面作为这个任务的配置文件。 - -| 参数名称 | 参数说明 | -| :------------------------ | :------------------ | -| mysql.user.name | mysql数据库用户名 | -| mysql.user.password | mysql数据库用户密码 | -| mysql.database.host | mysql数据库ip | -| mysql.database.port | mysql数据库端口 | -| mysql.database.name | mysql数据库名 | -| opengauss.user.name | MogDB数据库用户名 | -| opengauss.user.password | MogDB数据库用户密码 | -| opengauss.database.host | MogDB数据库ip | -| opengauss.database.port | MogDB数据库端口 | -| opengauss.database.name | MogDB数据库名 | -| opengauss.database.schema | MogDB数据库模式名 | - -除了配置迁移所用基本参数外,用户还可在指定位置自行配置工具自身的所用参数。但是portal会默认修改工具自身的临时文件和日志位置,并分配部分工具自身所用的端口。用户可自行查看并修改工具的配置文件,默认工具配置文件位置如下表。 - -注意事项: - -- zookeeper默认端口2181、kafka默认端口9092、schema-registry默认端口8081不会自动分配,其余工具均会自动分配端口。用户如果需要修改工具的端口,请不要修改IP。如果需要修改kafka的端口,要注意将kafka的文件中的参数listeners的值修改为`PLAINTEXT://localhost:要配置的端口`。 -- 下表使用${config}代表/ops/portal/config目录,即公共空间配置的参数。如果想修改某个workspace的参数,比如workspace.id=2的计划的参数,请将/ops/portal/config替换为/ops/portal/workspace/2/config。 -- 下表使用${kafka.path}代表/ops/portal/config目录的toolspath.properties文件里面kafka.path的值。 -- 下表使用${confluent.path}代表/ops/portal/config目录的toolspath.properties文件里面confluent.path的值。 -- 每次创建新的任务时,/ops/portal/config/debezium目录的connect-avro-standalone.properties文件会被自动复制成四份并修改端口。 - -| 工具名称 | 配置文件位置 | -| ------------------- | ------------------------------------------------------------ | -| chameleon | ${config}/chameleon/config-example.yml | -| zookeeper | ${kafka.path}/config/zookeeper.properties | -| kafka | ${kafka.path}/config/server.properties | -| schema-registry | ${confluent.path}/etc/schema-registry/schema-registry.properties | -| connector-mysql | ${config}/debezium/connect-avro-standalone.properties | -| | ${config}/debezium/mysql-source.properties | -| | ${config}/debezium/mysql-sink.properties | -| connector-opengauss | ${config}/debezium/connect-avro-standalone.properties | -| | ${config}/debezium/opengauss-source.properties | -| | ${config}/debezium/opengauss-sink.properties | -| datacheck | ${config}/datacheck/application-source.yml | -| | ${config}/datacheck/application-sink.yml | -| | ${config}/datacheck/application.yml | - -## 执行迁移计划 - -portal支持启动多个任务执行不同的迁移计划,但是要求各迁移计划使用的MySQL实例和MogDB数据库互不相同。 - -启动迁移计划时需要添加参数,这样不同的迁移计划可以根据不同的workspace.id进行区分,如果不添加的话,workspace.id默认值为1。 - -启动workspace.id为2的全量迁移: - -```bash -sh gs_rep_portal.sh start_mysql_full_migration 2 & -``` - -portal除了支持单项功能的启动与停止,也会提供一些组合的默认计划: - -启动workspace.id为2的包括全量迁移和全量校验在内的迁移计划: - -```bash -sh gs_rep_portal.sh start_plan1 2 & -``` - -### 计划列表 - -| 计划名称 | 包括指令 | -| :------- | :------------------------------------------- | -| plan1 | 全量迁移→全量校验 | -| plan2 | 全量迁移→全量校验→增量迁移→增量校验 | -| 
plan3 | 全量迁移→全量校验→增量迁移→增量校验→反向迁移 | - -### 增量迁移和反向迁移 - -增量迁移功能是持续将MySQL端的数据修改同步到MogDB端的功能,而反向迁移功能是持续将MogDB端的数据修改同步到MySQL端的功能,所以二者均不会自动关闭。如果用户想要停止增量迁移功能,需要另开窗口输入指令停止增量迁移功能,反向迁移功能同理。 - -并且需要注意的是:增量迁移和反向迁移不能同时开启,如果一个计划中包含了增量迁移和反向迁移,那么需要用户手动停止增量迁移,启动反向迁移。用户在停止增量迁移之后到启动反向迁移之前,禁止向MogDB进行作业,否则会导致这之间的数据丢失。 - -以启动默认计划3为例: - -1. 在配置好配置文件后输入以下指令启动workspace.id为3的计划plan3: - - ```bash - sh gs_rep_portal.sh start_plan3 3 & - ``` - - 这时portal会自动执行全量迁移→全量校验→增量迁移→增量校验,然后一直处于增量迁移状态(此时增量迁移和增量校验同时运行)。 - -2. 如果用户想要停止增量迁移功能,需要另开窗口输入以下指令停止增量迁移功能: - - ```bash - sh gs_rep_portal.sh stop_incremental_migration 3 & - ``` - - 输入指令后,这个进程会退出,而正在执行计划的workspace.id为3的portal主进程会接收到停止增量迁移的消息,从而停止增量迁移,等待下一步指令。 - -3. 如果用户想要启动反向迁移功能,需要输入以下指令: - - ```bash - sh gs_rep_portal.sh run_reverse_migration 3 & - ``` - - 输入指令后,这个进程会退出,而正在执行计划的workspace.id为3的portal主进程会接收到启动反向迁移的消息,从而启动反向迁移,此时portal一直处于反向迁移状态。 - -如果想要停止整个迁移计划,请参考下方的“停止计划”小节。 - -以下为启动迁移计划的指令列表: - -### 启动指令列表 - -| 指令名称 | 指令说明 | -| :------------------------------------------ | :----------------------------------------------------------- | -| start_mysql_full_migration | 开始mysql全量迁移 | -| start_mysql_incremental_migration | 开始mysql增量迁移 | -| start_mysql_reverse_migration | 开始mysql反向迁移 | -| start_mysql_full_migration_datacheck | 开始mysql全量校验 | -| start_mysql_incremental_migration_datacheck | 开始mysql增量校验 | -| start_plan1 | 开始默认计划plan1 | -| start_plan2 | 开始默认计划plan2 | -| start_plan3 | 开始默认计划plan3 | -| start_current_plan | 开始自定义计划 | -| show_plans | 显示默认计划 | -| show_information | 显示数据库相关信息,包括mysql和openGuass端的数据库名、用户名、密码、ip、端口等 | -| stop_plan | 停止计划 | - -用户也可以在/ops/portal/config目录的currentPlan文件中自定义迁移计划,但自定义迁移计划需要遵守以下规则: - -- 在currentPlan中每行填入一条启动单个迁移任务的指令,如start_mysql_full_migration,start_mysql_incremental_migration等。指令的顺序遵循: - - - start_mysql_full_migration - - start_mysql_full_migration_datacheck - - start_mysql_incremental_migration - - start_mysql_incremental_migration_datacheck - - start_mysql_reverse_migration - - 如果顺序错误则portal报错。 - -- 增量校验的上一项一定是增量迁移,全量校验的上一项一定是全量迁移。 - -- 每个单项任务只能添加一次。 - -### 停止计划 - -举例: - -在portal正在执行计划的状态下,另开一个窗口输入以下指令可以停止workspace.id为3的任务: - -```bash -sh gs_rep_portal.sh stop_plan 3 & -``` - -输入指令后,这个进程会退出,而正在执行计划的workspace.id为3的portal主进程会接收到停止计划的消息,从而停止计划。 - -### 启动多个计划 - -portal支持同时启动多个计划,但是这些计划的mysql端应该为各不相同的实例,MogDB端应该为各不相同的数据库: - -首先修改配置文件,详情见配置参数环节。 - -使用workspace.id为p1启动第一个迁移计划(这里以启动计划3为例): - -```bash -sh gs_rep_portal.sh start_plan3 p1 & -``` - -然后再次修改配置文件。 - -使用workspace.id为p2启动第一个迁移计划(这里以启动计划3为例): - -```bash -sh gs_rep_portal.sh start_plan3 p2 & -``` - -这样就启动了多个portal。 - -## 卸载迁移工具 - -使用以下指令可以卸载不同功能对应的迁移工具,举例: - -```bash -sh gs_rep_portal.sh uninstall_mysql_all_migration_tools 1 & -``` - -在命令行运行这条命令可以卸载所有功能用到的迁移工具。 - -| 指令名称 | 指令说明 | -| :------------------------------------------ | :-------------------- | -| uninstall_mysql_full_migration_tools | 卸载mysql全量迁移工具 | -| uninstall_mysql_incremental_migration_tools | 卸载mysql增量迁移工具 | -| uninstall_mysql_datacheck_tools | 卸载mysql数据校验工具 | -| uninstall_mysql_reverse_migration_tools | 卸载mysql反向迁移工具 | -| uninstall_mysql_all_migration_tools | 卸载mysql迁移工具 | - -## 完整数据迁移流程 - -1. 下载gs_rep_portal安装包 - - ```bash - wget -c https://opengauss.obs.cn-south-1.myhuaweicloud.com/tools/portal/PortalControl-5.0.0.tar.gz - ``` - -2. 解压gs_rep_portal安装包 - - ```bash - tar -zxvf PortalControl-5.0.0.tar.gz - ``` - -3. 在/ops/portal/config目录的toolspath.properties文件中修改安装路径,然后启动命令安装 - - ```bash - sh gs_rep_portal.sh install_mysql_all_migration_tools 1 & - ``` - -4. 
在/ops/portal/config目录的migrationConfig.properties文件中修改迁移参数,指定新的workspace.id为2启动迁移计划3 - - ```bash - sh gs_rep_portal.sh start_plan3 2 & - ``` - -5. 程序将自动运行至增量迁移和增量校验同时开启中,让workspace.id为2的任务停止增量迁移,此时程序进入等待状态,之后可以启动反向迁移或停止计划 - - ```bash - sh gs_rep_portal.sh stop_incremental_migration 2 & - ``` - -6. 启动反向迁移,此时程序进入反向迁移状态,之后可以停止计划 - - ```bash - sh gs_rep_portal.sh run_reverse_migration 2 & - ``` - -7. 停止workspace.id为2的计划 - - ```bash - sh gs_rep_portal.sh stop_plan 2 & - ``` \ No newline at end of file diff --git a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/reverse-migration.md b/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/reverse-migration.md deleted file mode 100644 index ee1aa222..00000000 --- a/product/zh/docs-mogdb/v5.0/developer-guide/mysql-compatibility-description/migrating-data/reverse-migration.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: 反向迁移 -summary: 反向迁移 -author: Guo Huan -date: 2023-05-29 ---- - -# 反向迁移 - -## 功能介绍 - -反向迁移是指将MogDB端产生的增量数据迁移至mysql端。 - -## 原理简介 - -debezium opengauss connector的source端,监控MogDB的xlog日志,并将数据的DML操作以AVRO格式写入到kafka; - -debezium opengauss connector的sink端,从kafka读取AVRO格式的数据,在mysql端按表并行回放,从而完成数据的DML操作从MogDB在线迁移至mysql。 - -## 特性优势 - -反向迁移可满足用户业务迁移逃生的诉求,保持源端、目标端两个库并行运行,在目标端数据库出问题后应用能及时切回源端数据库。 - -## 操作步骤 - -反向迁移gs_replicate依赖MySQL一键式迁移工具gs_rep_portal,可实现反向迁移的安装、启动、停止、卸载整个过程。 - -1. 下载gs_rep_portal - - ```bash - wget https://opengauss.obs.cn-south-1.myhuaweicloud.com/tools/portal/PortalControl-5.0.0.tar.gz - ``` - -2. 解压,并进入portal对应目录 - - ```bash - tar -zxvf PortalControl-5.0.0.tar.gz - cd portal - ``` - -3. 修改gs_rep_portal配置文件 - - 配置文件位于config目录内,反向迁移相关的配置文件主要包含如下两个,相关参数含义简要说明如下: - - - toolspath.properties - - ```bash - # 反向迁移依赖包路径,可自定义修改 - debezium.path=/ops/portal/tools/debezium/ - # kafka路径,位于反向迁移依赖包路径下,可自定义修改 - kafka.path=/ops/portal/tools/debezium/kafka_2.13-3.2.3/ - # confluent路径,位于反向迁移依赖包路径下,可自定义修改 - confluent.path=/ops/portal/tools/debezium/confluent-5.5.1/ - # connector路径,位于反向迁移依赖包路径下,可自定义修改 - connector.path=/ops/portal/tools/debezium/plugin/ - # debezium opengauss connector路径,位于connector路径下,可自定义修改 - connector.opengauss.path=/ops/portal/tools/debezium/plugin/debezium-connector-opengauss/ - # kafka下载路径 - kafka.pkg.url=https://downloads.apache.org/kafka/3.2.3/kafka_2.13-3.2.3.tgz - # confluent下载路径 - confluent.pkg.url=https://packages.confluent.io/archive/5.5/confluent-community-5.5.1-2.12.zip - # debezium opengauss connector下载路径 - connector.opengauss.pkg.url=https://opengauss.obs.cn-south-1.myhuaweicloud.com/5.0.0/tools/replicate-openGauss2mysql-5.0.0.tar.gz - # 反向迁移依赖安装包路径,可自定义修改 - debezium.pkg.path=/ops/portal/pkg/debezium/ - # kafka包名 - kafka.pkg.name=kafka_2.13-3.2.3.tgz - # confluent包名 - confluent.pkg.name=confluent-community-5.5.1-2.12.zip - # openGauss2mysql在线复制包名 - connector.opengauss.pkg.name=replicate-openGauss2mysql-5.0.0.tar.gz - ``` - - - migrationConfig.properties - - ```bash - # mysql用户名 - mysql.user.name=root - # mysql密码 - mysql.user.password=*** - # mysql数据库ip - mysql.database.host=127.0.0.1 - # mysql数据库端口 - mysql.database.port=3306 - # mysql数据库名称 - mysql.database.name=test123 - # MogDB用户名 - opengauss.user.name=test - # MogDB密码 - opengauss.user.password=*** - # MogDB数据库ip - opengauss.database.host=127.0.0.1 - # MogDB数据库端口 - opengauss.database.port=5432 - # MogDB数据库名称 - opengauss.database.name=test1234 - # MogDB数据库的schema名称 - opengauss.database.schema=test123 - # 
反向迁移的安装方式,默认为offline,表示离线安装,需通过参数debezium.pkg.path指定离线依赖安装包的路径;若设置为online,对应在线安装,在线下载的安装包将存放在参数debezium.pkg.path指定的路径 - default.install.mysql.reverse.migration.tools.way=offline - ``` - -4. 安装 - - ```bash - sh gs_replicate.sh install opengauss-mysql workspace.id - ``` - - 其中workspace.id表示迁移任务id,取值为数字和小写字母的组合,不同的id区分不同的迁移任务,不同迁移任务可并行启动。若使用已存在的workspace.id,并修改其中的配置,请在portal/workspace/${workspace.id}/config/路径下修改对应的配置文件。 - -5. 启动 - - ```bash - sh gs_replicate.sh start opengauss-mysql workspace.id - ``` - -6. 停止 - - ```bash - sh gs_replicate.sh stop opengauss-mysql workspace.id - ``` - -7. 卸载 - - ```bash - sh gs_replicate.sh uninstall opengauss-mysql workspace.id - ``` - -## 注意事项 - -- 当前支持MogDB IUD操作(insert、update、delete)产生的增量数据迁移至MySQL。 - -- MogDB需要3.1.0及以上版本。 - -- 反向迁移依赖于MogDB的逻辑复制,仅限能进行逻辑复制的用户进行操作。 - -- MogDB 的GUC参数设置要求为:wal_level = logical。 - -- 需要调整pg_hba.conf以允许复制(这里的值取决于实际的网络配置以及用于连接的用户): - - ```bash - host replication repuser 0.0.0.0/0 sha256 - ``` - -- Kafka中以AVRO格式存储数据,AVRO字段名称[命名规则](https://gitee.com/link?target=https%3A%2F%2Favro.apache.org%2Fdocs%2F1.11.1%2Fspecification%2F%23names)为: - - 以[A-Za-z_]开头,随后仅包含[A-Za-z0-9_] - - 因此,对于MogDB中的标识符命名,包括表名、列名等,需满足上述命名规范,否则迁移会报错。 \ No newline at end of file diff --git a/product/zh/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md b/product/zh/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md index 16a4f0e5..f99f36aa 100644 --- a/product/zh/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md +++ b/product/zh/docs-mogdb/v5.0/developer-guide/plpgsql/1-4-arrays-and-records.md @@ -761,93 +761,6 @@ MogDB=# DROP PROCEDURE nest_table_proc; ANONYMOUS BLOCK EXECUTE ``` -#### 集合相关函数 - -- unnest_table(anynesttable) - - 描述:返回nesttable中的元素集合。 - - 返回类型:setof anyelement - - 约束:不支持tableof类型嵌套tableof类型或者tableof嵌套其他类型再嵌套tableof类型的情况。 - - 示例: - - ```sql - create or replace procedure f1() - as - type t1 is table of int; - v2 t1 := t1(null, 2, 3, 4, null); - tmp int; - cursor c1 is select * from unnest_table(v2); - begin - open c1; - for i in 1 .. v2.count loop - fetch c1 into tmp; - if tmp is null then - RAISE NOTICE '%', i || ': is null'; - else - RAISE NOTICE '%', i || ': ' || tmp; - end if; - end loop; - close c1; - end; - / - - MogDB=# call f1(); - 1: is null - 2: 2 - 3: 3 - 4: 4 - 5: is null - f1 - ---- - - (1 row) - ``` - -- unnest_table(anyindexbytable) - - 描述:返回table of index by类型根据index排序后的元素集合。 - - 返回类型:setof anyelement - - 约束:不支持tableof类型嵌套tableof类型或者tableof嵌套其他类型再嵌套tableof类型的情况。只支持index by int类型,不支持index by varchar类型。 - - 示例: - - ```sql - create or replace procedure f1() - as - type t1 is table of int index by int; - v2 t1 := t1(1=>1, -10=>(-10), 6=>6, 4=>null); - tmp int; - cursor c1 is select * from unnest_table(v2); - begin - open c1; - for i in 1 .. 
v2.count loop - fetch c1 into tmp; - if tmp is null then - RAISE NOTICE '%', i || ': is null'; - else - RAISE NOTICE '%', i || ': ' || tmp; - end if; - end loop; - close c1; - end; - / - - MogDB=# call f1(); - 1: -10 - 2: 1 - 3: is null - 4: 6 - f1 - ---- - - (1 row) - ``` - ## record ### record类型的变量 diff --git a/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/developer-options.md b/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/developer-options.md index 966b81e3..7243eb1c 100644 --- a/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/developer-options.md +++ b/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/developer-options.md @@ -374,14 +374,12 @@ ustore_attr='ustore_verify_level=FAST;ustore_verify_module=UPAGE:UBTREE:UNDO:RED - ustore_verify_module:控制校验的模块。 - **取值范围**:字符串,设置值UPAGE,UBTREE,UNDO, REDO中的一个或者多个,或者单独设置ALL或者NULL(不区分大小写)。当设置 - - UPAGE,UBTREE,UNDO,REDO中的多个值时,使用”:“作为连接符。例如ustore_verify_module=UPAGE:UBTREE:UNDO:REDO。 + **取值范围**:字符串,设置值UPAGE,UBTREE,UNDO, REDO中的一个或者多个,或者单独设置ALL或者NULL(不区分大小写)。当设置UPAGE,UBTREE,UNDO,REDO中的多个值时,使用”:“作为连接符。例如ustore_verify_module=UPAGE:UBTREE:UNDO:REDO。 **表 2** ustore_verify_module取值含义说明 | 参数取值 | 含义 | - | :------- | :---------------------------------------------------- | +| :------- | :---------------------------------------------------- | | UPAGE | 表示开启数据页面校验。 | | UBTREE | 表示开启UBTREE索引校验。 | | UNDO | 表示开启回滚段数据校验。 | @@ -389,7 +387,7 @@ ustore_attr='ustore_verify_level=FAST;ustore_verify_module=UPAGE:UBTREE:UNDO:RED | ROACH | 表示开启ROACH备份的数据页面校验。 | | ALL | 表示同时开启UPAGE,UBTREE,UNDO,REDO模块数据的校验。 | | NULL | 表示同时关闭UPAGE,UBTREE,UNDO,REDO模块数据的校验。 | - + **默认值**: UPAGE:UBTREE:UNDO - index_trace_level:控制开启索引追踪并控制打印级别,开启后在索引扫描的过程中,会根据不同的打印级别对符合条件的索引元组的信息进行打印。 @@ -456,7 +454,7 @@ ustore_attr='ustore_verify_level=FAST;ustore_verify_module=UPAGE:UBTREE:UNDO:RED **取值范围**:1~INT_MAX/1000 - **默认值**:10,单次(次) + **默认值**:10,单位(次) - ustore_unit_test:研发白盒测试指定测试参数 diff --git a/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/primary-server.md b/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/primary-server.md index 8e6cb244..4919f947 100644 --- a/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/primary-server.md +++ b/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/ha-replication/primary-server.md @@ -294,9 +294,9 @@ date: 2021-04-20 ## hadr_super_user_record_path -**参数说明**:该参数为流式异地容灾参数,表示备数据库实例中hadr_disaster用户的加密文件存放路径。该参数属于SIGHUP类型参数,请 +**参数说明**:该参数为流式异地容灾参数,表示备数据库实例中hadr_disaster用户的加密文件存放路径。 -参考[GUC参数分类](../../../reference-guide/guc-parameters/appendix.md)中方式对应设置方法进行设置。 +该参数属于SIGHUP类型参数,请参考[GUC参数分类](../../../reference-guide/guc-parameters/appendix.md)中方式对应设置方法进行设置。 **修改建议**:由流式容灾密码传递工具自动设置,不需要用户手动添加。 diff --git a/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md b/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md index 149136b7..f5254271 100644 --- a/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md +++ b/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md @@ -13,7 +13,7 @@ date: 2021-04-20 **参数说明**:表示是否开启自适应两阶段哈希聚合功能。 -该参数属于session级别参数,请参考[GUC参数设置方式](../../../reference-guide/guc-parameters/appendix.md)中对应设置方法进行设置。 +该参数属于USERSET级别参数,请参考[GUC参数设置方式](../../../reference-guide/guc-parameters/appendix.md)中对应设置方法进行设置。 **取值范围**:布尔型 diff --git 
a/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/archiving.md b/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/archiving.md index c87737d4..f41b5dbb 100644 --- a/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/archiving.md +++ b/product/zh/docs-mogdb/v5.0/reference-guide/guc-parameters/write-ahead-log/archiving.md @@ -121,9 +121,7 @@ archive_timeout适用于未使用主备流式复制或者需要对WAL日志进 双数据库实例异地灾备模式下,主数据库实例日志将被归档到OBS。 -0是指不开启日志流控, - -1~3600是指设置主数据库实例发生异常发生时到已归档到OBS的恢复点所允许的time_to_target_rpo秒,保证主数据库实例因灾难崩溃时,最多可能丢失的数据的时长在允许范围内。 +0是指不开启日志流控,1~3600是指设置主数据库实例发生异常发生时到已归档到OBS的恢复点所允许的time_to_target_rpo秒,保证主数据库实例因灾难崩溃时,最多可能丢失的数据的时长在允许范围内。 time_to_target_rpo设置时间过小会影响主机的性能,设置过大会失去流控效果。 diff --git a/product/zh/docs-mogdb/v5.0/toc_performance.md b/product/zh/docs-mogdb/v5.0/toc_performance.md index 24c0d55b..e1b4c578 100644 --- a/product/zh/docs-mogdb/v5.0/toc_performance.md +++ b/product/zh/docs-mogdb/v5.0/toc_performance.md @@ -18,16 +18,16 @@ + [设置控制组](/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/setting-control-group.md) + [创建资源池](/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/creating-resource-pool.md) + [SQL优化指南](/performance-tuning/sql-tuning/sql-tuning.md) - + [Query执行流程](/performance-tuning/sql-tuning/query-execution-process.md) - + [SQL执行计划介绍](/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md) - + [调优流程](/performance-tuning/sql-tuning/tuning-process.md) - + [更新统计信息](/performance-tuning/sql-tuning/updating-statistics.md) - + [审视和修改表定义](/performance-tuning/sql-tuning/reviewing-and-modifying-a-table-definition.md) - + [典型SQL调优点](/performance-tuning/sql-tuning/typical-sql-optimization-methods.md) - + [SQL语句改写规则](/performance-tuning/sql-tuning/experience-in-rewriting-sql-statements.md) - + [SQL调优关键参数调整](/performance-tuning/sql-tuning/resetting-key-parameters-during-sql-tuning.md) - + [使用Plan Hint进行调优](/performance-tuning/sql-tuning/hint-based-tuning.md) + + [Query执行流程](/performance-tuning/sql-tuning/query-execution-process.md) + + [SQL执行计划介绍](/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md) + + [调优流程](/performance-tuning/sql-tuning/tuning-process.md) + + [更新统计信息](/performance-tuning/sql-tuning/updating-statistics.md) + + [审视和修改表定义](/performance-tuning/sql-tuning/reviewing-and-modifying-a-table-definition.md) + + [典型SQL调优点](/performance-tuning/sql-tuning/typical-sql-optimization-methods.md) + + [SQL语句改写规则](/performance-tuning/sql-tuning/experience-in-rewriting-sql-statements.md) + + [SQL调优关键参数调整](/performance-tuning/sql-tuning/resetting-key-parameters-during-sql-tuning.md) + + [使用Plan Hint进行调优](/performance-tuning/sql-tuning/hint-based-tuning.md) + [WDR解读指南](/performance-tuning/wdr/wdr.md) - + [WDR Snapshot Schema](/performance-tuning/wdr/wdr-snapshot-schema.md) - + [查看WDR报告](/performance-tuning/wdr/wdr-report.md) + + [WDR Snapshot Schema](/performance-tuning/wdr/wdr-snapshot-schema.md) + + [查看WDR报告](/performance-tuning/wdr/wdr-report.md) + [TPCC性能优化指南](/performance-tuning/TPCC-performance-tuning-guide.md) diff --git a/product/zh/docs-mogdb/v6.0/developer-guide/plpgsql/1-4-arrays-and-records.md b/product/zh/docs-mogdb/v6.0/developer-guide/plpgsql/1-4-arrays-and-records.md index 3780cac5..17f73b08 100644 --- a/product/zh/docs-mogdb/v6.0/developer-guide/plpgsql/1-4-arrays-and-records.md +++ b/product/zh/docs-mogdb/v6.0/developer-guide/plpgsql/1-4-arrays-and-records.md @@ -774,93 +774,6 @@ MogDB=# 
DROP PROCEDURE nest_table_proc; ANONYMOUS BLOCK EXECUTE ``` -#### 集合相关函数 - -- unnest_table(anynesttable) - - 描述:返回nesttable中的元素集合。 - - 返回类型:setof anyelement - - 约束:不支持tableof类型嵌套tableof类型或者tableof嵌套其他类型再嵌套tableof类型的情况。 - - 示例: - - ```sql - create or replace procedure f1() - as - type t1 is table of int; - v2 t1 := t1(null, 2, 3, 4, null); - tmp int; - cursor c1 is select * from unnest_table(v2); - begin - open c1; - for i in 1 .. v2.count loop - fetch c1 into tmp; - if tmp is null then - gms_output.put(i || ': is null'); - else - gms_output.put(i || ': ' || tmp); - end if; - end loop; - close c1; - end; - / - - MogDB=# call f1(); - 1: is null - 2: 2 - 3: 3 - 4: 4 - 5: is null - f1 - ---- - - (1 row) - ``` - -- unnest_table(anyindexbytable) - - 描述:返回table of index by类型根据index排序后的元素集合。 - - 返回类型:setof anyelement - - 约束:不支持tableof类型嵌套tableof类型或者tableof嵌套其他类型再嵌套tableof类型的情况。只支持index by int类型,不支持index by varchar类型。 - - 示例: - - ```sql - create or replace procedure f1() - as - type t1 is table of int index by int; - v2 t1 := t1(1=>1, -10=>(-10), 6=>6, 4=>null); - tmp int; - cursor c1 is select * from unnest_table(v2); - begin - open c1; - for i in 1 .. v2.count loop - fetch c1 into tmp; - if tmp is null then - gms_output.put(i || ': is null'); - else - gms_output.put(i || ': ' || tmp); - end if; - end loop; - close c1; - end; - / - - MogDB=# call f1(); - 1: -10 - 2: 1 - 3: is null - 4: 6 - f1 - ---- - - (1 row) - ``` - ## record ### record类型的变量 diff --git a/product/zh/docs-mogdb/v6.0/reference-guide/guc-parameters/developer-options.md b/product/zh/docs-mogdb/v6.0/reference-guide/guc-parameters/developer-options.md index 6551c0de..d8276518 100644 --- a/product/zh/docs-mogdb/v6.0/reference-guide/guc-parameters/developer-options.md +++ b/product/zh/docs-mogdb/v6.0/reference-guide/guc-parameters/developer-options.md @@ -358,14 +358,12 @@ ustore_attr='ustore_verify_level=FAST;ustore_verify_module=UPAGE:UBTREE:UNDO:RED - ustore_verify_module:控制校验的模块。 - **取值范围**:字符串,设置值UPAGE,UBTREE,UNDO, REDO中的一个或者多个,或者单独设置ALL或者NULL(不区分大小写)。当设置 - - UPAGE,UBTREE,UNDO,REDO中的多个值时,使用”:“作为连接符。例如ustore_verify_module=UPAGE:UBTREE:UNDO:REDO。 + **取值范围**:字符串,设置值UPAGE,UBTREE,UNDO, REDO中的一个或者多个,或者单独设置ALL或者NULL(不区分大小写)。当设置UPAGE,UBTREE,UNDO,REDO中的多个值时,使用”:“作为连接符。例如ustore_verify_module=UPAGE:UBTREE:UNDO:REDO。 **表 2** ustore_verify_module取值含义说明 | 参数取值 | 含义 | - | :------- | :---------------------------------------------------- | +| :------- | :---------------------------------------------------- | | UPAGE | 表示开启数据页面校验。 | | UBTREE | 表示开启UBTREE索引校验。 | | UNDO | 表示开启回滚段数据校验。 | @@ -373,7 +371,7 @@ ustore_attr='ustore_verify_level=FAST;ustore_verify_module=UPAGE:UBTREE:UNDO:RED | ROACH | 表示开启ROACH备份的数据页面校验。 | | ALL | 表示同时开启UPAGE,UBTREE,UNDO,REDO模块数据的校验。 | | NULL | 表示同时关闭UPAGE,UBTREE,UNDO,REDO模块数据的校验。 | - + **默认值**: UPAGE:UBTREE:UNDO - index_trace_level:控制开启索引追踪并控制打印级别,开启后在索引扫描的过程中,会根据不同的打印级别对符合条件的索引元组的信息进行打印。 @@ -440,7 +438,7 @@ ustore_attr='ustore_verify_level=FAST;ustore_verify_module=UPAGE:UBTREE:UNDO:RED **取值范围**:1~INT_MAX/1000 - **默认值**:10,单次(次) + **默认值**:10,单位(次) - ustore_unit_test:研发白盒测试指定测试参数 diff --git a/product/zh/docs-mogdb/v6.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md b/product/zh/docs-mogdb/v6.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md index 268474cc..88cea535 100644 --- a/product/zh/docs-mogdb/v6.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md +++ 
b/product/zh/docs-mogdb/v6.0/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md @@ -13,7 +13,7 @@ date: 2021-04-20 **参数说明**:表示是否开启自适应两阶段哈希聚合功能。 -该参数属于session级别参数,请参考[GUC参数设置方式](../../../reference-guide/guc-parameters/appendix.md)中对应设置方法进行设置。 +该参数属于USERSET级别参数,请参考[GUC参数设置方式](../../../reference-guide/guc-parameters/appendix.md)中对应设置方法进行设置。 **取值范围**:布尔型 diff --git a/product/zh/docs-mogdb/v6.0/toc_performance.md b/product/zh/docs-mogdb/v6.0/toc_performance.md index c4f45659..d996ae70 100644 --- a/product/zh/docs-mogdb/v6.0/toc_performance.md +++ b/product/zh/docs-mogdb/v6.0/toc_performance.md @@ -21,16 +21,16 @@ + [设置控制组](/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/setting-control-group.md) + [创建资源池](/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/creating-resource-pool.md) + [SQL优化指南](/performance-tuning/sql-tuning/sql-tuning.md) - + [Query执行流程](/performance-tuning/sql-tuning/query-execution-process.md) - + [SQL执行计划介绍](/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md) - + [调优流程](/performance-tuning/sql-tuning/tuning-process.md) - + [更新统计信息](/performance-tuning/sql-tuning/updating-statistics.md) - + [审视和修改表定义](/performance-tuning/sql-tuning/reviewing-and-modifying-a-table-definition.md) - + [典型SQL调优点](/performance-tuning/sql-tuning/typical-sql-optimization-methods.md) - + [SQL语句改写规则](/performance-tuning/sql-tuning/experience-in-rewriting-sql-statements.md) - + [SQL调优关键参数调整](/performance-tuning/sql-tuning/resetting-key-parameters-during-sql-tuning.md) - + [使用Plan Hint进行调优](/performance-tuning/sql-tuning/hint-based-tuning.md) + + [Query执行流程](/performance-tuning/sql-tuning/query-execution-process.md) + + [SQL执行计划介绍](/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md) + + [调优流程](/performance-tuning/sql-tuning/tuning-process.md) + + [更新统计信息](/performance-tuning/sql-tuning/updating-statistics.md) + + [审视和修改表定义](/performance-tuning/sql-tuning/reviewing-and-modifying-a-table-definition.md) + + [典型SQL调优点](/performance-tuning/sql-tuning/typical-sql-optimization-methods.md) + + [SQL语句改写规则](/performance-tuning/sql-tuning/experience-in-rewriting-sql-statements.md) + + [SQL调优关键参数调整](/performance-tuning/sql-tuning/resetting-key-parameters-during-sql-tuning.md) + + [使用Plan Hint进行调优](/performance-tuning/sql-tuning/hint-based-tuning.md) + [WDR解读指南](/performance-tuning/wdr/wdr.md) - + [WDR Snapshot Schema](/performance-tuning/wdr/wdr-snapshot-schema.md) - + [查看WDR报告](/performance-tuning/wdr/wdr-report.md) + + [WDR Snapshot Schema](/performance-tuning/wdr/wdr-snapshot-schema.md) + + [查看WDR报告](/performance-tuning/wdr/wdr-report.md) + [TPCC性能优化指南](/performance-tuning/TPCC-performance-tuning-guide.md) -- Gitee From 6ba4fe3e8ad6c3aec49360a2dbd5082c0ce795ad Mon Sep 17 00:00:00 2001 From: spaceoddity91719 Date: Wed, 16 Oct 2024 14:23:06 +0800 Subject: [PATCH 3/4] fix(mogdb):issue #IATRZS --- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md | 2 +- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md | 2 +- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md | 2 +- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md | 2 +- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md | 2 +- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md | 2 +- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md | 2 +- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md | 2 +- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md 
| 2 +- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md | 2 +- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md | 2 +- .../schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/product/en/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md b/product/en/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md index 867723ac..524158b4 100644 --- a/product/en/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md +++ b/product/en/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md @@ -7,4 +7,4 @@ date: 2021-04-19 # STATEMENT_COMPLEX_HISTORY -**STATEMENT_COMPLEX_HISTORY** displays load management information about a completed job executed on the primary database node. Columns in this view are the same as those in Table 1 of [GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md). \ No newline at end of file +**STATEMENT_COMPLEX_HISTORY** displays load management information about a completed job executed on the primary database node. Columns in this view are the same as those in Table 1 of [GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md b/product/en/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md index 4f9d6648..93d1f4ee 100644 --- a/product/en/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md +++ b/product/en/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md @@ -7,4 +7,4 @@ date: 2021-04-19 # STATEMENT_COMPLEX_HISTORY_TABLE -**STATEMENT_COMPLEX_HISTORY_TABLE** displays load management information about completed jobs executed on the current primary database node. Data is dumped from the kernel to this system catalog. Columns in this view are the same as those in Table 1 of [GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md). \ No newline at end of file +**STATEMENT_COMPLEX_HISTORY_TABLE** displays load management information about completed jobs executed on the current primary database node. Data is dumped from the kernel to this system catalog. Columns in this view are the same as those in Table 1 of [GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md b/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md index 867723ac..524158b4 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md @@ -7,4 +7,4 @@ date: 2021-04-19 # STATEMENT_COMPLEX_HISTORY -**STATEMENT_COMPLEX_HISTORY** displays load management information about a completed job executed on the primary database node. Columns in this view are the same as those in Table 1 of [GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md). 
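As background for the link corrections in these hunks, the view they document can be inspected directly. The query below is illustrative only; the connection details are assumed rather than taken from this patch, and reading `dbe_perf` views typically requires monitoring-admin privileges:

```bash
# Illustrative query: list a few completed-job load-management records
# recorded on the primary database node.
gsql -d postgres -p 5432 -c "SELECT * FROM dbe_perf.statement_complex_history LIMIT 5;"
```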
\ No newline at end of file +**STATEMENT_COMPLEX_HISTORY** displays load management information about a completed job executed on the primary database node. Columns in this view are the same as those in Table 1 of [GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md b/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md index 4f9d6648..93d1f4ee 100644 --- a/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md +++ b/product/en/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md @@ -7,4 +7,4 @@ date: 2021-04-19 # STATEMENT_COMPLEX_HISTORY_TABLE -**STATEMENT_COMPLEX_HISTORY_TABLE** displays load management information about completed jobs executed on the current primary database node. Data is dumped from the kernel to this system catalog. Columns in this view are the same as those in Table 1 of [GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md). \ No newline at end of file +**STATEMENT_COMPLEX_HISTORY_TABLE** displays load management information about completed jobs executed on the current primary database node. Data is dumped from the kernel to this system catalog. Columns in this view are the same as those in Table 1 of [GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md b/product/en/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md index 867723ac..524158b4 100644 --- a/product/en/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md +++ b/product/en/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md @@ -7,4 +7,4 @@ date: 2021-04-19 # STATEMENT_COMPLEX_HISTORY -**STATEMENT_COMPLEX_HISTORY** displays load management information about a completed job executed on the primary database node. Columns in this view are the same as those in Table 1 of [GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md). \ No newline at end of file +**STATEMENT_COMPLEX_HISTORY** displays load management information about a completed job executed on the primary database node. Columns in this view are the same as those in Table 1 of [GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md b/product/en/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md index 4f9d6648..93d1f4ee 100644 --- a/product/en/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md +++ b/product/en/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md @@ -7,4 +7,4 @@ date: 2021-04-19 # STATEMENT_COMPLEX_HISTORY_TABLE -**STATEMENT_COMPLEX_HISTORY_TABLE** displays load management information about completed jobs executed on the current primary database node. Data is dumped from the kernel to this system catalog. 
Columns in this view are the same as those in Table 1 of [GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md). \ No newline at end of file +**STATEMENT_COMPLEX_HISTORY_TABLE** displays load management information about completed jobs executed on the current primary database node. Data is dumped from the kernel to this system catalog. Columns in this view are the same as those in Table 1 of [GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md). \ No newline at end of file diff --git a/product/zh/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md b/product/zh/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md index a7201d8e..23e21983 100644 --- a/product/zh/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md +++ b/product/zh/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md @@ -7,4 +7,4 @@ date: 2021-04-19 # STATEMENT_COMPLEX_HISTORY -STATEMENT_COMPLEX_HISTORY视图显示在数据库主节点上执行作业结束后的负载管理记录。具体的字段请参考[GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md)表1。 +STATEMENT_COMPLEX_HISTORY视图显示在数据库主节点上执行作业结束后的负载管理记录。具体的字段请参考[GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md)表1。 diff --git a/product/zh/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md b/product/zh/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md index 7635fc29..d27abac4 100644 --- a/product/zh/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md +++ b/product/zh/docs-mogdb/v3.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md @@ -7,4 +7,4 @@ date: 2021-04-19 # STATEMENT_COMPLEX_HISTORY_TABLE -STATEMENT_COMPLEX_HISTORY_TABLE系统表显示数据库主节点执行作业结束后的负载管理记录。此数据是从内核中转储到系统表中的数据。具体的字段请参考[GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md)表1。 +STATEMENT_COMPLEX_HISTORY_TABLE系统表显示数据库主节点执行作业结束后的负载管理记录。此数据是从内核中转储到系统表中的数据。具体的字段请参考[GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md)表1。 diff --git a/product/zh/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md b/product/zh/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md index a7201d8e..23e21983 100644 --- a/product/zh/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md +++ b/product/zh/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md @@ -7,4 +7,4 @@ date: 2021-04-19 # STATEMENT_COMPLEX_HISTORY -STATEMENT_COMPLEX_HISTORY视图显示在数据库主节点上执行作业结束后的负载管理记录。具体的字段请参考[GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md)表1。 +STATEMENT_COMPLEX_HISTORY视图显示在数据库主节点上执行作业结束后的负载管理记录。具体的字段请参考[GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md)表1。 diff --git a/product/zh/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md b/product/zh/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md index 7635fc29..d27abac4 100644 --- a/product/zh/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md +++ 
b/product/zh/docs-mogdb/v5.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md
@@ -7,4 +7,4 @@ date: 2021-04-19
# STATEMENT_COMPLEX_HISTORY_TABLE
-STATEMENT_COMPLEX_HISTORY_TABLE系统表显示数据库主节点执行作业结束后的负载管理记录。此数据是从内核中转储到系统表中的数据。具体的字段请参考[GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md)表1。
+STATEMENT_COMPLEX_HISTORY_TABLE系统表显示数据库主节点执行作业结束后的负载管理记录。此数据是从内核中转储到系统表中的数据。具体的字段请参考[GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md)表1。
diff --git a/product/zh/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md b/product/zh/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md
index a7201d8e..23e21983 100644
--- a/product/zh/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md
+++ b/product/zh/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md
@@ -7,4 +7,4 @@ date: 2021-04-19
# STATEMENT_COMPLEX_HISTORY
-STATEMENT_COMPLEX_HISTORY视图显示在数据库主节点上执行作业结束后的负载管理记录。具体的字段请参考[GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md)表1。
+STATEMENT_COMPLEX_HISTORY视图显示在数据库主节点上执行作业结束后的负载管理记录。具体的字段请参考[GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md)表1。
diff --git a/product/zh/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md b/product/zh/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md
index 7635fc29..d27abac4 100644
--- a/product/zh/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md
+++ b/product/zh/docs-mogdb/v6.0/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md
@@ -7,4 +7,4 @@ date: 2021-04-19
# STATEMENT_COMPLEX_HISTORY_TABLE
-STATEMENT_COMPLEX_HISTORY_TABLE系统表显示数据库主节点执行作业结束后的负载管理记录。此数据是从内核中转储到系统表中的数据。具体的字段请参考[GS_SESSION_MEMORY_DETAIL](../../../../reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md)表1。
+STATEMENT_COMPLEX_HISTORY_TABLE系统表显示数据库主节点执行作业结束后的负载管理记录。此数据是从内核中转储到系统表中的数据。具体的字段请参考[GLOBAL_STATEMENT_COMPLEX_RUNTIME](./GLOBAL_STATEMENT_COMPLEX_RUNTIME.md)表1。
--
Gitee


From b30832bc81b93c8276a661915613b8cf5208d82a Mon Sep 17 00:00:00 2001
From: spaceoddity91719
Date: Wed, 16 Oct 2024 14:24:07 +0800
Subject: [PATCH 4/4] fix(mogdb):delete 5.2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../docs-mogdb/v5.2/AI-features/ai-feature.md |   20 -
 .../ai4db/abo-optimizer/abo-optimizer.md      |   12 -
 .../adaptive-plan-selection-best-practices.md |   31 -
 .../adaptive-plan-selection-overview.md       |   10 -
 .../adaptive-plan-selection-prerequisites.md  |   10 -
 ...adaptive-plan-selection-troubleshooting.md |   10 -
 .../adaptive-plan-selection-usage-guide.md    |   22 -
 .../ai4db-adaptive-plan-selection.md          |   18 -
 ...i4db-intelligent-cardinality-estimation.md |   18 -
 ...t-cardinality-estimation-best-practices.md |   67 -
 ...lligent-cardinality-estimation-overview.md |   10 -
 ...nt-cardinality-estimation-prerequisites.md |   10 -
 ...-cardinality-estimation-troubleshooting.md |   10 -
 ...gent-cardinality-estimation-usage-guide.md |   15 -
 .../1-1-x-tuner-overview.md                   |   10 -
 .../1-2-preparations.md                       |  227 --
 .../1-3-examples.md                           |  158 --
 .../1-4-obtaining-help-information.md         |   51 -
 .../1-5-command-reference.md                  |   50 -
 .../1-6-Troubleshooting.md                    |   14 -
.../2-1-single-query-index-recommendation.md | 59 - .../2-2-virtual-index.md | 123 - ...2-3-workload-level-index-recommendation.md | 105 - .../3-1-overview.md | 10 - .../3-2-environment-deployment.md | 11 - .../3-3-usage-guide.md | 34 - .../3-4-obtaining-help-information.md | 41 - .../3-5-command-reference.md | 20 - .../3-6-troubleshooting.md | 15 - .../4-1-overview.md | 10 - .../4-2-environment-deployment.md | 10 - .../4-3-usage-guide.md | 40 - .../4-4-obtaining-help-information.md | 38 - .../4-5-command-reference.md | 21 - .../4-6-troubleshooting.md | 11 - .../5-1-overview.md | 16 - .../5-2-usage-guide.md | 96 - .../5-3-obtaining-help-information.md | 49 - .../5-4-command-reference.md | 17 - .../5-5-troubleshooting.md | 11 - .../6-1-overview.md | 20 - .../6-2-usage-guide.md | 42 - .../6-3-obtaining-help-information.md | 36 - .../6-4-command-reference.md | 19 - .../6-5-troubleshooting.md | 10 - .../7-anomaly-detection/7-1-overview.md | 10 - .../7-anomaly-detection/7-2-usage-guide.md | 54 - .../7-3-obtaining-help-information.md | 41 - .../7-4-command-reference.md | 21 - .../7-5-troubleshooting.md | 11 - .../ai-sub-functions-of-the-dbmind.md | 19 - .../anomaly-analysis-command-reference.md | 20 - ...aly-analysis-obtaining-help-information.md | 39 - .../anomaly-analysis-overview.md | 10 - .../anomaly-analysis-troubleshooting.md | 10 - .../anomaly-analysis-usage-guide.md | 25 - .../anomaly-analysis/anomaly-analysis.md | 14 - .../anomaly-detection-command-reference.md | 21 - ...ly-detection-obtaining-help-information.md | 41 - .../anomaly-detection-overview.md | 10 - .../anomaly-detection-troubleshooting.md | 11 - .../anomaly-detection-usage-guide.md | 54 - .../anomaly-detection/anomaly-detection.md | 14 - ...cast-trend-prediction-command-reference.md | 21 - ...trend-prediction-environment-deployment.md | 10 - ...d-prediction-obtaining-help-information.md | 57 - .../forcast-trend-prediction-overview.md | 10 - ...orcast-trend-prediction-troubleshooting.md | 11 - .../forcast-trend-prediction-usage-guide.md | 40 - .../forcast-trend-prediction.md | 15 - .../index-advisor-index-recommendation.md | 14 - .../single-query-index-recommendation.md | 71 - .../virtual-index.md | 125 - .../workload-level-index-recommendation.md | 135 -- .../slow-sql-statements-command-reference.md | 20 - ...w-sql-statements-environment-deployment.md | 11 - ...l-statements-obtaining-help-information.md | 41 - .../slow-sql-statements-overview.md | 10 - .../slow-sql-statements-troubleshooting.md | 15 - .../slow-sql-statements-usage-guide.md | 34 - .../slow-sql-statements.md | 15 - ...l-statement-rewriting-command-reference.md | 19 - ...nt-rewriting-obtaining-help-information.md | 36 - ...writer-sql-statement-rewriting-overview.md | 20 - ...sql-statement-rewriting-troubleshooting.md | 10 - ...ter-sql-statement-rewriting-usage-guide.md | 42 - .../sql-rewriter-sql-statement-rewriting.md | 14 - ...ag-slow-sql-discovery-command-reference.md | 17 - ...ql-discovery-obtaining-help-information.md | 49 - .../sqldiag-slow-sql-discovery-overview.md | 16 - ...diag-slow-sql-discovery-troubleshooting.md | 11 - .../sqldiag-slow-sql-discovery-usage-guide.md | 96 - .../sqldiag-slow-sql-discovery.md | 22 - .../x-tuner-command-reference.md | 50 - .../x-tuner-examples.md | 158 -- .../x-tuner-obtaining-help-information.md | 48 - .../x-tuner-overview.md | 10 - ...er-parameter-optimization-and-diagnosis.md | 15 - .../x-tuner-preparations.md | 227 -- .../x-tuner-troubleshooting.md | 14 - .../ai4db/ai4db-autonomous-database-o&m.md | 34 - 
.../components-that-support-dbmind.md | 12 - .../prometheus-exporter-command-reference.md | 192 -- ...metheus-exporter-environment-deployment.md | 170 -- ...eus-exporter-obtaining-help-information.md | 16 - .../prometheus-exporter-overview.md | 12 - .../prometheus-exporter-troubleshooting.md | 16 - .../prometheus-exporter-usage-guide.md | 73 - .../prometheus-exporter.md | 15 - .../ai4db/dbmind-mode/1-service.md | 207 -- .../ai4db/dbmind-mode/2-component.md | 37 - .../AI-features/ai4db/dbmind-mode/3-set.md | 62 - .../ai4db/dbmind-mode/component.md | 37 - .../ai4db/dbmind-mode/dbmind-mode.md | 51 - .../AI-features/ai4db/dbmind-mode/service.md | 207 -- .../v5.2/AI-features/ai4db/dbmind-mode/set.md | 62 - .../v5.2/AI-features/db4ai/db4ai.md | 13 - ...query-for-model-training-and-prediction.md | 310 --- ...i-snapshots-for-data-version-management.md | 266 --- .../db4ai/full-process-ai/full-process-ai.md | 16 - .../full-process-ai/plpython-fenced-mode.md | 135 -- .../AI-features/db4ai/native-db4ai-engine.md | 313 --- product/en/docs-mogdb/v5.2/_index.md | 90 - .../MogDB-compared-to-openGauss.md | 124 - .../v5.2/about-mogdb/about-mogdb.md | 15 - .../mogdb-new-feature/release-note.md | 12 - .../2-docker-based-mogdb.md | 37 - .../open-source-components/DBMS-RANDOM.md | 531 ----- .../open-source-components/compat-tools.md | 18 - .../open-source-components/mog_filedump.md | 106 - .../open-source-components/mog_xlogdump.md | 319 --- .../open-source-components/mogdb-monitor.md | 27 - .../open-source-components.md | 14 - .../v5.2/about-mogdb/terms-of-use.md | 22 - .../v5.2/about-mogdb/usage-limitations.md | 27 - .../administrator-guide.md | 17 - .../backup-and-restoration-overview.md | 93 - .../backup-and-restoration.md | 13 - .../flashback-restoration.md | 210 -- .../logical-backup-and-restoration.md | 24 - .../physical-backup-and-restoration.md | 129 -- .../column-store-tables-management.md | 386 ---- ...mon-primary-backup-deployment-scenarios.md | 85 - .../database-deployment-scenario.md | 12 - .../resource-pooling-architecture.md | 20 - ...rrent-architectural-feature-constraints.md | 34 - ...-developer-environment-deployment-guide.md | 272 --- .../two-city-three-dc-dr.md | 192 -- ...-and-gs_dumpall-to-export-data-overview.md | 91 - .../2-exporting-a-single-database.md | 288 --- .../3-exporting-all-databases.md | 121 - ...-by-a-user-without-required-permissions.md | 82 - .../exporting-data/exporting-data.md | 13 - .../importing-and-exporting-data.md | 11 - .../importing-data/1-import-modes.md | 18 - ...10-managing-concurrent-write-operations.md | 177 -- ...ing-the-INSERT-statement-to-insert-data.md | 20 - ...OPY-FROM-STDIN-statement-to-import-data.md | 318 --- ...sing-a-gsql-meta-command-to-import-data.md | 213 -- .../5-using-gs_restore-to-import-data.md | 263 --- .../6-updating-data-in-a-table.md | 166 -- .../importing-data/7-deep-copy.md | 116 - .../importing-data/8-ANALYZE-table.md | 46 - .../9-doing-VACUUM-to-a-table.md | 22 - .../importing-data/importing-data.md | 19 - .../localization/character-set-support.md | 206 -- .../localization/collation-support.md | 128 -- .../localization/locale-support.md | 68 - .../localization/localization.md | 12 - .../1-introducing-mot/1-mot-introduction.md | 33 - .../2-mot-features-and-benefits.md | 22 - .../3-mot-key-technologies.md | 24 - .../4-mot-usage-scenarios.md | 22 - .../5-mot-performance-benchmarks.md | 189 -- .../1-introducing-mot/introducing-mot.md | 16 - .../2-using-mot/1-using-mot-overview.md | 18 - .../2-using-mot/2-mot-preparation.md | 206 -- 
.../2-using-mot/3-mot-deployment.md | 660 ------ .../mot-engine/2-using-mot/4-mot-usage.md | 506 ----- .../2-using-mot/5-mot-administration.md | 419 ---- .../6-mot-sample-tpcc-benchmark.md | 116 - .../mot-engine/2-using-mot/using-mot.md | 17 - .../mot-engine/3-concepts-of-mot/3-1.md | 90 - .../mot-engine/3-concepts-of-mot/3-2.md | 179 -- .../mot-engine/3-concepts-of-mot/3-3.md | 59 - .../mot-engine/3-concepts-of-mot/3-4.md | 22 - .../mot-engine/3-concepts-of-mot/3-5.md | 41 - .../mot-engine/3-concepts-of-mot/3-6.md | 204 -- .../mot-engine/3-concepts-of-mot/3-7.md | 24 - .../mot-engine/3-concepts-of-mot/3-8.md | 73 - .../mot-engine/3-concepts-of-mot/3-9.md | 33 - .../3-concepts-of-mot/concepts-of-mot.md | 20 - .../mot-engine/4-appendix/1-references.md | 36 - .../mot-engine/4-appendix/2-glossary.md | 59 - .../mot-engine/4-appendix/mot-appendix.md | 11 - .../mot-engine/mot-engine.md | 13 - .../primary-and-standby-management.md | 126 -- .../0-starting-and-stopping-mogdb.md | 218 -- .../1-routine-maintenance-check-items.md | 164 -- ...0-data-security-maintenance-suggestions.md | 29 - .../routine-maintenance/11-log-reference.md | 152 -- .../2-checking-os-parameters.md | 178 -- .../3-checking-mogdb-health-status.md | 645 ------ .../4-checking-database-performance.md | 83 - .../5-checking-and-deleting-logs.md | 160 -- .../6-checking-time-consistency.md | 52 - ...g-the-number-of-application-connections.md | 130 -- .../8-routinely-maintaining-tables.md | 111 - .../9-routinely-recreating-an-index.md | 88 - .../exporting-and-viewing-the-wdr.md | 97 - .../routine-maintenance.md | 24 - .../routine-maintenance/slow-sql-diagnosis.md | 31 - .../using-the-gsql-client-for-connection.md | 212 -- .../abo-optimizer/adaptive-plan-selection.md | 42 - ...haracteristic-description-abo-optimizer.md | 11 - .../intelligent-cardinality-estimation.md | 46 - .../ai-capabilities/ai-capabilities.md | 12 - ...ection-forecast-and-exception-detection.md | 45 - ...-cause-analysis-for-slow-sql-statements.md | 42 - .../3-index-recommendation.md | 44 - .../4-parameter-tuning-and-diagnosis.md | 50 - .../5-slow-sql-statement-discovery.md | 43 - .../characteristic-description-ai4db.md | 14 - .../db4ai-database-driven-ai.md | 43 - .../1-standard-sql.md | 42 - .../2-standard-development-interfaces.md | 40 - .../3-postgresql-api-compatibility.md | 40 - .../ECPG.md | 113 - .../MogDB-MySQL-compatibility.md | 30 - .../MogDB-Oracle-compatibility.md | 30 - .../application-development-interfaces.md | 15 - .../characteristic-description-overview.md | 150 -- .../characteristic-description.md | 20 - .../add-rowtype-attribute-to-the-view.md | 65 - ...tions-distinct-performance-optimization.md | 50 - ...aggregate-functions-support-keep-clause.md | 101 - ...e-functions-support-scenario-extensions.md | 48 - .../compatibility/compatibility.md | 32 - ...h-mysql-alias-support-for-single-quotes.md | 71 - ...ate-current_time-keywords-as-field-name.md | 83 - .../compatibility/custom-type-array.md | 65 - .../for-update-supports-outer-join.md | 58 - .../compatibility/format-error-backtrace.md | 139 -- .../mogdb-supports-insert-all.md | 178 -- .../oracle-dblink-syntax-compatibility.md | 242 -- ...hen-creating-package-function-procedure.md | 41 - ...bypass-method-when-merge-into-hit-index.md | 91 - ...es-to-procedure-and-function-parameters.md | 80 - ...-constants-in-package-as-default-values.md | 143 -- .../support-passing-the-count-attribute.md | 50 - .../compatibility/support-plpgsql-subtype.md | 188 -- .../support-q-quote-escape-character.md | 296 --- 
...g-two-date-types-to-return-numeric-type.md | 64 - ...ntheses-for-function-without-parameters.md | 56 - .../compatibility/support-table-function.md | 65 - ...the-same-name-after-the-end-with-oracle.md | 75 - .../compatibility/support-where-current-of.md | 58 - .../1-access-control-model.md | 46 - .../10-row-level-access-control.md | 47 - .../11-password-strength-verification.md | 75 - ...ity-query-in-a-fully-encrypted-database.md | 94 - .../13-ledger-database-mechanism.md | 48 - .../14-transparent-data-encryption.md | 57 - ...ation-of-control-and-access-permissions.md | 50 - .../3-database-encryption-authentication.md | 40 - .../4-data-encryption-and-storage.md | 52 - .../database-security/5-database-audit.md | 40 - .../6-network-communication-security.md | 48 - .../database-security/7-resource-label.md | 53 - .../database-security/8-unified-audit.md | 78 - .../9-dynamic-data-anonymization.md | 111 - .../database-security/database-security.md | 23 - ...ort-for-functions-and-stored-procedures.md | 42 - .../10-autonomous-transaction.md | 48 - .../11-global-temporary-table.md | 49 - .../12-pseudocolumn-rownum.md | 47 - .../13-stored-procedure-debugging.md | 42 - ...-load-balancing-and-readwrite-isolation.md | 40 - .../15-in-place-update-storage-engine.md | 40 - .../16-publication-subscription.md | 56 - .../17-foreign-key-lock-enhancement.md | 49 - .../18-data-compression-in-oltp-scenarios.md | 41 - .../19-transaction-async-submit.md | 31 - .../enterprise-level-features/2-sql-hints.md | 44 - .../20-copy-import-optimization.md | 93 - .../21-dynamic-partition-pruning.md | 32 - .../22-sql-running-status-observation.md | 105 - .../23-index-creation-parallel-control.md | 28 - .../24-brin-index.md | 261 --- .../25-bloom-index.md | 215 -- .../3-full-text-indexing.md | 57 - .../4-copy-interface-for-error-tolerance.md | 40 - .../5-partitioning.md | 64 - ...support-for-advanced-analysis-functions.md | 61 - .../7-materialized-view.md | 40 - .../8-hyperloglog.md | 42 - .../9-creating-an-index-online.md | 45 - .../enterprise-level-features.md | 35 - .../event-trigger.md | 43 - .../high-availability/1-primary-standby.md | 46 - .../10-adding-or-deleting-a-standby-server.md | 62 - ...-entering-the-maximum-availability-mode.md | 46 - .../12-parallel-logical-decoding.md | 52 - .../high-availability/13-dcf.md | 54 - .../high-availability/14-cm.md | 68 - .../high-availability/15-global-syscache.md | 47 - ...-a-standby-node-to-build-a-standby-node.md | 40 - .../17-two-city-three-dc-dr.md | 47 - .../2-logical-replication.md | 41 - .../high-availability/4-logical-backup.md | 44 - .../high-availability/5-physical-backup.md | 56 - .../6-automatic-job-retry-upon-failure.md | 91 - .../high-availability/7-ultimate-rto.md | 43 - .../8-cascaded-standby-server.md | 49 - .../high-availability/9-delayed-replay.md | 50 - ...omponent-supporting-two-node-deployment.md | 34 - .../high-availability/ddl-query-of-view.md | 83 - ...vailability-based-on-the-paxos-protocol.md | 41 - .../high-availability/high-availability.md | 28 - .../high-performance/1-cbo-optimizer.md | 36 - .../high-performance/10-xlog-no-lock-flush.md | 36 - .../11-parallel-page-based-redo-for-ustore.md | 36 - ...store-execution-to-vectorized-execution.md | 110 - .../high-performance/2-llvm.md | 40 - .../high-performance/3-vectorized-engine.md | 47 - .../4-hybrid-row-column-store.md | 72 - .../5-adaptive-compression.md | 42 - ...-kunpeng-numa-architecture-optimization.md | 40 - .../8-high-concurrency-of-thread-pools.md | 38 - .../9-smp-for-parallel-execution.md 
| 48 - .../adaptive-two-phase-aggregation.md | 150 -- .../astore-row-level-compression.md | 95 - .../btree-index-compression.md | 193 -- ...hancement-of-tracing-backend-key-thread.md | 32 - .../enhancement-of-wal-redo-performance.md | 52 - .../high-performance/high-performance.md | 30 - .../ock-accelerated-data-transmission.md | 36 - ...ock-scrlock-accelerate-distributed-lock.md | 36 - .../ordering-operator-optimization.md | 114 - .../high-performance/parallel-index-scan.md | 148 -- .../parallel-query-optimization.md | 92 - .../high-performance/sql-bypass.md | 141 -- .../high-performance/tracing-SQL-function.md | 36 - .../2-workload-diagnosis-report.md | 76 - .../maintainability/3-slow-sql-diagnosis.md | 178 -- .../4-session-performance-diagnosis.md | 106 - .../5-system-kpi-aided-diagnosis.md | 70 - .../maintainability/built-in-stack-tool.md | 186 -- .../maintainability/dcf-module-tracing.md | 34 - .../error-when-writing-illegal-characters.md | 93 - .../maintainability/extension-splitting.md | 67 - .../maintainability/fault-diagnosis.md | 34 - .../light-lock-export-and-analysis.md | 32 - .../maintainability/maintainability.md | 20 - .../maintainability/pageinspect-pagehack.md | 254 --- .../maintainability/sql-patch.md | 133 -- ...a-distributed-database-using-kubernetes.md | 36 - .../distributed-analysis-capabilities.md | 36 - .../distributed-database-capability.md | 36 - .../middleware/middleware.md | 12 - ...ency-escape-at-the-infrastructure-layer.md | 42 - .../workload-management.md | 10 - .../cm-fault/cm-cluster-brain-split-fault.md | 273 --- .../cm-fault/cm-cluster-manual-failover.md | 88 - .../cm-fault/cm-fault.md | 11 - .../common-fault-locating-cases.md | 17 - ...e-dump-occurs-after-installation-on-x86.md | 26 - ...core-dump-occurs-due-to-full-disk-space.md | 22 - ...settings-of-guc-parameter-log-directory.md | 20 - ...e-dump-occurs-when-removeipc-is-enabled.md | 24 - .../core-fault-locating.md | 13 - .../after-you-run-the-du-command.md | 32 - .../disk-space-usage-reaches-the-threshold.md | 58 - ...or-no-space-left-on-device-is-displayed.md | 64 - .../file-is-damaged-in-the-xfs-file-system.md | 22 - .../file-system-disk-memory.md | 16 - .../insufficient-memory.md | 20 - .../shared-memory-leak.md | 73 - .../when-the-tpcc-is-running.md | 20 - .../index-fault/b-tree-index-faults.md | 68 - .../index-fault/index-fault.md | 12 - .../index-fault/reindexing-fails.md | 29 - ...hen-a-user-specifies-only-an-index-name.md | 47 - ...-error-occurs-during-integer-conversion.md | 24 - .../different-data-is-displayed.md | 32 - .../forcibly-terminating-a-session.md | 62 - .../permission-session-data-type.md | 12 - .../performance-deterioration.md | 26 - .../primary-node-is-hung-in-demoting.md | 24 - .../service-ha-concurrency/query-failure.md | 81 - .../service-ha-concurrency.md | 15 - .../service-startup-failure.md | 88 - .../standby-node-in-the-need-repair-state.md | 20 - .../too-many-clients-already.md | 50 - ...alyzing-the-status-of-a-query-statement.md | 57 - ...ng-whether-a-query-statement-is-blocked.md | 56 - .../lock-wait-timeout-is-displayed.md | 25 - .../sql-fault/low-query-efficiency.md | 32 - .../slow-response-to-a-query-statement.md | 54 - .../sql-fault/sql-fault.md | 14 - ...ed-when-the-table-partition-is-modified.md | 46 - .../table-partition-table.md | 11 - .../table-size-does-not-change.md | 39 - .../common-fault-locating-methods.md | 283 --- .../common-faults-and-identification.md | 11 - .../docs-mogdb/v5.2/communication-matrix.md | 25 - .../developer-guide/1-1-stored-procedure.md | 
16 - ...-introduction-to-autonomous-transaction.md | 17 - ...ction-supporting-autonomous-transaction.md | 41 - ...edure-supporting-autonomous-transaction.md | 43 - .../autonomous-transaction/4-restrictions.md | 73 - ...block-supporting-autonomous-transaction.md | 36 - .../1-development-based-on-jdbc-overview.md | 10 - .../10-example-common-operations.md | 280 --- ...e-retrying-sql-queries-for-applications.md | 205 -- ...-and-exporting-data-through-local-files.md | 119 - ...rating-data-from-a-my-database-to-mogdb.md | 97 - .../14-example-logic-replication-code.md | 181 -- ...-to-the-database-in-different-scenarios.md | 60 - .../15-JDBC/1-java-sql-Connection.md | 67 - .../15-JDBC/10-javax-sql-DataSource.md | 21 - .../15-JDBC/11-javax-sql-PooledConnection.md | 19 - .../15-JDBC/12-javax-naming-Context.md | 25 - ...-javax-naming-spi-InitialContextFactory.md | 16 - .../15-JDBC/14-CopyManager.md | 40 - .../15-JDBC/2-java-sql-CallableStatement.md | 46 - .../15-JDBC/3-java-sql-DatabaseMetaData.md | 197 -- .../15-JDBC/4-java-sql-Driver.md | 22 - .../15-JDBC/5-java-sql-PreparedStatement.md | 70 - .../15-JDBC/6-java-sql-ResultSet.md | 154 -- .../15-JDBC/7-java-sql-ResultSetMetaData.md | 36 - .../15-JDBC/8-java-sql-Statement.md | 69 - .../9-javax-sql-ConnectionPoolDataSource.md | 17 - .../15-JDBC/jdbc-interface-reference.md | 25 - ...kage-driver-class-and-environment-class.md | 51 - .../3-development-process.md | 12 - .../4-loading-the-driver.md | 19 - .../5-connecting-to-a-database.md | 105 - .../6-connecting-to-a-database-using-ssl.md | 146 -- .../7-running-sql-statements.md | 242 -- .../8-processing-data-in-a-result-set.md | 76 - .../8.1-log-management.md | 118 - .../9-closing-a-connection.md | 17 - .../connecting-to-a-database-using-uds.md | 48 - .../development-based-on-jdbc.md | 31 - ...imary-and-backup-cluster-load-balancing.md | 107 - .../jdbc-based-common-parameter-reference.md | 108 - .../jdbc-release-notes.md | 159 -- .../1-development-based-on-odbc.md | 41 - ...es-dependent-libraries-and-header-files.md | 12 - ...nfiguring-a-data-source-in-the-linux-os.md | 328 --- .../4-development-process.md | 42 - ...mple-common-functions-and-batch-binding.md | 440 ---- ...pplication-scenarios-and-configurations.md | 496 ---- .../6-ODBC/2-0-odbc-overview.md | 10 - .../6-ODBC/2-1-SQLAllocEnv.md | 10 - .../6-ODBC/2-10-SQLExecDirect.md | 48 - .../6-ODBC/2-11-SQLExecute.md | 44 - .../6-ODBC/2-12-SQLFetch.md | 43 - .../6-ODBC/2-13-SQLFreeStmt.md | 10 - .../6-ODBC/2-14-SQLFreeConnect.md | 10 - .../6-ODBC/2-15-SQLFreeHandle.md | 43 - .../6-ODBC/2-16-SQLFreeEnv.md | 10 - .../6-ODBC/2-17-SQLPrepare.md | 46 - .../6-ODBC/2-18-SQLGetData.md | 53 - .../6-ODBC/2-19-SQLGetDiagRec.md | 74 - .../6-ODBC/2-2-SQLAllocConnect.md | 10 - .../6-ODBC/2-20-SQLSetConnectAttr.md | 47 - .../6-ODBC/2-21-SQLSetEnvAttr.md | 47 - .../6-ODBC/2-22-SQLSetStmtAttr.md | 47 - .../6-ODBC/2-23-Examples.md | 345 --- .../6-ODBC/2-3-SQLAllocHandle.md | 45 - .../6-ODBC/2-4-SQLAllocStmt.md | 10 - .../6-ODBC/2-5-SQLBindCol.md | 51 - .../6-ODBC/2-6-SQLBindParameter.md | 59 - .../6-ODBC/2-7-SQLColAttribute.md | 53 - .../6-ODBC/2-8-SQLConnect.md | 54 - .../6-ODBC/2-9-SQLDisconnect.md | 41 - .../6-ODBC/odbc-interface-reference.md | 32 - ...1-database-connection-control-functions.md | 20 - .../10-PQstatus.md | 64 - .../2-PQconnectdbParams.md | 42 - .../3-PQconnectdb.md | 39 - .../4-PQconninfoParse.md | 31 - .../5-PQconnectStart.md | 30 - .../6-PQerrorMessage.md | 34 - .../7-PQsetdbLogin.md | 51 - .../8-PQfinish.md | 34 - .../9-PQreset.md | 34 - 
.../1-PQclear.md | 34 - .../10-PQntuples.md | 34 - .../11-PQprepare.md | 50 - .../12-PQresultStatus.md | 72 - .../2-PQexec.md | 42 - .../3-PQexecParams.md | 44 - .../4-PQexecParamsBatch.md | 46 - .../5-PQexecPrepared.md | 42 - .../6-PQexecPreparedBatch.md | 44 - .../7-PQfname.md | 36 - .../8-PQgetvalue.md | 42 - .../9-PQnfields.md | 34 - .../database-statement-execution-functions.md | 23 - ...ons-for-asynchronous-command-processing.md | 23 - .../2-PQsendQuery.md | 39 - .../3-PQsendQueryParams.md | 52 - .../4-PQsendPrepare.md | 46 - .../5-PQsendQueryPrepared.md | 50 - .../6-PQflush.md | 38 - .../1-PQgetCancel.md | 38 - .../2-PQfreeCancel.md | 34 - .../3-PQcancel.md | 41 - ...tions-for-canceling-queries-in-progress.md | 14 - .../2-libpq/libpq-api-reference.md | 13 - .../dependent-header-files-of-libpq.md | 10 - .../development-based-on-libpq.md | 16 - .../development-process.md | 34 - .../libpq-example.md | 282 --- .../link-parameters.md | 53 - .../1-psycopg-based-development.md | 31 - .../10.1-example-common-operations.md | 102 - .../1-psycopg2-connect.md | 42 - .../10-connection-close.md | 32 - .../2-connection-cursor.md | 37 - .../3-cursor-execute-query-vars-list.md | 35 - .../4-curosr-executemany-query-vars-list.md | 35 - .../5-connection-commit.md | 32 - .../6-connection-rollback.md | 32 - .../7-cursor-fetchone.md | 30 - .../8-cursor-fetchall.md | 30 - .../9-cursor-close.md | 30 - .../psycopg-api-reference.md | 21 - .../12-psycopg2-release-notes.md | 44 - .../2-psycopg-package.md | 30 - .../3.1-development-process.md | 12 - .../4-connecting-to-a-database.md | 127 -- ...daptation-of-python-values-to-sql-types.md | 35 - .../6-new-features-in-mogdb.md | 100 - .../9-connecting-to-the-database-using-ssl.md | 30 - .../developer-guide/dev/5-commissioning.md | 40 - .../dev/application-development-tutorial.md | 15 - .../design-specification.md | 351 --- ...roduction-to-development-specifications.md | 27 - .../naming-specification.md | 74 - .../overview-of-development-specifications.md | 26 - .../postgresql-compatibility.md | 130 -- .../query-operations.md | 69 - .../syntax-specification.md | 92 - .../v5.2/developer-guide/developer-guide.md | 20 - .../developer-guide/extension/extension.md | 17 - .../foreign-data-wrapper/1-oracle_fdw.md | 118 - .../foreign-data-wrapper/2-mysql_fdw.md | 81 - .../foreign-data-wrapper/3-postgres_fdw.md | 79 - .../extension/foreign-data-wrapper/dblink.md | 176 -- .../foreign-data-wrapper/fdw-introduction.md | 16 - .../foreign-data-wrapper/file_fdw.md | 86 - .../extension/pg_bulkload-user-guide.md | 108 - .../extension/pg_prewarm-user-guide.md | 109 - .../extension/pg_repack-user-guide.md | 203 -- .../extension/pg_trgm-user-guide.md | 79 - .../postgis-extension/postgis-extension.md | 12 - .../postgis-extension/postgis-overview.md | 18 - .../postgis-support-and-constraints.md | 51 - .../postgis-extension/using-postgis.md | 97 - .../extension/wal2json-user-guide.md | 128 -- .../v5.2/developer-guide/extension/whale.md | 248 -- .../logical-decoding/1-logical-decoding.md | 67 - ...cal-decoding-by-sql-function-interfaces.md | 85 - .../logical-decoding/logical-decoding.md | 11 - .../logical-replication.md | 11 - .../publication-subscription/architecture.md | 16 - .../configuration-settings.md | 16 - .../publication-subscription/conflicts.md | 14 - .../publication-subscription/monitoring.md | 14 - .../publication-subscription.md | 30 - .../publication-subscription/publications.md | 22 - .../publication-subscription/quick-setup.md | 36 - 
.../publication-subscription/restrictions.md | 18 - .../publication-subscription/security.md | 20 - .../publication-subscription/subscriptions.md | 28 - .../1-materialized-view-overview.md | 15 - .../1-full-materialized-view-overview.md | 10 - .../2-full-materialized-view-usage.md | 69 - ...terialized-view-support-and-constraints.md | 22 - .../full-materialized-view.md | 12 - ...-incremental-materialized-view-overview.md | 10 - .../2-incremental-materialized-view-usage.md | 92 - ...terialized-view-support-and-constraints.md | 31 - .../incremental-materialized-view.md | 12 - .../assessment-tool.md | 149 -- .../dolphin-extension/dolphin-extension.md | 13 - .../dolphin-extension/dolphin-installation.md | 12 - .../dolphin-extension/dolphin-overview.md | 10 - .../dolphin-extension/dolphin-restrictions.md | 14 - .../dolphin-reset-parameters.md | 159 -- .../dolphin-syntax/dolphin-syntax.md | 15 - .../dolphin-syntax/guc-parameters.md | 266 --- .../dolphin-column-name-identifiers.md | 48 - .../identifiers/dolphin-identifiers.md | 10 - .../data-types/dolphin-binary-types.md | 80 - .../data-types/dolphin-bit-string-types.md | 43 - .../data-types/dolphin-bool-types.md | 31 - .../data-types/dolphin-character-types.md | 75 - .../data-types/dolphin-data-types.md | 16 - .../data-types/dolphin-date-time-types.md | 250 -- .../data-types/dolphin-enumeration-types.md | 83 - .../data-types/dolphin-numeric-types.md | 154 -- .../sql-reference/dolphin-dcl-syntax.md | 86 - .../sql-reference/dolphin-ddl-syntax.md | 53 - .../sql-reference/dolphin-dml-syntax.md | 22 - .../sql-reference/dolphin-keywords.md | 41 - .../sql-reference/dolphin-sql-reference.md | 17 - .../dolphin-conditional-expressions.md | 80 - .../expressions/dolphin-expressions.md | 10 - .../dolphin-advisory-lock-functions.md | 244 -- .../dolphin-aggregate-functions.md | 73 - ...phin-arithmetic-functions-and-operators.md | 125 - .../dolphin-assignment-operators.md | 12 - .../dolphin-b-compatible-database-lock.md | 79 - ...phin-bit-string-functions-and-operators.md | 38 - ...cter-processing-functions-and-operators.md | 541 ----- .../dolphin-comment-operators.md | 28 - ...hin-compatible-operators-and-operations.md | 327 --- ...olphin-conditional-expression-functions.md | 126 -- ...time-processing-functions-and-operators.md | 1772 --------------- .../dolphin-functions-and-operators.md | 25 - ...phin-json-jsonb-functions-and-operators.md | 646 ------ .../dolphin-logical-operators.md | 85 - ...network-address-functions-and-operators.md | 173 -- .../dolphin-system-information-functions.md | 76 - .../dolphin-type-conversion-functions.md | 58 - .../sql-syntax/dolphin-alter-database.md | 57 - .../sql-syntax/dolphin-alter-function.md | 99 - .../sql-syntax/dolphin-alter-procedure.md | 93 - .../sql-syntax/dolphin-alter-server.md | 91 - .../dolphin-alter-table-partition.md | 377 ---- .../sql-syntax/dolphin-alter-table.md | 182 -- .../sql-syntax/dolphin-alter-tablespace.md | 173 -- .../sql-syntax/dolphin-alter-view.md | 160 -- .../sql-syntax/dolphin-analyze-analyse.md | 93 - .../sql-reference/sql-syntax/dolphin-ast.md | 43 - .../sql-syntax/dolphin-checksum-table.md | 111 - .../sql-syntax/dolphin-create-database.md | 67 - .../sql-syntax/dolphin-create-function.md | 148 -- .../sql-syntax/dolphin-create-index.md | 194 -- .../sql-syntax/dolphin-create-procedure.md | 71 - .../sql-syntax/dolphin-create-server.md | 86 - .../sql-syntax/dolphin-create-table-as.md | 72 - .../dolphin-create-table-partition.md | 1277 ----------- .../sql-syntax/dolphin-create-table.md | 198 -- 
.../sql-syntax/dolphin-create-tablespace.md | 44 - .../sql-syntax/dolphin-create-trigger.md | 449 ---- .../sql-syntax/dolphin-create-view.md | 137 -- .../sql-syntax/dolphin-describe-table.md | 139 -- .../sql-reference/sql-syntax/dolphin-do.md | 61 - .../sql-syntax/dolphin-drop-database.md | 48 - .../sql-syntax/dolphin-drop-index.md | 64 - .../sql-syntax/dolphin-drop-tablespace.md | 50 - .../sql-syntax/dolphin-execute.md | 63 - .../sql-syntax/dolphin-explain.md | 304 --- .../sql-syntax/dolphin-flush-binary-logs.md | 36 - .../sql-syntax/dolphin-grant-revoke-proxy.md | 113 - .../sql-reference/sql-syntax/dolphin-grant.md | 112 - .../sql-syntax/dolphin-insert.md | 227 -- .../sql-reference/sql-syntax/dolphin-kill.md | 146 -- .../sql-syntax/dolphin-load-data.md | 149 -- .../sql-syntax/dolphin-optimize-table.md | 80 - .../sql-syntax/dolphin-prepare.md | 55 - .../sql-syntax/dolphin-rename-table.md | 73 - .../sql-syntax/dolphin-rename-user.md | 45 - .../sql-syntax/dolphin-revoke.md | 114 - .../sql-syntax/dolphin-select-hint.md | 49 - .../sql-syntax/dolphin-select.md | 123 - .../sql-syntax/dolphin-set-charset.md | 72 - .../sql-syntax/dolphin-set-password.md | 69 - .../sql-syntax/dolphin-show-character-set.md | 52 - .../sql-syntax/dolphin-show-collation.md | 62 - .../sql-syntax/dolphin-show-columns.md | 131 -- .../dolphin-show-create-database.md | 43 - .../dolphin-show-create-function.md | 63 - .../dolphin-show-create-procedure.md | 70 - .../sql-syntax/dolphin-show-create-table.md | 47 - .../sql-syntax/dolphin-show-create-trigger.md | 46 - .../sql-syntax/dolphin-show-create-view.md | 49 - .../sql-syntax/dolphin-show-databases.md | 94 - .../dolphin-show-function-status.md | 61 - .../sql-syntax/dolphin-show-grants.md | 40 - .../sql-syntax/dolphin-show-index.md | 85 - .../sql-syntax/dolphin-show-master-status.md | 55 - .../sql-syntax/dolphin-show-plugins.md | 76 - .../sql-syntax/dolphin-show-privileges.md | 78 - .../dolphin-show-procedure-status.md | 51 - .../sql-syntax/dolphin-show-processlist.md | 97 - .../sql-syntax/dolphin-show-slave-hosts.md | 136 -- .../sql-syntax/dolphin-show-status.md | 499 ---- .../sql-syntax/dolphin-show-table-status.md | 109 - .../sql-syntax/dolphin-show-tables.md | 87 - .../sql-syntax/dolphin-show-triggers.md | 78 - .../sql-syntax/dolphin-show-variables.md | 61 - .../sql-syntax/dolphin-show-warnings.md | 175 -- .../sql-syntax/dolphin-sql-syntax.md | 80 - .../sql-syntax/dolphin-update.md | 248 -- .../sql-syntax/dolphin-use-db_name.md | 78 - .../dolphin-assignment-statements.md | 57 - .../dolphin-basic-statements.md | 10 - .../dolphin-stored-procedures.md | 10 - .../system-views/dolphin-INDEX_STATISTIC.md | 29 - .../dolphin-PG_TYPE_NONSTRICT_BASIC_VALUE.md | 17 - .../system-views/dolphin-system-views.md | 11 - .../mysql-compatible-description.md | 37 - .../partition-management.md | 11 - .../benefits-of-partition-pruning.md | 66 - .../dynamic-partition-pruning.md | 260 --- ...whether-partition-pruning-has-been-used.md | 12 - ...-that-can-be-used-for-partition-pruning.md | 33 - .../partition-pruning/partition-pruning.md | 14 - .../static-partition-pruning.md | 60 - ...ns-for-choosing-a-partitioning-strategy.md | 12 - .../when-to-use-hash-partitioning.md | 45 - .../when-to-use-list-partitioning.md | 37 - .../when-to-use-range-partitioning.md | 46 - .../plpgsql/1-1-plpgsql-overview.md | 39 - .../plpgsql/1-10-other-statements.md | 20 - .../developer-guide/plpgsql/1-11-cursors.md | 180 -- .../plpgsql/1-12-retry-management.md | 26 - .../developer-guide/plpgsql/1-13-debugging.md | 175 
-- .../developer-guide/plpgsql/1-14-package.md | 21 - .../developer-guide/plpgsql/1-2-data-types.md | 10 - .../plpgsql/1-3-data-type-conversion.md | 41 - .../plpgsql/1-4-arrays-and-records.md | 813 ------- .../plpgsql/1-5-declare-syntax.md | 82 - .../plpgsql/1-6-basic-statements.md | 215 -- .../plpgsql/1-7-dynamic-statements.md | 166 -- .../plpgsql/1-8-control-statements.md | 690 ------ .../plpgsql/1-9-transaction-management.md | 408 ---- .../advanced-packages/advanced-packages.md | 12 - .../basic-interfaces/PKG_SERVICE.md | 399 ---- .../basic-interfaces/basic-interfaces.md | 10 - .../scheduled-jobs/pkg-service.md | 359 --- .../scheduled-jobs/scheduled-jobs.md | 10 - .../developer-guide/user-defined-functions.md | 16 - .../v5.2/faqs/application-development-faqs.md | 8 - .../faqs/deployment-and-maintenance-faqs.md | 330 --- product/en/docs-mogdb/v5.2/faqs/faqs.md | 15 - .../v5.2/faqs/high-availability-faqs.md | 28 - .../en/docs-mogdb/v5.2/faqs/migration-faqs.md | 21 - .../en/docs-mogdb/v5.2/faqs/product-faqs.md | 165 -- .../en/docs-mogdb/v5.2/faqs/upgrade-faqs.md | 16 - product/en/docs-mogdb/v5.2/glossary.md | 159 -- .../cluster-management/cluster-management.md | 20 - .../cm-configuration-parameter/cm-cm_agent.md | 274 --- .../cm-cm_server.md | 394 ---- .../cm-configuration-parameter.md | 13 - .../feature-introduction.md | 445 ---- .../introduction-to-cm_ctl-tool.md | 391 ---- .../introduction-to-cm_persist.md | 49 - ...to-installation-and-uninstallation-tool.md | 63 - .../manual-configuration-of-vip.md | 174 -- .../cluster-management/safety-design.md | 60 - .../high-available-dcf.md | 213 -- .../high-available-guide.md | 11 - .../docker-installation.md | 439 ---- .../installation-guide/installation-guide.md | 14 - .../environment-requirement.md | 74 - .../installation-preparation.md | 11 - .../os-configuration.md | 278 --- .../installation-guide/manual-installation.md | 383 ---- .../ptk-based-installation.md | 209 -- .../recommended-parameter-settings.md | 233 -- .../v5.2/mogeaver/mogeaver-overview.md | 47 - .../v5.2/mogeaver/mogeaver-release-notes.md | 165 -- .../en/docs-mogdb/v5.2/mogeaver/mogeaver.md | 11 - product/en/docs-mogdb/v5.2/overview.md | 163 -- .../TPCC-performance-tuning-guide.md | 788 ------- .../performance-tuning/performance-tuning.md | 13 - .../experience-in-rewriting-sql-statements.md | 63 - .../sql-tuning/hint-based-tuning.md | 826 ------- .../introduction-to-the-sql-execution-plan.md | 134 -- .../sql-tuning/query-execution-process.md | 57 - ...etting-key-parameters-during-sql-tuning.md | 27 - ...iewing-and-modifying-a-table-definition.md | 64 - .../sql-tuning/sql-tuning.md | 18 - .../sql-tuning/tuning-process.md | 33 - .../typical-sql-optimization-methods.md | 694 ------ .../sql-tuning/updating-statistics.md | 54 - .../system-tuning/configuring-llvm.md | 75 - .../system-tuning/configuring-smp.md | 111 - .../system-tuning/configuring-ustore.md | 118 - .../configuring-vector-engine.md | 59 - .../system-tuning/optimizing-os-parameters.md | 117 - .../resource-load-management-overview.md | 26 - .../resource-load-management.md | 11 - .../creating-resource-pool.md | 152 -- .../enabling-resource-load-management.md | 55 - .../resource-management-preparations.md | 13 - .../resource-planning.md | 34 - .../setting-control-group.md | 209 -- .../system-tuning/system-tuning.md | 15 - .../v5.2/performance-tuning/wdr/wdr-report.md | 385 ---- .../wdr/wdr-snapshot-schema.md | 273 --- .../v5.2/performance-tuning/wdr/wdr.md | 11 - .../container-based-installation.md | 199 -- 
.../installation-on-a-single-node.md | 181 -- .../quick-start/mogdb-access/mogdb-access.md | 13 - .../use-cli-to-access-mogdb/gsql.md | 216 -- .../use-cli-to-access-mogdb/pgcli.md | 85 - .../use-cli-to-access-mogdb.md | 11 - .../use-gui-tools-to-access-mogdb/dbeaver.md | 64 - .../mogeaver-usage.md | 60 - .../use-gui-tools-to-access-mogdb.md | 11 - .../use-middleware-to-access-mogdb.md | 11 - ...-configures-mogdb-data-source-reference.md | 175 -- ...-configures-mogdb-data-source-reference.md | 212 -- .../adonet.md | 16 - .../c-cpp.md | 15 - .../go.md | 16 - .../java.md | 14 - .../nodejs.md | 16 - .../python.md | 18 - .../rust.md | 16 - ...se-programming-language-to-access-mogdb.md | 16 - .../v5.2/quick-start/mogdb-playground.md | 46 - .../en/docs-mogdb/v5.2/quick-start/mogila.md | 406 ---- .../v5.2/quick-start/quick-start.md | 14 - .../GAUSS-00001-GAUSS-00100.md | 828 ------- .../GAUSS-00101-GAUSS-00200.md | 836 ------- .../GAUSS-00201-GAUSS-00300.md | 748 ------ .../GAUSS-00301-GAUSS-00400.md | 804 ------- .../GAUSS-00401-GAUSS-00500.md | 692 ------ .../GAUSS-00501-GAUSS-00600.md | 728 ------ .../GAUSS-00601-GAUSS-00700.md | 748 ------ .../GAUSS-00701-GAUSS-00800.md | 764 ------- .../GAUSS-00801-GAUSS-00900.md | 724 ------ .../GAUSS-00901-GAUSS-01000.md | 756 ------- .../GAUSS-01001-GAUSS-01100.md | 812 ------- .../GAUSS-01101-GAUSS-01200.md | 780 ------- .../GAUSS-01201-GAUSS-01300.md | 822 ------- .../GAUSS-01301-GAUSS-01400.md | 748 ------ .../GAUSS-01401-GAUSS-01500.md | 820 ------- .../GAUSS-01501-GAUSS-01600.md | 644 ------ .../GAUSS-01601-GAUSS-01700.md | 646 ------ .../GAUSS-01701-GAUSS-01800.md | 750 ------ .../GAUSS-01801-GAUSS-01900.md | 740 ------ .../GAUSS-01901-GAUSS-02000.md | 614 ----- .../GAUSS-02001-GAUSS-02100.md | 376 --- .../GAUSS-02101-GAUSS-02200.md | 580 ----- .../GAUSS-02201-GAUSS-02300.md | 554 ----- .../GAUSS-02301-GAUSS-02400.md | 759 ------- .../GAUSS-02401-GAUSS-02500.md | 708 ------ .../GAUSS-02501-GAUSS-02600.md | 772 ------- .../GAUSS-02601-GAUSS-02700.md | 764 ------- .../GAUSS-02701-GAUSS-02800.md | 732 ------ .../GAUSS-02801-GAUSS-02900.md | 780 ------- .../GAUSS-02901-GAUSS-03000.md | 780 ------- .../GAUSS-03001-GAUSS-03100.md | 812 ------- .../GAUSS-03101-GAUSS-03200.md | 828 ------- .../GAUSS-03201-GAUSS-03300.md | 820 ------- .../GAUSS-03301-GAUSS-03400.md | 828 ------- .../GAUSS-03401-GAUSS-03500.md | 668 ------ .../GAUSS-03501-GAUSS-03600.md | 700 ------ .../GAUSS-03601-GAUSS-03700.md | 748 ------ .../GAUSS-03701-GAUSS-03800.md | 700 ------ .../GAUSS-03801-GAUSS-03900.md | 644 ------ .../GAUSS-03901-GAUSS-04000.md | 644 ------ .../GAUSS-04001-GAUSS-04100.md | 724 ------ .../GAUSS-04101-GAUSS-04200.md | 478 ---- .../GAUSS-04201-GAUSS-04300.md | 692 ------ .../GAUSS-04301-GAUSS-04400.md | 444 ---- .../GAUSS-04401-GAUSS-04500.md | 457 ---- .../GAUSS-04501-GAUSS-04600.md | 644 ------ .../GAUSS-04601-GAUSS-04700.md | 614 ----- .../GAUSS-04701-GAUSS-04800.md | 559 ----- .../GAUSS-04801-GAUSS-04900.md | 844 ------- .../GAUSS-04901-GAUSS-05000.md | 860 ------- .../GAUSS-05001-GAUSS-05100.md | 868 ------- .../GAUSS-05101-GAUSS-05200.md | 604 ----- .../GAUSS-05201-GAUSS-05300.md | 868 ------- .../GAUSS-05301-GAUSS-05400.md | 860 ------- .../GAUSS-05401-GAUSS-05500.md | 860 ------- .../GAUSS-05501-GAUSS-05600.md | 868 ------- .../GAUSS-05601-GAUSS-05700.md | 868 ------- .../GAUSS-05701-GAUSS-05800.md | 868 ------- .../GAUSS-05801-GAUSS-05900.md | 698 ------ .../GAUSS-05901-GAUSS-06000.md | 764 ------- .../GAUSS-06001-GAUSS-06100.md | 852 ------- 
.../GAUSS-06101-GAUSS-06200.md | 860 ------- .../GAUSS-06201-GAUSS-06300.md | 924 -------- .../GAUSS-06301-GAUSS-06400.md | 860 ------- .../GAUSS-06401-GAUSS-06500.md | 788 ------- .../GAUSS-06501-GAUSS-06600.md | 868 ------- .../GAUSS-06601-GAUSS-06700.md | 868 ------- .../GAUSS-06701-GAUSS-06800.md | 868 ------- .../GAUSS-06801-GAUSS-06900.md | 868 ------- .../GAUSS-06901-GAUSS-07000.md | 868 ------- .../GAUSS-07001-GAUSS-07100.md | 868 ------- .../GAUSS-07101-GAUSS-07200.md | 868 ------- .../GAUSS-07201-GAUSS-07300.md | 868 ------- .../GAUSS-07301-GAUSS-07400.md | 868 ------- .../GAUSS-07401-GAUSS-07500.md | 632 ------ .../GAUSS-50000-GAUSS-50999.md | 1100 --------- .../GAUSS-51000-GAUSS-51999.md | 1252 ---------- .../GAUSS-52000-GAUSS-52999.md | 900 -------- .../GAUSS-53000-GAUSS-53699.md | 1402 ------------ .../class00-class21.md | 41 - .../class0A-class0Z.md | 27 - .../class22-class24.md | 81 - .../class25-class40.md | 66 - .../class2B-class2F.md | 22 - .../class3B-class3F.md | 19 - .../class42-class44.md | 74 - .../class53-class58.md | 47 - .../classCG-classTS.md | 103 - .../classF0-classP0.md | 20 - .../classXX-classYY.md | 45 - .../description-of-sql-error-codes.md | 18 - .../sqlstate-values-of-mogdb-cm-error-code.md | 27 - .../sqlstate-values-of-mogdb-error-code.md | 30 - .../error-code-reference.md | 90 - .../third-party-library-error-codes.md | 31 - .../error-log-reference.md | 10 - .../kernel-error-message.md | 1328 ----------- .../aggregate-functions.md | 943 -------- .../ai-feature-functions.md | 200 -- .../array-functions-and-operators.md | 630 ------ .../binary-string-functions-and-operators.md | 224 -- .../bit-string-functions-and-operators.md | 153 -- ...cter-processing-functions-and-operators.md | 1925 ---------------- .../comparison-operators.md | 27 - .../conditional-expressions-functions.md | 173 -- ...a-damage-detection-and-repair-functions.md | 207 -- ...time-processing-functions-and-operators.md | 1320 ----------- .../dynamic-data-masking-functions.md | 66 - .../encrypted-equality-functions.md | 188 -- .../event-trigger-functions.md | 116 - .../fault-injection-system-function.md | 20 - .../functions-and-operators.md | 50 - .../geometric-functions-and-operators.md | 948 -------- .../global-syscache-feature-functions.md | 99 - .../global-temporary-table-functions.md | 132 -- .../functions-and-operators/hash-function.md | 594 ----- .../hll-functions-and-operators.md | 876 ------- .../internal-functions-1.md | 89 - .../internal-functions-2.md | 123 - .../internal-functions/internal-functions.md | 12 - .../functions-and-operators/json-functions.md | 745 ------ .../ledger-database-functions.md | 96 - .../logical-operators.md | 26 - .../mathematical-functions-and-operators.md | 1202 ---------- .../mode-matching-operators.md | 230 -- ...network-address-functions-and-operators.md | 456 ---- .../obsolete-functions.md | 18 - .../other-system-functions-1.md | 270 --- .../other-system-functions-2.md | 606 ----- .../other-system-functions.md | 12 - .../prompt-message-function.md | 21 - .../range-functions-and-operators.md | 435 ---- .../security-functions.md | 339 --- .../sequence-functions.md | 165 -- .../set-returning-functions.md | 131 -- .../statistics-information-functions-1.md | 660 ------ .../statistics-information-functions-2.md | 662 ------ .../statistics-information-functions-3.md | 620 ----- .../statistics-information-functions.md | 14 - .../access-privilege-inquiry-function.md | 314 --- .../comment-information-functions.md | 40 - .../guc-value-inquiry-functions.md | 
 49 -
 .../other-function.md | 22 -
 .../schema-visibility-inquiry-functions.md | 90 -
 .../session-information-functions.md | 590 -----
 .../system-catalog-information-functions.md | 445 ----
 .../system-information-functions.md | 17 -
 .../transaction-ids-and-snapshots.md | 329 ---
 .../advisory-lock-functions.md | 244 --
 ...ackup-and-restoration-control-functions.md | 256 ---
 .../configuration-settings-functions.md | 66 -
 .../database-object-functions.md | 438 ----
 .../logical-replication-functions.md | 593 -----
 .../other-functions.md | 660 ------
 .../row-store-compression-system-functions.md | 95 -
 .../segment-page-storage-functions.md | 113 -
 .../server-signal-functions.md | 66 -
 .../snapshot-synchronization-functions.md | 24 -
 .../system-management-functions.md | 21 -
 .../undo-system-functions.md | 89 -
 .../universal-file-access-functions.md | 148 --
 .../text-search-functions-and-operators.md | 535 -----
 .../trigger-functions.md | 55 -
 .../type-conversion-functions-1.md | 721 ------
 .../type-conversion-functions-2.md | 722 ------
 .../type-conversion-functions.md | 12 -
 .../window-functions.md | 626 -----
 .../functions-and-operators/xml-functions.md | 380 ----
 .../guc-parameters/AI-features.md | 64 -
 .../guc-parameters/DCF-parameters-settings.md | 362 ---
 .../guc-parameters/HyperLogLog.md | 101 -
 .../guc-parameters/MogDB-transaction.md | 150 --
 .../guc-parameters/alarm-detection.md | 73 -
 .../guc-parameters/appendix.md | 28 -
 .../guc-parameters/auditing/audit-switch.md | 133 --
 .../guc-parameters/auditing/auditing.md | 12 -
 .../auditing/operation-audit.md | 185 --
 .../auditing/user-and-permission-audit.md | 83 -
 .../guc-parameters/automatic-vacuuming.md | 187 --
 .../guc-parameters/backend-compression.md | 120 -
 .../backup-and-restoration-parameter.md | 46 -
 .../guc-parameters/cm-parameters.md | 10 -
 .../communication-library-parameters.md | 86 -
 .../connection-and-authentication.md | 12 -
 .../connection-settings.md | 197 --
 .../security-and-authentication.md | 412 ----
 .../connection-pool-parameters.md | 47 -
 .../default-settings-of-client-connection.md | 12 -
 .../other-default-parameters.md | 54 -
 .../statement-behavior.md | 242 --
 .../zone-and-formatting.md | 190 --
 .../guc-parameters/developer-options.md | 317 ---
 .../error-reporting-and-logging.md | 13 -
 .../logging-content.md | 334 ---
 .../logging-destination.md | 175 --
 .../logging-time.md | 115 -
 .../using-csv-log-output.md | 88 -
 .../guc-parameters/fault-tolerance.md | 119 -
 .../guc-parameters/file-location.md | 85 -
 .../guc-parameters/flashback.md | 81 -
 .../global-syscache-parameters.md | 43 -
 .../guc-parameters/global-temporary-table.md | 31 -
 .../guc-parameters/guc-parameter-list.md | 660 ------
 .../guc-parameters/guc-parameter-usage.md | 18 -
 .../guc-user-defined-functions.md | 54 -
 .../ha-replication/ha-replication.md | 12 -
 .../ha-replication/primary-server.md | 275 ---
 .../ha-replication/sending-server.md | 167 --
 .../ha-replication/standby-server.md | 171 --
 .../guc-parameters/load-management.md | 414 ----
 .../guc-parameters/lock-management.md | 152 --
 .../miscellaneous-parameters.md | 269 ---
 .../reference-guide/guc-parameters/mot.md | 73 -
 ...o-efficient-data-compression-algorithms.md | 24 -
 .../query-planning/genetic-query-optimizer.md | 106 -
 .../optimizer-cost-constants.md | 94 -
 .../optimizer-method-configuration.md | 341 ---
 .../query-planning/other-optimizer-options.md | 754 -------
 .../query-planning/query-planning.md | 21 -
 .../reference-guide/guc-parameters/query.md | 192 --
 .../reference-guide-guc-parameters.md | 53 -
 ...on-parameters-of-two-database-instances.md | 18 -
 .../guc-parameters/reserved-parameters.md | 30 -
 .../asynchronous-io-operations.md | 121 -
 .../resource-consumption/background-writer.md | 164 --
 .../cost-based-vacuum-delay.md | 76 -
 .../resource-consumption/disk-space.md | 35 -
 .../kernel-resource-usage.md | 41 -
 .../resource-consumption/memory.md | 343 ---
 .../resource-consumption.md | 15 -
 .../guc-parameters/rollback-parameters.md | 38 -
 .../guc-parameters/scheduled-task.md | 42 -
 .../guc-parameters/security-configuration.md | 94 -
 .../performance-statistics.md | 35 -
 .../query-and-index-statistics-collector.md | 141 --
 .../statistics-during-the-database-running.md | 11 -
 .../system-performance-snapshot.md | 124 -
 .../guc-parameters/thread-pool.md | 38 -
 .../guc-parameters/upgrade-parameters.md | 47 -
 .../compatibility-with-earlier-versions.md | 148 --
 .../platform-and-client-compatibility.md | 192 --
 .../version-and-platform-compatibility.md | 11 -
 .../guc-parameters/wait-events.md | 23 -
 .../write-ahead-log/archiving.md | 95 -
 .../write-ahead-log/checkpoints.md | 136 --
 .../write-ahead-log/log-replay.md | 158 --
 .../write-ahead-log/settings.md | 289 ---
 .../write-ahead-log/write-ahead-log.md | 13 -
 ...-parameters-supported-by-standby-server.md | 27 -
 .../v5.2/reference-guide/reference-guide.md | 19 -
 .../schema/DB4AI-schema/DB4AI-schema.md | 22 -
 .../DB4AI-schema/DB4AI.ARCHIVE_SNAPSHOT.md | 18 -
 .../DB4AI-schema/DB4AI.CREATE_SNAPSHOT.md | 21 -
 .../DB4AI.CREATE_SNAPSHOT_INTERNAL.md | 21 -
 .../DB4AI.MANAGE_SNAPSHOT_INTERNAL.md | 19 -
 .../DB4AI-schema/DB4AI.PREPARE_SNAPSHOT.md | 21 -
 .../DB4AI.PREPARE_SNAPSHOT_INTERNAL.md | 27 -
 .../DB4AI-schema/DB4AI.PUBLISH_SNAPSHOT.md | 18 -
 .../DB4AI-schema/DB4AI.PURGE_SNAPSHOT.md | 18 -
 .../DB4AI.PURGE_SNAPSHOT_INTERNAL.md | 17 -
 .../DB4AI-schema/DB4AI.SAMPLE_SNAPSHOT.md | 22 -
 .../schema/DB4AI-schema/DB4AI.SNAPSHOT.md | 28 -
 .../schema/DBE_PERF/DBE_PERF.md | 33 -
 .../schema/DBE_PERF/cache-io/Cache-IO.md | 38 -
 .../cache-io/GLOBAL_STATIO_ALL_INDEXES.md | 23 -
 .../cache-io/GLOBAL_STATIO_ALL_SEQUENCES.md | 21 -
 .../cache-io/GLOBAL_STATIO_ALL_TABLES.md | 27 -
 .../cache-io/GLOBAL_STATIO_SYS_INDEXES.md | 23 -
 .../cache-io/GLOBAL_STATIO_SYS_SEQUENCES.md | 21 -
 .../cache-io/GLOBAL_STATIO_SYS_TABLES.md | 27 -
 .../cache-io/GLOBAL_STATIO_USER_INDEXES.md | 23 -
 .../cache-io/GLOBAL_STATIO_USER_SEQUENCES.md | 21 -
 .../cache-io/GLOBAL_STATIO_USER_TABLES.md | 27 -
 .../DBE_PERF/cache-io/GLOBAL_STAT_DB_CU.md | 20 -
 .../cache-io/GLOBAL_STAT_SESSION_CU.md | 18 -
 .../DBE_PERF/cache-io/STATIO_ALL_INDEXES.md | 22 -
 .../DBE_PERF/cache-io/STATIO_ALL_SEQUENCES.md | 20 -
 .../DBE_PERF/cache-io/STATIO_ALL_TABLES.md | 26 -
 .../DBE_PERF/cache-io/STATIO_SYS_INDEXES.md | 22 -
 .../DBE_PERF/cache-io/STATIO_SYS_SEQUENCES.md | 20 -
 .../DBE_PERF/cache-io/STATIO_SYS_TABLES.md | 26 -
 .../DBE_PERF/cache-io/STATIO_USER_INDEXES.md | 22 -
 .../cache-io/STATIO_USER_SEQUENCES.md | 20 -
 .../DBE_PERF/cache-io/STATIO_USER_TABLES.md | 26 -
 .../cache-io/SUMMARY_STATIO_ALL_INDEXES.md | 20 -
 .../cache-io/SUMMARY_STATIO_ALL_SEQUENCES.md | 19 -
 .../cache-io/SUMMARY_STATIO_ALL_TABLES.md | 25 -
 .../cache-io/SUMMARY_STATIO_SYS_INDEXES.md | 20 -
 .../cache-io/SUMMARY_STATIO_SYS_SEQUENCES.md | 19 -
 .../cache-io/SUMMARY_STATIO_SYS_TABLES.md | 25 -
 .../cache-io/SUMMARY_STATIO_USER_INDEXES.md | 20 -
 .../cache-io/SUMMARY_STATIO_USER_SEQUENCES.md | 19 -
 .../cache-io/SUMMARY_STATIO_USER_TABLES.md | 25 -
 .../DBE_PERF/configuration/CONFIG_SETTINGS.md | 31 -
 .../configuration/GLOBAL_CONFIG_SETTINGS.md | 32 -
 .../DBE_PERF/configuration/configuration.md | 11 -
 .../schema/DBE_PERF/file/FILE_IOSTAT.md | 28 -
 .../schema/DBE_PERF/file/FILE_REDO_IOSTAT.md | 22 -
 .../DBE_PERF/file/GLOBAL_FILE_IOSTAT.md | 29 -
 .../DBE_PERF/file/GLOBAL_FILE_REDO_IOSTAT.md | 23 -
 .../schema/DBE_PERF/file/GLOBAL_REL_IOSTAT.md | 20 -
 .../schema/DBE_PERF/file/LOCAL_REL_IOSTAT.md | 19 -
 .../DBE_PERF/file/SUMMARY_FILE_IOSTAT.md | 28 -
 .../DBE_PERF/file/SUMMARY_FILE_REDO_IOSTAT.md | 22 -
 .../DBE_PERF/file/SUMMARY_REL_IOSTAT.md | 19 -
 .../schema/DBE_PERF/file/file.md | 18 -
 .../GLOBAL_PLANCACHE_CLEAN.md | 10 -
 .../GLOBAL_PLANCACHE_STATUS.md | 23 -
 .../global-plancache/global-plancache.md | 13 -
 .../DBE_PERF/instance/GLOBAL_INSTANCE_TIME.md | 19 -
 .../schema/DBE_PERF/instance/INSTANCE_TIME.md | 29 -
 .../schema/DBE_PERF/instance/instance.md | 11 -
 .../schema/DBE_PERF/lock/GLOBAL_LOCKS.md | 31 -
 .../schema/DBE_PERF/lock/LOCKS.md | 34 -
 .../schema/DBE_PERF/lock/lock.md | 11 -
 .../memory/GLOBAL_MEMORY_NODE_DETAIL.md | 18 -
 .../memory/GLOBAL_SHARED_MEMORY_DETAIL.md | 22 -
 .../memory/GS_SHARED_MEMORY_DETAIL.md | 21 -
 .../DBE_PERF/memory/MEMORY_NODE_DETAIL.md | 18 -
 .../schema/DBE_PERF/memory/memory-schema.md | 13 -
 .../object/GLOBAL_STAT_ALL_INDEXES.md | 24 -
 .../DBE_PERF/object/GLOBAL_STAT_ALL_TABLES.md | 37 -
 .../DBE_PERF/object/GLOBAL_STAT_BAD_BLOCK.md | 23 -
 .../DBE_PERF/object/GLOBAL_STAT_DATABASE.md | 35 -
 .../object/GLOBAL_STAT_DATABASE_CONFLICTS.md | 23 -
 .../object/GLOBAL_STAT_SYS_INDEXES.md | 24 -
 .../DBE_PERF/object/GLOBAL_STAT_SYS_TABLES.md | 37 -
 .../object/GLOBAL_STAT_USER_FUNCTIONS.md | 22 -
 .../object/GLOBAL_STAT_USER_INDEXES.md | 24 -
 .../object/GLOBAL_STAT_USER_TABLES.md | 37 -
 .../object/GLOBAL_STAT_XACT_ALL_TABLES.md | 27 -
 .../object/GLOBAL_STAT_XACT_SYS_TABLES.md | 27 -
 .../object/GLOBAL_STAT_XACT_USER_FUNCTIONS.md | 22 -
 .../object/GLOBAL_STAT_XACT_USER_TABLES.md | 27 -
 .../DBE_PERF/object/STAT_ALL_INDEXES.md | 23 -
 .../schema/DBE_PERF/object/STAT_ALL_TABLES.md | 36 -
 .../schema/DBE_PERF/object/STAT_BAD_BLOCK.md | 24 -
 .../schema/DBE_PERF/object/STAT_DATABASE.md | 34 -
 .../object/STAT_DATABASE_CONFLICTS.md | 22 -
 .../DBE_PERF/object/STAT_SYS_INDEXES.md | 23 -
 .../schema/DBE_PERF/object/STAT_SYS_TABLES.md | 36 -
 .../DBE_PERF/object/STAT_USER_FUNCTIONS.md | 21 -
 .../DBE_PERF/object/STAT_USER_INDEXES.md | 23 -
 .../DBE_PERF/object/STAT_USER_TABLES.md | 36 -
 .../DBE_PERF/object/STAT_XACT_ALL_TABLES.md | 26 -
 .../DBE_PERF/object/STAT_XACT_SYS_TABLES.md | 26 -
 .../object/STAT_XACT_USER_FUNCTIONS.md | 21 -
 .../DBE_PERF/object/STAT_XACT_USER_TABLES.md | 26 -
 .../object/SUMMARY_STAT_ALL_INDEXES.md | 21 -
 .../object/SUMMARY_STAT_ALL_TABLES.md | 35 -
 .../DBE_PERF/object/SUMMARY_STAT_BAD_BLOCK.md | 22 -
 .../DBE_PERF/object/SUMMARY_STAT_DATABASE.md | 33 -
 .../object/SUMMARY_STAT_DATABASE_CONFLICTS.md | 21 -
 .../object/SUMMARY_STAT_SYS_INDEXES.md | 21 -
 .../object/SUMMARY_STAT_SYS_TABLES.md | 35 -
 .../object/SUMMARY_STAT_USER_FUNCTIONS.md | 20 -
 .../object/SUMMARY_STAT_USER_INDEXES.md | 21 -
 .../object/SUMMARY_STAT_USER_TABLES.md | 35 -
 .../object/SUMMARY_STAT_XACT_ALL_TABLES.md | 25 -
 .../object/SUMMARY_STAT_XACT_SYS_TABLES.md | 25 -
 .../SUMMARY_STAT_XACT_USER_FUNCTIONS.md | 20 -
 .../object/SUMMARY_STAT_XACT_USER_TABLES.md | 25 -
 .../schema/DBE_PERF/object/object-schema.md | 51 -
 .../operator/GLOBAL_OPERATOR_HISTORY.md | 37 -
 .../operator/GLOBAL_OPERATOR_HISTORY_TABLE.md | 10 -
 .../operator/GLOBAL_OPERATOR_RUNTIME.md | 38 -
 .../DBE_PERF/operator/OPERATOR_HISTORY.md | 10 -
 .../operator/OPERATOR_HISTORY_TABLE.md | 37 -
 .../DBE_PERF/operator/OPERATOR_RUNTIME.md | 38 -
 .../DBE_PERF/operator/operator-schema.md | 15 -
 .../schema/DBE_PERF/os/GLOBAL_OS_RUNTIME.md | 21 -
 .../schema/DBE_PERF/os/GLOBAL_OS_THREADS.md | 20 -
 .../schema/DBE_PERF/os/OS_RUNTIME.md | 20 -
 .../schema/DBE_PERF/os/OS_THREADS.md | 20 -
 .../schema/DBE_PERF/os/os-schema.md | 13 -
 .../query/GLOBAL_SLOW_QUERY_HISTORY.md | 10 -
 .../DBE_PERF/query/GLOBAL_SLOW_QUERY_INFO.md | 10 -
 .../query/GLOBAL_STATEMENT_COMPLEX_HISTORY.md | 83 -
 .../GLOBAL_STATEMENT_COMPLEX_HISTORY_TABLE.md | 10 -
 .../query/GLOBAL_STATEMENT_COMPLEX_RUNTIME.md | 63 -
 .../DBE_PERF/query/GLOBAL_STATEMENT_COUNT.md | 41 -
 .../DBE_PERF/query/GS_SLOW_QUERY_HISTORY.md | 10 -
 .../DBE_PERF/query/GS_SLOW_QUERY_INFO.md | 43 -
 .../schema/DBE_PERF/query/STATEMENT.md | 80 -
 .../query/STATEMENT_COMPLEX_HISTORY.md | 10 -
 .../query/STATEMENT_COMPLEX_HISTORY_TABLE.md | 10 -
 .../query/STATEMENT_COMPLEX_RUNTIME.md | 63 -
 .../schema/DBE_PERF/query/STATEMENT_COUNT.md | 44 -
 .../DBE_PERF/query/STATEMENT_HISTORY_query.md | 97 -
 .../STATEMENT_RESPONSETIME_PERCENTILE.md | 17 -
 .../STATEMENT_WLMSTAT_COMPLEX_RUNTIME.md | 38 -
 .../DBE_PERF/query/SUMMARY_STATEMENT.md | 59 -
 .../DBE_PERF/query/SUMMARY_STATEMENT_COUNT.md | 40 -
 .../schema/DBE_PERF/query/query-schema.md | 27 -
 .../schema/DBE_PERF/rto/RTO-RPO.md | 12 -
 .../schema/DBE_PERF/rto/global_rto_status.md | 19 -
 .../global_streaming_hadr_rto_and_rpo_stat.md | 23 -
 .../rto/gs_hadr_local_rto_and_rpo_stat.md | 27 -
 .../session-thread/GLOBAL_SESSION_MEMORY.md | 20 -
 .../GLOBAL_SESSION_MEMORY_DETAIL.md | 24 -
 .../session-thread/GLOBAL_SESSION_STAT.md | 21 -
 .../GLOBAL_SESSION_STAT_ACTIVITY.md | 37 -
 .../session-thread/GLOBAL_SESSION_TIME.md | 20 -
 .../GLOBAL_THREADPOOL_STATUS.md | 10 -
 .../GLOBAL_THREAD_WAIT_STATUS.md | 31 -
 .../session-thread/LOCAL_ACTIVE_SESSION.md | 44 -
 .../session-thread/LOCAL_THREADPOOL_STATUS.md | 23 -
 .../session-thread/SESSION_CPU_RUNTIME.md | 25 -
 .../DBE_PERF/session-thread/SESSION_MEMORY.md | 19 -
 .../session-thread/SESSION_MEMORY_DETAIL.md | 23 -
 .../session-thread/SESSION_MEMORY_RUNTIME.md | 25 -
 .../DBE_PERF/session-thread/SESSION_STAT.md | 20 -
 .../session-thread/SESSION_STAT_ACTIVITY.md | 36 -
 .../DBE_PERF/session-thread/SESSION_TIME.md | 19 -
 .../STATEMENT_IOSTAT_COMPLEX_RUNTIME.md | 24 -
 .../session-thread/THREAD_WAIT_STATUS.md | 27 -
 .../DBE_PERF/session-thread/session-thread.md | 27 -
 .../GLOBAL_TRANSACTIONS_PREPARED_XACTS.md | 20 -
 .../GLOBAL_TRANSACTIONS_RUNNING_XACTS.md | 25 -
 .../SUMMARY_TRANSACTIONS_PREPARED_XACTS.md | 20 -
 .../SUMMARY_TRANSACTIONS_RUNNING_XACTS.md | 25 -
 .../TRANSACTIONS_PREPARED_XACTS.md | 20 -
 .../transaction/TRANSACTIONS_RUNNING_XACTS.md | 25 -
 .../transaction/transaction-schema.md | 15 -
 .../schema/DBE_PERF/utility/BGWRITER_STAT.md | 26 -
 .../DBE_PERF/utility/CLASS_VITAL_INFO.md | 19 -
 .../DBE_PERF/utility/GLOBAL_BGWRITER_STAT.md | 27 -
 .../utility/GLOBAL_CANDIDATE_STATUS.md | 22 -
 .../DBE_PERF/utility/GLOBAL_CKPT_STATUS.md | 22 -
 .../utility/GLOBAL_DOUBLE_WRITE_STATUS.md | 27 -
 .../utility/GLOBAL_GET_BGWRITER_STATUS.md | 21 -
 .../utility/GLOBAL_PAGEWRITER_STATUS.md | 23 -
 .../utility/GLOBAL_RECORD_RESET_TIME.md | 17 -
 .../utility/GLOBAL_RECOVERY_STATUS.md | 24 -
 .../DBE_PERF/utility/GLOBAL_REDO_STATUS.md | 38 -
 .../utility/GLOBAL_REPLICATION_SLOTS.md | 26 -
 .../utility/GLOBAL_REPLICATION_STAT.md | 31 -
 .../utility/GLOBAL_SINGLE_FLUSH_DW_STATUS.md | 21 -
 .../DBE_PERF/utility/REPLICATION_SLOTS.md | 25 -
 .../DBE_PERF/utility/REPLICATION_STAT.md | 30 -
 .../DBE_PERF/utility/SUMMARY_USER_LOGIN.md | 20 -
 .../schema/DBE_PERF/utility/USER_LOGIN.md | 20 -
 .../schema/DBE_PERF/utility/utility.md | 27 -
 .../wait-events/GLOBAL_WAIT_EVENTS.md | 25 -
 .../DBE_PERF/wait-events/WAIT_EVENTS.md | 25 -
 .../wait-events/dbe-perf-wait-events.md | 11 -
 .../WLM_USER_RESOURCE_CONFIG.md | 25 -
 .../WLM_USER_RESOURCE_RUNTIME.md | 26 -
 .../workload-manager/workload-manager.md | 11 -
 .../workload/GLOBAL_USER_TRANSACTION.md | 29 -
 .../workload/GLOBAL_WORKLOAD_TRANSACTION.md | 29 -
 .../workload/SUMMARY_WORKLOAD_SQL_COUNT.md | 24 -
 .../SUMMARY_WORKLOAD_SQL_ELAPSE_TIME.md | 33 -
 .../workload/SUMMARY_WORKLOAD_TRANSACTION.md | 28 -
 .../DBE_PERF/workload/USER_TRANSACTION.md | 28 -
 .../DBE_PERF/workload/WORKLOAD_SQL_COUNT.md | 23 -
 .../workload/WORKLOAD_SQL_ELAPSE_TIME.md | 32 -
 .../DBE_PERF/workload/WORKLOAD_TRANSACTION.md | 28 -
 .../DBE_PERF/workload/workload-schema.md | 18 -
 .../DBE_PLDEBUGGER-schema.md | 128 --
 .../DBE_PLDEBUGGER.abort.md | 23 -
 .../DBE_PLDEBUGGER.add_breakpoint.md | 18 -
 .../DBE_PLDEBUGGER.attach.md | 23 -
 .../DBE_PLDEBUGGER.backtrace.md | 20 -
 .../DBE_PLDEBUGGER.continue.md | 26 -
 .../DBE_PLDEBUGGER.delete_breakpoint.md | 17 -
 .../DBE_PLDEBUGGER.disable_breakpoint.md | 17 -
 .../DBE_PLDEBUGGER.enable_breakpoint.md | 17 -
 .../DBE_PLDEBUGGER.finish.md | 19 -
 .../DBE_PLDEBUGGER.info_breakpoints.md | 20 -
 .../DBE_PLDEBUGGER.info_code.md | 19 -
 .../DBE_PLDEBUGGER.info_locals.md | 21 -
 .../DBE_PLDEBUGGER.local_debug_server_info.md | 18 -
 .../DBE_PLDEBUGGER.next.md | 19 -
 .../DBE_PLDEBUGGER.print_var.md | 22 -
 .../DBE_PLDEBUGGER.set_var.md | 18 -
 .../DBE_PLDEBUGGER.step.md | 19 -
 .../DBE_PLDEBUGGER.turn_off.md | 24 -
 .../DBE_PLDEBUGGER.turn_on.md | 25 -
 .../DBE_PLDEVELOPER.gs_errors.md | 24 -
 .../DBE_PLDEVELOPER.gs_source.md | 36 -
 .../schema/DBE_PLDEVELOPER/DBE_PLDEVELOPER.md | 13 -
 .../DBE_SQL_UTIL-Schema.md | 17 -
 .../DBE_SQL_UTIL.create_abort_sql_patch.md | 22 -
 .../DBE_SQL_UTIL.create_hint_sql_patch.md | 22 -
 .../DBE_SQL_UTIL.disable_sql_patch.md | 19 -
 .../DBE_SQL_UTIL.drop_sql_patch.md | 19 -
 .../DBE_SQL_UTIL.enable_sql_patch.md | 19 -
 .../DBE_SQL_UTIL.show_sql_patch.md | 22 -
 .../INFORMATION_SCHEMA_CATALOG_NAME.md | 16 -
 .../_PG_FOREIGN_DATA_WRAPPERS.md | 22 -
 .../information-schema/_PG_FOREIGN_SERVERS.md | 24 -
 .../information-schema/_PG_FOREIGN_TABLES.md | 22 -
 .../_PG_FOREIGN_TABLE_COLUMNS.md | 19 -
 .../information-schema/_PG_USER_MAPPINGS.md | 22 -
 .../information-schema/information-schema.md | 25 -
 .../v5.2/reference-guide/schema/schema.md | 33 -
 .../reference-guide/sql-reference/alias.md | 54 -
 .../sql-reference/appendix/appendix.md | 12 -
 .../appendix/extended-functions.md | 36 -
 .../sql-reference/appendix/extended-syntax.md | 52 -
 .../gin-indexes/gin-indexes-introduction.md | 16 -
 .../appendix/gin-indexes/gin-indexes.md | 13 -
 .../gin-indexes/gin-tips-and-tricks.md | 26 -
 .../appendix/gin-indexes/implementation.md | 24 -
 .../appendix/gin-indexes/scalability.md | 50 -
 .../sql-reference/constant-and-macro.md | 24 -
 .../sql-reference/controlling-transactions.md | 29 -
 .../sql-reference/dcl-syntax-overview.md | 50 -
 .../sql-reference/ddl-syntax-overview.md | 225 --
 .../sql-reference/dml-syntax-overview.md | 51 -
 .../expressions/array-expressions.md | 94 -
 .../expressions/condition-expressions.md | 226 --
 .../sql-reference/expressions/expressions.md | 14 -
 .../expressions/row-expressions.md | 28 -
 .../expressions/simple-expressions.md | 147 --
 .../expressions/subquery-expressions.md | 146 --
 .../additional-features.md | 13 -
 .../gathering-document-statistics.md | 50 -
 .../manipulating-queries.md | 52 -
 .../manipulating-tsvector.md | 31 -
 .../additional-features/rewriting-queries.md | 68 -
 .../configuration-examples.md | 116 -
 .../controlling-text-search.md | 13 -
 .../highlighting-results.md | 66 -
 .../parsing-documents.md | 52 -
 .../parsing-queries.md | 74 -
 .../ranking-search-results.md | 108 -
 .../dictionaries/dictionaries-overview.md | 36 -
 .../dictionaries/dictionaries.md | 16 -
 .../dictionaries/ispell-dictionary.md | 49 -
 .../dictionaries/simple-dictionary.md | 64 -
 .../dictionaries/snowball-dictionary.md | 14 -
 .../dictionaries/stop-words.md | 33 -
 .../dictionaries/synonym-dictionary.md | 116 -
 .../dictionaries/thesaurus-dictionary.md | 92 -
 .../full-text-search/full-text-search.md | 18 -
 .../introduction/basic-text-matching.md | 56 -
 .../introduction/configurations.md | 21 -
 .../introduction/full-text-retrieval.md | 38 -
 .../full-text-search-introduction.md | 13 -
 .../introduction/what-is-a-document.md | 33 -
 .../full-text-search/limitations.md | 16 -
 .../sql-reference/full-text-search/parser.md | 108 -
 .../constraints-on-index-use.md | 44 -
 .../tables-and-indexes/creating-an-index.md | 65 -
 .../tables-and-indexes/searching-a-table.md | 89 -
 .../tables-and-indexes/tables-and-indexes.md | 12 -
 .../testing-a-configuration.md | 65 -
 .../testing-a-dictionary.md | 29 -
 .../testing-a-parser.md | 69 -
 .../testing-and-debugging-text-search.md | 12 -
 .../sql-reference/keywords/keywords-1.md | 381 ----
 .../sql-reference/keywords/keywords-2.md | 394 ----
 .../sql-reference/keywords/keywords.md | 12 -
 .../sql-reference/mogdb-sql.md | 37 -
 .../sql-reference/ordinary-table.md | 52 -
 .../sql-reference/partition-table.md | 1249 ----------
 .../sql-reference-anonymous-block.md | 76 -
 .../sql-reference/sql-reference-contraints.md | 152 --
 .../sql-reference/sql-reference-cursor.md | 349 ---
 .../sql-reference/sql-reference-index.md | 159 --
 .../sql-reference/sql-reference-llvm.md | 90 -
 .../sql-reference/sql-reference-lock.md | 76 -
 .../sql-reference/sql-reference-trigger.md | 158 --
 .../sql-reference/sql-reference.md | 34 -
 .../sql-reference/sub-query.md | 181 --
 .../sql-reference/system-operation.md | 38 -
 .../transaction/sql-reference-transaction.md | 11 -
 .../transaction/transaction-control.md | 28 -
 .../transaction/transaction-management.md | 173 --
 .../sql-reference/type-base-value.md | 83 -
 .../type-conversion/functions.md | 99 -
 .../type-conversion/operators.md | 70 -
 .../type-conversion-overview.md | 52 -
 .../type-conversion/type-conversion.md | 14 -
 .../union-case-and-related-constructs.md | 205 --
 .../type-conversion/value-storage.md | 36 -
 .../v5.2/reference-guide/sql-syntax/ABORT.md | 76 -
 .../sql-syntax/ALTER-AGGREGATE.md | 76 -
 .../sql-syntax/ALTER-AUDIT-POLICY.md | 101 -
 .../sql-syntax/ALTER-DATA-SOURCE.md | 114 -
 .../sql-syntax/ALTER-DATABASE.md | 144 --
 .../sql-syntax/ALTER-DEFAULT-PRIVILEGES.md | 170 --
 .../sql-syntax/ALTER-DIRECTORY.md | 47 -
 .../sql-syntax/ALTER-EVENT-TRIGGER.md | 56 -
 .../reference-guide/sql-syntax/ALTER-EVENT.md | 84 -
 .../sql-syntax/ALTER-EXTENSION.md | 154 --
 .../sql-syntax/ALTER-FOREIGN-DATA-WRAPPER.md | 66 -
 .../sql-syntax/ALTER-FOREIGN-TABLE.md | 118 -
 .../sql-syntax/ALTER-FUNCTION.md | 202 --
 .../sql-syntax/ALTER-GLOBAL-CONFIGURATION.md | 33 -
 .../reference-guide/sql-syntax/ALTER-GROUP.md | 60 -
 .../reference-guide/sql-syntax/ALTER-INDEX.md | 160 --
 .../sql-syntax/ALTER-LANGUAGE.md | 36 -
 .../sql-syntax/ALTER-LARGE-OBJECT.md | 41 -
 .../sql-syntax/ALTER-MASKING-POLICY.md | 140 --
 .../sql-syntax/ALTER-MATERIALIZED-VIEW.md | 79 -
 .../sql-syntax/ALTER-OPERATOR.md | 59 -
 .../sql-syntax/ALTER-PACKAGE.md | 48 -
 .../sql-syntax/ALTER-PROCEDURE.md | 193 --
 .../sql-syntax/ALTER-PUBLICATION.md | 84 -
 .../sql-syntax/ALTER-RESOURCE-LABEL.md | 70 -
 .../sql-syntax/ALTER-RESOURCE-POOL.md | 123 -
 .../reference-guide/sql-syntax/ALTER-ROLE.md | 139 --
 .../ALTER-ROW-LEVEL-SECURITY-POLICY.md | 114 -
 .../reference-guide/sql-syntax/ALTER-RULE.md | 45 -
 .../sql-syntax/ALTER-SCHEMA.md | 91 -
 .../sql-syntax/ALTER-SEQUENCE.md | 86 -
 .../sql-syntax/ALTER-SERVER.md | 106 -
 .../sql-syntax/ALTER-SESSION.md | 90 -
 .../sql-syntax/ALTER-SUBSCRIPTION.md | 108 -
 .../sql-syntax/ALTER-SYNONYM.md | 62 -
 .../sql-syntax/ALTER-SYSTEM-KILL-SESSION.md | 55 -
 .../sql-syntax/ALTER-SYSTEM-SET.md | 79 -
 .../sql-syntax/ALTER-TABLE-PARTITION.md | 293 ---
 .../sql-syntax/ALTER-TABLE-SUBPARTITION.md | 153 --
 .../reference-guide/sql-syntax/ALTER-TABLE.md | 499 ----
 .../sql-syntax/ALTER-TABLESPACE.md | 127 --
 .../ALTER-TEXT-SEARCH-CONFIGURATION.md | 180 --
 .../ALTER-TEXT-SEARCH-DICTIONARY.md | 103 -
 .../sql-syntax/ALTER-TRIGGER.md | 52 -
 .../reference-guide/sql-syntax/ALTER-TYPE.md | 177 --
 .../sql-syntax/ALTER-USER-MAPPING.md | 76 -
 .../reference-guide/sql-syntax/ALTER-USER.md | 122 -
 .../reference-guide/sql-syntax/ALTER-VIEW.md | 153 --
 .../sql-syntax/ANALYZE-ANALYSE.md | 192 --
 .../v5.2/reference-guide/sql-syntax/BEGIN.md | 62 -
 .../v5.2/reference-guide/sql-syntax/CALL.md | 84 -
 .../reference-guide/sql-syntax/CHECKPOINT.md | 36 -
 .../sql-syntax/CLEAN-CONNECTION.md | 76 -
 .../v5.2/reference-guide/sql-syntax/CLOSE.md | 45 -
 .../reference-guide/sql-syntax/CLUSTER.md | 122 -
 .../reference-guide/sql-syntax/COMMENT.md | 159 --
 .../reference-guide/sql-syntax/COMMIT-END.md | 72 -
 .../sql-syntax/COMMIT-PREPARED.md | 49 -
 .../reference-guide/sql-syntax/CONNECT-BY.md | 207 --
 .../v5.2/reference-guide/sql-syntax/COPY.md | 643 ------
 .../sql-syntax/CREATE-AGGREGATE.md | 82 -
 .../sql-syntax/CREATE-AUDIT-POLICY.md | 108 -
 .../reference-guide/sql-syntax/CREATE-CAST.md | 86 -
 .../sql-syntax/CREATE-CLIENT-MASTER-KEY.md | 59 -
 .../CREATE-COLUMN-ENCRYPTION-KEY.md | 58 -
 .../sql-syntax/CREATE-DATA-SOURCE.md | 97 -
 .../sql-syntax/CREATE-DATABASE.md | 234 --
 .../sql-syntax/CREATE-DIRECTORY.md | 58 -
 .../sql-syntax/CREATE-EVENT-TRIGGER.md | 104 -
 .../sql-syntax/CREATE-EVENT.md | 102 -
 .../sql-syntax/CREATE-EXTENSION.md | 59 -
 .../sql-syntax/CREATE-FOREIGN-DATA-WRAPPER.md | 52 -
 .../sql-syntax/CREATE-FOREIGN-TABLE.md | 181 --
 .../sql-syntax/CREATE-FUNCTION.md | 373 ---
 .../sql-syntax/CREATE-GROUP.md | 66 -
 .../CREATE-INCREMENTAL-MATERIALIZED-VIEW.md | 71 -
 .../sql-syntax/CREATE-INDEX.md | 427 ----
 .../sql-syntax/CREATE-LANGUAGE.md | 71 -
 .../sql-syntax/CREATE-MASKING-POLICY.md | 111 -
 .../sql-syntax/CREATE-MATERIALIZED-VIEW.md | 77 -
 .../sql-syntax/CREATE-MODEL.md | 85 -
 .../sql-syntax/CREATE-OPERATOR.md | 117 -
 .../sql-syntax/CREATE-PACKAGE.md | 110 -
 .../sql-syntax/CREATE-PROCEDURE.md | 121 -
 .../sql-syntax/CREATE-PUBLICATION.md | 73 -
 .../sql-syntax/CREATE-RESOURCE-LABEL.md | 85 -
 .../sql-syntax/CREATE-RESOURCE-POOL.md | 152 --
 .../reference-guide/sql-syntax/CREATE-ROLE.md | 297 ---
 .../CREATE-ROW-LEVEL-SECURITY-POLICY.md | 175 --
 .../reference-guide/sql-syntax/CREATE-RULE.md | 78 -
 .../sql-syntax/CREATE-SCHEMA.md | 89 -
 .../sql-syntax/CREATE-SEQUENCE.md | 149 --
 .../sql-syntax/CREATE-SERVER.md | 128 --
 .../sql-syntax/CREATE-SUBSCRIPTION.md | 119 -
 .../sql-syntax/CREATE-SYNONYM.md | 114 -
 .../sql-syntax/CREATE-TABLE-AS.md | 161 --
 .../sql-syntax/CREATE-TABLE-PARTITION.md | 1006 ---------
 .../sql-syntax/CREATE-TABLE-SUBPARTITION.md | 1083 ---------
 .../sql-syntax/CREATE-TABLE.md | 1069 ---------
 .../sql-syntax/CREATE-TABLESPACE.md | 130 --
 .../CREATE-TEXT-SEARCH-CONFIGURATION.md | 96 -
 .../CREATE-TEXT-SEARCH-DICTIONARY.md | 152 --
 .../sql-syntax/CREATE-TRIGGER.md | 371 ---
 .../reference-guide/sql-syntax/CREATE-TYPE.md | 298 ---
 .../sql-syntax/CREATE-USER-MAPPING.md | 87 -
 .../reference-guide/sql-syntax/CREATE-USER.md | 127 --
 .../reference-guide/sql-syntax/CREATE-VIEW.md | 85 -
 .../CREATE-WEAK-PASSWORD-DICTIONARY.md | 54 -
 .../v5.2/reference-guide/sql-syntax/CURSOR.md | 78 -
 .../reference-guide/sql-syntax/DEALLOCATE.md | 38 -
 .../reference-guide/sql-syntax/DECLARE.md | 102 -
 .../v5.2/reference-guide/sql-syntax/DELETE.md | 165 --
 .../reference-guide/sql-syntax/DELIMITER.md | 58 -
 .../v5.2/reference-guide/sql-syntax/DO.md | 56 -
 .../sql-syntax/DROP-AGGREGATE.md | 56 -
 .../sql-syntax/DROP-AUDIT-POLICY.md | 89 -
 .../reference-guide/sql-syntax/DROP-CAST.md | 56 -
 .../sql-syntax/DROP-CLIENT-MASTER-KEY.md | 50 -
 .../sql-syntax/DROP-COLUMN-ENCRYPTION-KEY.md | 43 -
 .../sql-syntax/DROP-DATA-SOURCE.md | 57 -
 .../sql-syntax/DROP-DATABASE.md | 48 -
 .../sql-syntax/DROP-DIRECTORY.md | 44 -
 .../sql-syntax/DROP-EVENT-TRIGGER.md | 47 -
 .../reference-guide/sql-syntax/DROP-EVENT.md | 38 -
 .../sql-syntax/DROP-EXTENSION.md | 51 -
 .../sql-syntax/DROP-FOREIGN-DATA-WRAPPER.md | 41 -
 .../sql-syntax/DROP-FOREIGN-TABLE.md | 44 -
 .../sql-syntax/DROP-FUNCTION.md | 82 -
 .../sql-syntax/DROP-GLOBAL-CONFIGURATION.md | 27 -
 .../reference-guide/sql-syntax/DROP-GROUP.md | 32 -
 .../reference-guide/sql-syntax/DROP-INDEX.md | 56 -
 .../sql-syntax/DROP-LANGUAGE.md | 48 -
 .../sql-syntax/DROP-MASKING-POLICY.md | 44 -
 .../sql-syntax/DROP-MATERIALIZED-VIEW.md | 48 -
 .../reference-guide/sql-syntax/DROP-MODEL.md | 34 -
 .../sql-syntax/DROP-OPERATOR.md | 10 -
 .../reference-guide/sql-syntax/DROP-OWNED.md | 40 -
 .../sql-syntax/DROP-PACKAGE.md | 23 -
 .../sql-syntax/DROP-PROCEDURE.md | 54 -
 .../sql-syntax/DROP-PUBLICATION.md | 44 -
 .../sql-syntax/DROP-RESOURCE-LABEL.md | 44 -
 .../sql-syntax/DROP-RESOURCE-POOL.md | 44 -
 .../reference-guide/sql-syntax/DROP-ROLE.md | 42 -
 .../DROP-ROW-LEVEL-SECURITY-POLICY.md | 57 -
 .../reference-guide/sql-syntax/DROP-RULE.md | 47 -
 .../reference-guide/sql-syntax/DROP-SCHEMA.md | 51 -
 .../sql-syntax/DROP-SEQUENCE.md | 55 -
 .../reference-guide/sql-syntax/DROP-SERVER.md | 41 -
 .../sql-syntax/DROP-SUBSCRIPTION.md | 41 -
 .../sql-syntax/DROP-SYNONYM.md | 45 -
 .../reference-guide/sql-syntax/DROP-TABLE.md | 55 -
 .../sql-syntax/DROP-TABLESPACE.md | 46 -
 .../DROP-TEXT-SEARCH-CONFIGURATION.md | 48 -
 .../sql-syntax/DROP-TEXT-SEARCH-DICTIONARY.md | 57 -
 .../sql-syntax/DROP-TRIGGER.md | 53 -
 .../reference-guide/sql-syntax/DROP-TYPE.md | 48 -
 .../sql-syntax/DROP-USER-MAPPING.md | 38 -
 .../reference-guide/sql-syntax/DROP-USER.md | 59 -
 .../reference-guide/sql-syntax/DROP-VIEW.md | 47 -
 .../DROP-WEAK-PASSWORD-DICTIONARY.md | 34 -
 .../sql-syntax/EXECUTE-DIRECT.md | 83 -
 .../reference-guide/sql-syntax/EXECUTE.md | 59 -
 .../sql-syntax/EXPLAIN-PLAN.md | 86 -
 .../reference-guide/sql-syntax/EXPLAIN.md | 299 ---
 .../v5.2/reference-guide/sql-syntax/FETCH.md | 218 --
 .../v5.2/reference-guide/sql-syntax/GRANT.md | 647 ------
 .../v5.2/reference-guide/sql-syntax/INSERT.md | 180 --
 .../v5.2/reference-guide/sql-syntax/LOCK.md | 136 --
 .../reference-guide/sql-syntax/MERGE-INTO.md | 159 --
 .../v5.2/reference-guide/sql-syntax/MOVE.md | 81 -
 .../reference-guide/sql-syntax/PREDICT-BY.md | 47 -
 .../sql-syntax/PREPARE-TRANSACTION.md | 43 -
 .../reference-guide/sql-syntax/PREPARE.md | 46 -
 .../v5.2/reference-guide/sql-syntax/PURGE.md | 116 -
 .../sql-syntax/REASSIGN-OWNED.md | 39 -
 .../REFRESH-INCREMENTAL-MATERIALIZED-VIEW.md | 46 -
 .../sql-syntax/REFRESH-MATERIALIZED-VIEW.md | 50 -
 .../reference-guide/sql-syntax/REINDEX.md | 161 --
 .../sql-syntax/RELEASE-SAVEPOINT.md | 69 -
 .../v5.2/reference-guide/sql-syntax/RESET.md | 68 -
 .../v5.2/reference-guide/sql-syntax/REVOKE.md | 270 ---
 .../sql-syntax/ROLLBACK-PREPARED.md | 34 -
 .../sql-syntax/ROLLBACK-TO-SAVEPOINT.md | 59 -
 .../reference-guide/sql-syntax/ROLLBACK.md | 44 -
 .../reference-guide/sql-syntax/SAVEPOINT.md | 98 -
 .../reference-guide/sql-syntax/SELECT-INTO.md | 87 -
 .../v5.2/reference-guide/sql-syntax/SELECT.md | 655 ------
 .../sql-syntax/SET-CONSTRAINTS.md | 59 -
 .../reference-guide/sql-syntax/SET-ROLE.md | 74 -
 .../sql-syntax/SET-SESSION-AUTHORIZATION.md | 78 -
 .../sql-syntax/SET-TRANSACTION.md | 63 -
 .../v5.2/reference-guide/sql-syntax/SET.md | 164 --
 .../reference-guide/sql-syntax/SHOW-EVENTS.md | 43 -
 .../v5.2/reference-guide/sql-syntax/SHOW.md | 51 -
 .../v5.2/reference-guide/sql-syntax/SHRINK.md | 58 -
 .../reference-guide/sql-syntax/SHUTDOWN.md | 50 -
 .../reference-guide/sql-syntax/SNAPSHOT.md | 110 -
 .../sql-syntax/START-TRANSACTION.md | 88 -
 .../sql-syntax/TIMECAPSULE-TABLE.md | 122 -
 .../reference-guide/sql-syntax/TRUNCATE.md | 130 --
 .../v5.2/reference-guide/sql-syntax/UPDATE.md | 201 --
 .../v5.2/reference-guide/sql-syntax/VACUUM.md | 120 -
 .../v5.2/reference-guide/sql-syntax/VALUES.md | 67 -
 .../reference-guide/sql-syntax/sql-syntax.md | 225 --
 .../supported-data-types/HLL.md | 207 --
 .../supported-data-types/binary-data-types.md | 54 -
 .../supported-data-types/bit-string-types.md | 50 -
 .../boolean-data-types.md | 59 -
 .../character-data-types.md | 92 -
 .../data-type-used-by-the-ledger-database.md | 31 -
 ...-types-supported-by-column-store-tables.md | 233 --
 .../supported-data-types/date-time-types.md | 323 ---
 .../supported-data-types/geometric.md | 118 -
 .../supported-data-types/json-types.md | 146 --
 .../supported-data-types/monetary.md | 34 -
 .../supported-data-types/network-address.md | 67 -
 .../numeric-data-types.md | 245 --
 .../object-identifier-types.md | 90 -
 .../supported-data-types/pseudo-types.md | 65 -
 .../supported-data-types/range.md | 163 --
 .../supported-data-types/set-type.md | 86 -
 .../supported-data-types.md | 31 -
 .../supported-data-types/text-search-types.md | 168 --
 .../supported-data-types/uuid-type.md | 27 -
 .../supported-data-types/xml-type.md | 69 -
 ...iew-of-system-catalogs-and-system-views.md | 18 -
 .../system-catalogs-and-system-views.md | 15 -
 .../system-catalogs/GS_ASP.md | 43 -
 .../system-catalogs/GS_AUDITING_POLICY.md | 20 -
 .../GS_AUDITING_POLICY_ACCESS.md | 20 -
 .../GS_AUDITING_POLICY_FILTERS.md | 21 -
 .../GS_AUDITING_POLICY_PRIVILEGES.md | 20 -
 .../system-catalogs/GS_CLIENT_GLOBAL_KEYS.md | 21 -
 .../GS_CLIENT_GLOBAL_KEYS_ARGS.md | 20 -
 .../system-catalogs/GS_COLUMN_KEYS.md | 23 -
 .../system-catalogs/GS_COLUMN_KEYS_ARGS.md | 20 -
 .../system-catalogs/GS_DB_PRIVILEGE.md | 19 -
 .../system-catalogs/GS_ENCRYPTED_COLUMNS.md | 23 -
 .../system-catalogs/GS_ENCRYPTED_PROC.md | 20 -
 .../system-catalogs/GS_GLOBAL_CHAIN.md | 25 -
 .../system-catalogs/GS_GLOBAL_CONFIG.md | 17 -
 .../system-catalogs/GS_MASKING_POLICY.md | 20 -
 .../GS_MASKING_POLICY_ACTIONS.md | 21 -
 .../GS_MASKING_POLICY_FILTERS.md | 21 -
 .../system-catalogs/GS_MATVIEW.md | 21 -
 .../system-catalogs/GS_MATVIEW_DEPENDENCY.md | 20 -
 .../system-catalogs/GS_MODEL_WAREHOUSE.md | 38 -
 .../system-catalogs/GS_OPT_MODEL.md | 33 -
 .../system-catalogs/GS_PACKAGE.md | 24 -
 .../system-catalogs/GS_POLICY_LABEL.md | 24 -
 .../system-catalogs/GS_RECYCLEBIN.md | 35 -
 .../system-catalogs/GS_TXN_SNAPSHOT.md | 19 -
 .../system-catalogs/GS_UID.md | 17 -
 .../GS_WLM_EC_OPERATOR_INFO.md | 29 -
 .../GS_WLM_INSTANCE_HISTORY.md | 30 -
 .../system-catalogs/GS_WLM_OPERATOR_INFO.md | 37 -
 .../GS_WLM_PLAN_ENCODING_TABLE.md | 23 -
 .../GS_WLM_PLAN_OPERATOR_INFO.md | 32 -
 .../GS_WLM_SESSION_QUERY_INFO_ALL.md | 102 -
 .../GS_WLM_USER_RESOURCE_HISTORY.md | 33 -
 .../system-catalogs/PGXC_CLASS.md | 25 -
 .../system-catalogs/PGXC_GROUP.md | 24 -
 .../system-catalogs/PGXC_NODE.md | 32 -
 .../system-catalogs/PGXC_SLICE.md | 30 -
 .../system-catalogs/PG_AGGREGATE.md | 25 -
 .../system-catalogs/PG_AM.md | 47 -
 .../system-catalogs/PG_AMOP.md | 28 -
 .../system-catalogs/PG_AMPROC.md | 23 -
 .../PG_APP_WORKLOADGROUP_MAPPING.md | 18 -
 .../system-catalogs/PG_ATTRDEF.md | 23 -
 .../system-catalogs/PG_ATTRIBUTE.md | 39 -
 .../system-catalogs/PG_AUTHID.md | 45 -
 .../system-catalogs/PG_AUTH_HISTORY.md | 19 -
 .../system-catalogs/PG_AUTH_MEMBERS.md | 19 -
 .../system-catalogs/PG_CAST.md | 22 -
 .../system-catalogs/PG_CLASS.md | 78 -
 .../system-catalogs/PG_COLLATION.md | 22 -
 .../system-catalogs/PG_CONSTRAINT.md | 48 -
 .../system-catalogs/PG_CONVERSION.md | 23 -
 .../system-catalogs/PG_DATABASE.md | 31 -
 .../system-catalogs/PG_DB_ROLE_SETTING.md | 18 -
 .../system-catalogs/PG_DEFAULT_ACL.md | 20 -
 .../system-catalogs/PG_DEPEND.md | 32 -
 .../system-catalogs/PG_DESCRIPTION.md | 21 -
 .../system-catalogs/PG_DIRECTORY.md | 20 -
 .../system-catalogs/PG_ENUM.md | 23 -
 .../system-catalogs/PG_EVENT_TRIGGER.md | 21 -
 .../system-catalogs/PG_EXTENSION.md | 23 -
 .../PG_EXTENSION_DATA_SOURCE.md | 22 -
 .../PG_FOREIGN_DATA_WRAPPER.md | 22 -
 .../system-catalogs/PG_FOREIGN_SERVER.md | 23 -
 .../system-catalogs/PG_FOREIGN_TABLE.md | 19 -
 .../system-catalogs/PG_HASHBUCKET.md | 21 -
 .../system-catalogs/PG_INDEX.md | 35 -
 .../system-catalogs/PG_INHERITS.md | 18 -
 .../system-catalogs/PG_JOB.md | 36 -
 .../system-catalogs/PG_JOB_PROC.md | 19 -
 .../system-catalogs/PG_LANGUAGE.md | 24 -
 .../system-catalogs/PG_LARGEOBJECT.md | 22 -
 .../PG_LARGEOBJECT_METADATA.md | 18 -
 .../system-catalogs/PG_NAMESPACE.md | 22 -
 .../system-catalogs/PG_OBJECT.md | 30 -
 .../system-catalogs/PG_OPCLASS.md | 28 -
 .../system-catalogs/PG_OPERATOR.md | 30 -
 .../system-catalogs/PG_OPFAMILY.md | 24 -
 .../system-catalogs/PG_PARTITION.md | 44 -
 .../system-catalogs/PG_PLTEMPLATE.md | 23 -
 .../system-catalogs/PG_PROC.md | 55 -
 .../system-catalogs/PG_PUBLICATION.md | 21 -
 .../system-catalogs/PG_PUBLICATION_REL.md | 18 -
 .../system-catalogs/PG_RANGE.md | 23 -
 .../system-catalogs/PG_REPLICATION_ORIGIN.md | 17 -
 .../system-catalogs/PG_RESOURCE_POOL.md | 31 -
 .../system-catalogs/PG_REWRITE.md | 24 -
 .../system-catalogs/PG_RLSPOLICY.md | 22 -
 .../system-catalogs/PG_SECLABEL.md | 22 -
 .../system-catalogs/PG_SET.md | 19 -
 .../system-catalogs/PG_SHDEPEND.md | 45 -
 .../system-catalogs/PG_SHDESCRIPTION.md | 22 -
 .../system-catalogs/PG_SHSECLABEL.md | 25 -
 .../system-catalogs/PG_STATISTIC.md | 30 -
 .../system-catalogs/PG_STATISTIC_EXT.md | 30 -
 .../system-catalogs/PG_SUBSCRIPTION.md | 26 -
 .../system-catalogs/PG_SUBSCRIPTION_REL.md | 22 -
 .../system-catalogs/PG_SYNONYM.md | 21 -
 .../system-catalogs/PG_TABLESPACE.md | 22 -
 .../system-catalogs/PG_TRIGGER.md | 35 -
 .../system-catalogs/PG_TS_CONFIG.md | 23 -
 .../system-catalogs/PG_TS_CONFIG_MAP.md | 19 -
 .../system-catalogs/PG_TS_DICT.md | 23 -
 .../system-catalogs/PG_TS_PARSER.md | 23 -
 .../system-catalogs/PG_TS_TEMPLATE.md | 20 -
 .../system-catalogs/PG_TYPE.md | 46 -
 .../system-catalogs/PG_USER_MAPPING.md | 21 -
 .../system-catalogs/PG_USER_STATUS.md | 23 -
 .../system-catalogs/PG_WORKLOAD_GROUP.md | 19 -
 .../system-catalogs/PLAN_TABLE_DATA.md | 32 -
 .../system-catalogs/STATEMENT_HISTORY.md | 73 -
 .../system-catalogs/system-catalogs.md | 120 -
 .../system-views/GET_GLOBAL_PREPARED_XACTS.md | 10 -
 .../GS_ASYNC_SUBMIT_SESSIONS_STATUS.md | 25 -
 .../system-views/GS_AUDITING.md | 22 -
 .../system-views/GS_AUDITING_ACCESS.md | 22 -
 .../system-views/GS_AUDITING_PRIVILEGE.md | 22 -
 .../system-views/GS_CLUSTER_RESOURCE_INFO.md | 22 -
 .../system-views/GS_COMPRESSION.md | 24 -
 .../system-views/GS_DB_PRIVILEGES.md | 18 -
 .../system-views/GS_FILE_STAT.md | 28 -
 .../system-views/GS_GSC_MEMORY_DETAIL.md | 19 -
 .../system-views/GS_INSTANCE_TIME.md | 29 -
 .../system-views/GS_LABELS.md | 19 -
 .../system-views/GS_LSC_MEMORY_DETAIL.md | 24 -
 .../system-views/GS_MASKING.md | 19 -
 .../system-views/GS_MATVIEWS.md | 21 -
 .../system-views/GS_OS_RUN_INFO.md | 20 -
 .../system-views/GS_REDO_STAT.md | 22 -
 .../system-views/GS_SESSION_CPU_STATISTICS.md | 25 -
 .../system-views/GS_SESSION_MEMORY.md | 19 -
 .../system-views/GS_SESSION_MEMORY_CONTEXT.md | 25 -
 .../system-views/GS_SESSION_MEMORY_DETAIL.md | 27 -
 .../GS_SESSION_MEMORY_STATISTICS.md | 25 -
 .../system-views/GS_SESSION_STAT.md | 20 -
 .../system-views/GS_SESSION_TIME.md | 19 -
 .../system-views/GS_SQL_COUNT.md | 45 -
 .../system-views/GS_STAT_SESSION_CU.md | 18 -
 .../system-views/GS_THREAD_MEMORY_CONTEXT.md | 26 -
 .../system-views/GS_TOTAL_MEMORY_DETAIL.md | 18 -
 .../system-views/GS_WLM_CGROUP_INFO.md | 24 -
 .../GS_WLM_EC_OPERATOR_STATISTICS.md | 25 -
 .../system-views/GS_WLM_OPERATOR_HISTORY.md | 12 -
 .../GS_WLM_OPERATOR_STATISTICS.md | 38 -
 .../GS_WLM_PLAN_OPERATOR_HISTORY.md | 35 -
 .../GS_WLM_REBUILD_USER_RESOURCE_POOL.md | 16 -
 .../system-views/GS_WLM_RESOURCE_POOL.md | 25 -
 .../system-views/GS_WLM_SESSION_HISTORY.md | 83 -
 .../system-views/GS_WLM_SESSION_INFO.md | 12 -
 .../system-views/GS_WLM_SESSION_INFO_ALL.md | 102 -
 .../system-views/GS_WLM_SESSION_STATISTICS.md | 64 -
 .../system-views/GS_WLM_USER_INFO.md | 25 -
 .../system-views/MPP_TABLES.md | 21 -
 .../system-views/PGXC_PREPARED_XACTS.md | 16 -
 .../system-views/PG_AVAILABLE_EXTENSIONS.md | 19 -
 .../PG_AVAILABLE_EXTENSION_VERSIONS.md | 23 -
 .../system-views/PG_COMM_DELAY.md | 22 -
 .../system-views/PG_COMM_RECV_STREAM.md | 32 -
 .../system-views/PG_COMM_SEND_STREAM.md | 32 -
 .../system-views/PG_COMM_STATUS.md | 28 -
 .../system-views/PG_CONTROL_GROUP_CONFIG.md | 16 -
 .../system-views/PG_CURSORS.md | 21 -
 .../system-views/PG_EXT_STATS.md | 28 -
 .../system-views/PG_GET_INVALID_BACKENDS.md | 20 -
 .../PG_GET_SENDERS_CATCHUP_TIME.md | 23 -
 .../system-views/PG_GROUP.md | 18 -
 .../system-views/PG_GTT_ATTACHED_PIDS.md | 19 -
 .../system-views/PG_GTT_RELSTATS.md | 23 -
 .../system-views/PG_GTT_STATS.md | 29 -
 .../system-views/PG_INDEXES.md | 20 -
 .../system-views/PG_LOCKS.md | 34 -
 .../system-views/PG_NODE_ENV.md | 22 -
 .../system-views/PG_OS_THREADS.md | 20 -
 .../system-views/PG_PREPARED_STATEMENTS.md | 20 -
 .../system-views/PG_PREPARED_XACTS.md | 20 -
 .../system-views/PG_PUBLICATION_TABLES.md | 18 -
 .../PG_REPLICATION_ORIGIN_STATUS.md | 19 -
 .../system-views/PG_REPLICATION_SLOTS.md | 26 -
 .../system-views/PG_RLSPOLICIES.md | 22 -
 .../system-views/PG_ROLES.md | 39 -
 .../system-views/PG_RULES.md | 19 -
 .../system-views/PG_RUNNING_XACTS.md | 25 -
 .../system-views/PG_SECLABELS.md | 23 -
 .../system-views/PG_SESSION_IOSTAT.md | 27 -
 .../system-views/PG_SESSION_WLMSTAT.md | 39 -
 .../system-views/PG_SETTINGS.md | 31 -
 .../system-views/PG_SHADOW.md | 35 -
 .../system-views/PG_STATIO_ALL_INDEXES.md | 22 -
 .../system-views/PG_STATIO_ALL_SEQUENCES.md | 20 -
 .../system-views/PG_STATIO_ALL_TABLES.md | 26 -
 .../system-views/PG_STATIO_SYS_INDEXES.md | 22 -
 .../system-views/PG_STATIO_SYS_SEQUENCES.md | 20 -
 .../system-views/PG_STATIO_SYS_TABLES.md | 26 -
 .../system-views/PG_STATIO_USER_INDEXES.md | 22 -
 .../system-views/PG_STATIO_USER_SEQUENCES.md | 20 -
 .../system-views/PG_STATIO_USER_TABLES.md | 26 -
 .../system-views/PG_STATS.md | 30 -
 .../system-views/PG_STAT_ACTIVITY.md | 38 -
 .../system-views/PG_STAT_ACTIVITY_NG.md | 36 -
 .../system-views/PG_STAT_ALL_INDEXES.md | 25 -
 .../system-views/PG_STAT_ALL_TABLES.md | 37 -
 .../system-views/PG_STAT_BAD_BLOCK.md | 24 -
 .../system-views/PG_STAT_BGWRITER.md | 26 -
 .../system-views/PG_STAT_DATABASE.md | 34 -
 .../PG_STAT_DATABASE_CONFLICTS.md | 22 -
 .../system-views/PG_STAT_REPLICATION.md | 30 -
 .../system-views/PG_STAT_SUBSCRIPTION.md | 23 -
 .../system-views/PG_STAT_SYS_INDEXES.md | 23 -
 .../system-views/PG_STAT_SYS_TABLES.md | 37 -
 .../system-views/PG_STAT_USER_FUNCTIONS.md | 21 -
 .../system-views/PG_STAT_USER_INDEXES.md | 23 -
 .../system-views/PG_STAT_USER_TABLES.md | 37 -
 .../system-views/PG_STAT_XACT_ALL_TABLES.md | 26 -
 .../system-views/PG_STAT_XACT_SYS_TABLES.md | 26 -
 .../PG_STAT_XACT_USER_FUNCTIONS.md | 21 -
 .../system-views/PG_STAT_XACT_USER_TABLES.md | 26 -
 .../system-views/PG_TABLES.md | 25 -
 .../system-views/PG_TDE_INFO.md | 18 -
 .../system-views/PG_THREAD_WAIT_STATUS.md | 290 ---
 .../system-views/PG_TIMEZONE_ABBREVS.md | 18 -
 .../system-views/PG_TIMEZONE_NAMES.md | 19 -
 .../system-views/PG_TOTAL_MEMORY_DETAIL.md | 18 -
 .../PG_TOTAL_USER_RESOURCE_INFO.md | 32 -
 .../PG_TOTAL_USER_RESOURCE_INFO_OID.md | 32 -
 .../system-views/PG_USER.md | 34 -
 .../system-views/PG_USER_MAPPINGS.md | 23 -
 .../system-views/PG_VARIABLE_INFO.md | 26 -
 .../system-views/PG_VIEWS.md | 19 -
 .../system-views/PG_WLM_STATISTICS.md | 24 -
 .../system-views/PLAN_TABLE.md | 31 -
 .../system-views/system-views.md | 127 --
 .../viewing-system-catalogs.md | 132 --
 .../reference-guide/tool-reference/FAQ.md | 134 --
 .../tool-reference/client-tool/client-tool.md | 12 -
 .../client-tool/gsql/client-tool-gsql.md | 17 -
 .../client-tool/gsql/command-reference.md | 68 -
 .../client-tool/gsql/gsql-faq.md | 222 --
 .../client-tool/gsql/gsql-introduction.md | 142 --
 .../gsql/meta-command-reference.md | 214 --
 .../gsql/obtaining-help-information.md | 77 -
 .../client-tool/gsql/usage-guidelines.md | 201 --
 .../functions-of-mogdb-executable-scripts.md | 54 -
 .../tool-reference/server-tools/gs_cgroup.md | 421 ----
 .../tool-reference/server-tools/gs_check.md | 1372 -----------
 .../tool-reference/server-tools/gs_checkos.md | 228 --
 .../server-tools/gs_checkperf.md | 293 ---
 .../server-tools/gs_collector.md | 230 --
 .../tool-reference/server-tools/gs_dump.md | 578 -----
 .../tool-reference/server-tools/gs_dumpall.md | 291 ---
 .../tool-reference/server-tools/gs_encrypt.md | 135 --
 .../tool-reference/server-tools/gs_guc.md | 391 ----
 .../tool-reference/server-tools/gs_om.md | 421 ----
 .../server-tools/gs_plan_simulator.md | 117 -
 .../tool-reference/server-tools/gs_restore.md | 393 ----
 .../tool-reference/server-tools/gs_sdr.md | 425 ----
 .../tool-reference/server-tools/gs_ssh.md | 76 -
 .../tool-reference/server-tools/gs_watch.md | 52 -
 .../server-tools/server-tools.md | 26 -
 ...ogs-and-views-supported-by-gs_collector.md | 123 -
 .../tool-reference/tool-overview.md | 35 -
 .../tool-reference/tool-reference.md | 18 -
 .../dsscmd.md | 401 ----
 .../dssserver.md | 296 ---
 .../gs_backup.md | 147 --
 .../gs_basebackup.md | 166 --
 .../gs_ctl.md | 133 --
 .../gs_dropnode.md | 131 --
 .../gs_expansion.md | 178 --
 .../gs_initdb.md | 121 -
 .../gs_install.md | 127 --
 .../gs_postuninstall.md | 121 -
 .../gs_preinstall.md | 298 ---
 .../gs_probackup.md | 816 -------
 .../gs_sshexkey.md | 132 --
 .../gs_tar.md | 49 -
 .../gs_uninstall.md | 80 -
 .../gs_upgradectl.md | 337 ---
 .../gstrace.md | 98 -
 .../kadmin-local.md | 16 -
 .../kdb5_util.md | 16 -
 .../kdestroy.md | 16 -
 .../kinit.md | 16 -
 .../klist.md | 16 -
 .../krb5kdc.md | 16 -
 .../mogdb.md | 176 --
 .../pg_archivecleanup.md | 58 -
 .../pg_config.md | 118 -
 .../pg_controldata.md | 50 -
 .../pg_recvlogical.md | 157 --
 .../pg_resetxlog.md | 70 -
 .../tools-used-in-the-internal-system/pscp.md | 72 -
 .../tools-used-in-the-internal-system/pssh.md | 68 -
 .../tools-used-in-the-internal-system.md | 43 -
 .../transfer.py.md | 60 -
 .../v5.2/security-guide/security-guide.md | 10 -
 .../1-client-access-authentication.md | 769 -------
 .../2-managing-users-and-their-permissions.md | 1032 ---------
 .../security/3-configuring-database-audit.md | 372 ---
 .../4-setting-encrypted-equality-query.md | 332 ---
 .../security/5-setting-a-ledger-database.md | 397 ----
 .../security/6-transparent-data-encryption.md | 88 -
 .../security/database-security-management.md | 15 -
 .../en/docs-mogdb/v5.2/source-code-parsing.md | 98 -
 product/en/docs-mogdb/v5.2/toc.md | 1894 ----------------
 product/en/docs-mogdb/v5.2/toc_about.md | 17 -
 product/en/docs-mogdb/v5.2/toc_ai-features.md | 89 -
 .../v5.2/toc_characteristic_description.md | 154 --
 .../toc_common-faults-and-identification.md | 48 -
 .../v5.2/toc_communication-matrix.md | 5 -
 .../docs-mogdb/v5.2/toc_datatypes-and-sql.md | 313 ---
 product/en/docs-mogdb/v5.2/toc_dev.md | 319 ---
 product/en/docs-mogdb/v5.2/toc_error.md | 102 -
 .../v5.2/toc_extension-referecne.md | 24 -
 product/en/docs-mogdb/v5.2/toc_faqs.md | 12 -
 product/en/docs-mogdb/v5.2/toc_glossary.md | 5 -
 .../en/docs-mogdb/v5.2/toc_high_available.md | 17 -
 product/en/docs-mogdb/v5.2/toc_install.md | 13 -
 product/en/docs-mogdb/v5.2/toc_manage.md | 83 -
 .../v5.2/toc_parameters-and-tools.md | 393 ----
 product/en/docs-mogdb/v5.2/toc_performance.md | 33 -
 product/en/docs-mogdb/v5.2/toc_quickstart.md | 29 -
 product/en/docs-mogdb/v5.2/toc_secure.md | 12 -
 .../v5.2/toc_system-catalogs-and-functions.md | 311 ---
 product/en/docs-mogdb/v5.2/toc_upgrade.md | 10 -
 .../v5.2/upgrade-guide/1-upgrade-overview.md | 29 -
 .../upgrade-guide/2-read-before-upgrade.md | 72 -
 .../v5.2/upgrade-guide/3-in-place-upgrade.md | 132 --
 .../v5.2/upgrade-guide/4-rolling-upgrade.md | 114 -
 .../v5.2/upgrade-guide/upgrade-guide.md | 13 -
 .../docs-mogdb/v5.2/AI-features/ai-feature.md | 20 -
 .../ai4db/abo-optimizer/abo-optimizer.md | 12 -
 .../adaptive-plan-selection-best-practices.md | 31 -
 .../adaptive-plan-selection-overview.md | 10 -
 .../adaptive-plan-selection-prerequisites.md | 10 -
 ...adaptive-plan-selection-troubleshooting.md | 10 -
 .../adaptive-plan-selection-usage-guide.md | 22 -
 .../ai4db-adaptive-plan-selection.md | 18 -
 ...i4db-intelligent-cardinality-estimation.md | 18 -
 ...t-cardinality-estimation-best-practices.md | 67 -
 ...lligent-cardinality-estimation-overview.md | 10 -
 ...nt-cardinality-estimation-prerequisites.md | 10 -
 ...-cardinality-estimation-troubleshooting.md | 10 -
 ...gent-cardinality-estimation-usage-guide.md | 15 -
 .../ai-sub-functions-of-the-dbmind.md | 19 -
 .../anomaly-analysis-command-reference.md | 70 -
 ...lysis-multi-metric-correlation-analysis.md | 18 -
 ...aly-analysis-obtaining-help-information.md | 39 -
 .../anomaly-analysis-overview.md | 10 -
 .../anomaly-analysis-troubleshooting.md | 10 -
 .../anomaly-analysis-usage-guide.md | 26 -
 .../anomaly-analysis/anomaly-analysis.md | 14 -
 .../anomaly-detection-command-reference.md | 21 -
 ...ly-detection-obtaining-help-information.md | 41 -
 .../anomaly-detection-overview.md | 10 -
 .../anomaly-detection-troubleshooting.md | 11 -
 .../anomaly-detection-usage-guide.md | 54 -
 .../anomaly-detection/anomaly-detection.md | 14 -
 ...cast-trend-prediction-command-reference.md | 26 -
 ...trend-prediction-environment-deployment.md | 10 -
 ...d-prediction-obtaining-help-information.md | 57 -
 .../forcast-trend-prediction-overview.md | 10 -
 ...orcast-trend-prediction-troubleshooting.md | 11 -
 .../forcast-trend-prediction-usage-guide.md | 40 -
 .../forcast-trend-prediction.md | 15 -
 .../index-advisor-index-recommendation.md | 14 -
 .../single-query-index-recommendation.md | 71 -
 .../virtual-index.md | 125 -
 .../workload-level-index-recommendation.md | 136 --
 .../slow-sql-statements-command-reference.md | 20 -
 ...w-sql-statements-environment-deployment.md | 11 -
 ...l-statements-obtaining-help-information.md | 41 -
 .../slow-sql-statements-overview.md | 10 -
 .../slow-sql-statements-troubleshooting.md | 15 -
 .../slow-sql-statements-usage-guide.md | 34 -
 .../slow-sql-statements.md | 15 -
 ...l-statement-rewriting-command-reference.md | 19 -
 ...nt-rewriting-obtaining-help-information.md | 36 -
 ...writer-sql-statement-rewriting-overview.md | 19 -
 ...sql-statement-rewriting-troubleshooting.md | 10 -
 ...ter-sql-statement-rewriting-usage-guide.md | 42 -
 .../sql-rewriter-sql-statement-rewriting.md | 14 -
 ...ag-slow-sql-discovery-command-reference.md | 17 -
 ...ql-discovery-obtaining-help-information.md | 49 -
 .../sqldiag-slow-sql-discovery-overview.md | 18 -
 ...diag-slow-sql-discovery-troubleshooting.md | 11 -
 .../sqldiag-slow-sql-discovery-usage-guide.md | 96 -
 .../sqldiag-slow-sql-discovery.md | 22 -
 .../x-tuner-command-reference.md | 50 -
 .../x-tuner-examples.md | 162 --
 .../x-tuner-obtaining-help-information.md | 48 -
 .../x-tuner-overview.md | 10 -
 ...er-parameter-optimization-and-diagnosis.md | 15 -
 .../x-tuner-preparations.md | 186 --
 .../x-tuner-troubleshooting.md | 13 -
 .../ai4db/ai4db-autonomous-database-o&m.md | 34 -
 .../components-that-support-dbmind.md | 12 -
 .../prometheus-exporter-command-reference.md | 192 --
 ...metheus-exporter-environment-deployment.md | 173 --
 ...eus-exporter-obtaining-help-information.md | 16 -
 .../prometheus-exporter-overview.md | 12 -
 .../prometheus-exporter-troubleshooting.md | 16 -
 .../prometheus-exporter-usage-guide.md | 72 -
 .../prometheus-exporter.md | 15 -
 .../ai4db/dbmind-mode/component.md | 37 -
 .../ai4db/dbmind-mode/dbmind-mode.md | 51 -
 .../AI-features/ai4db/dbmind-mode/service.md | 207 --
 .../v5.2/AI-features/ai4db/dbmind-mode/set.md | 63 -
 .../v5.2/AI-features/db4ai/db4ai.md | 13 -
 ...query-for-model-training-and-prediction.md | 535 -----
 ...i-snapshots-for-data-version-management.md | 266 ---
 .../db4ai/full-process-ai/full-process-ai.md | 16 -
 .../full-process-ai/plpython-fenced-mode.md | 135 --
 .../AI-features/db4ai/native-db4ai-engine.md | 317 ---
 product/zh/docs-mogdb/v5.2/_index.md | 90 -
 .../MogDB-compared-to-openGauss.md | 134 --
 .../v5.2/about-mogdb/about-mogdb.md | 15 -
 .../mogdb-new-feature/release-note.md | 12 -
 .../2-docker-based-mogdb.md | 37 -
 .../open-source-components/DBMS-RANDOM.md | 532 -----
 .../open-source-components/compat-tools.md | 18 -
 .../open-source-components/mog_filedump.md | 106 -
 .../open-source-components/mog_xlogdump.md | 305 ---
 .../open-source-components/mogdb-monitor.md | 27 -
 .../open-source-components.md | 14 -
 .../v5.2/about-mogdb/terms-of-use.md | 22 -
 .../v5.2/about-mogdb/usage-limitations.md | 27 -
 .../administrator-guide.md | 17 -
 .../backup-and-restoration-overview.md | 95 -
 .../backup-and-restoration.md | 13 -
 .../flashback-restoration.md | 211 --
 .../logical-backup-and-restoration.md | 24 -
 .../physical-backup-and-restoration.md | 139 --
 .../column-store-tables-management.md | 401 ----
 ...mon-primary-backup-deployment-scenarios.md | 85 -
 .../database-deployment-scenario.md | 12 -
 .../resource-pooling-architecture.md | 20 -
 ...rrent-architectural-feature-constraints.md | 34 -
 ...-developer-environment-deployment-guide.md | 279 ---
 .../two-city-three-dc-dr.md | 189 --
 ...-and-gs_dumpall-to-export-data-overview.md | 97 -
 .../2-exporting-a-single-database.md | 307 ---
 .../3-exporting-all-databases.md | 135 --
 ...-by-a-user-without-required-permissions.md | 86 -
 .../exporting-data/exporting-data.md | 13 -
 .../importing-and-exporting-data.md | 11 -
 .../importing-data/1-import-modes.md | 18 -
 ...10-managing-concurrent-write-operations.md | 195 --
 ...ing-the-INSERT-statement-to-insert-data.md | 18 -
 ...OPY-FROM-STDIN-statement-to-import-data.md | 318 ---
 ...sing-a-gsql-meta-command-to-import-data.md | 217 --
 .../5-using-gs_restore-to-import-data.md | 272 ---
 .../6-updating-data-in-a-table.md | 174 --
 .../importing-data/7-deep-copy.md | 122 -
 .../importing-data/8-ANALYZE-table.md | 53 -
 .../9-doing-VACUUM-to-a-table.md | 22 -
 .../importing-data/importing-data.md | 19 -
 .../localization/character-set-support.md | 207 --
 .../localization/collation-support.md | 128 --
 .../localization/locale-support.md | 68 -
 .../localization/localization.md | 12 -
 .../1-introducing-mot/1-mot-introduction.md | 29 -
 .../2-mot-features-and-benefits.md | 22 -
 .../3-mot-key-technologies.md | 25 -
 .../4-mot-usage-scenarios.md | 22 -
 .../5-mot-performance-benchmarks.md | 202 --
 .../1-introducing-mot/introducing-mot.md | 16 -
 .../2-using-mot/1-using-mot-overview.md | 18 -
 .../2-using-mot/2-mot-preparation.md | 226 --
 .../2-using-mot/3-mot-deployment.md | 714 ------
 .../mot-engine/2-using-mot/4-mot-usage.md | 748 ------
 .../2-using-mot/5-mot-administration.md | 460 ----
 .../6-mot-sample-tpcc-benchmark.md | 116 -
 .../mot-engine/2-using-mot/using-mot.md | 17 -
 .../mot-engine/3-concepts-of-mot/3-1.md | 95 -
 .../mot-engine/3-concepts-of-mot/3-2.md | 191 --
 .../mot-engine/3-concepts-of-mot/3-3.md | 71 -
 .../mot-engine/3-concepts-of-mot/3-4.md | 22 -
 .../mot-engine/3-concepts-of-mot/3-5.md | 43 -
 .../mot-engine/3-concepts-of-mot/3-6.md | 210 --
 .../mot-engine/3-concepts-of-mot/3-7.md | 24 -
 .../mot-engine/3-concepts-of-mot/3-8.md | 78 -
 .../mot-engine/3-concepts-of-mot/3-9.md | 33 -
 .../3-concepts-of-mot/concepts-of-mot.md | 20 -
 .../mot-engine/4-appendix/1-references.md | 36 -
 .../mot-engine/4-appendix/2-glossary.md | 59 -
 .../mot-engine/4-appendix/mot-appendix.md | 11 -
 .../mot-engine/mot-engine.md | 13 -
 .../primary-and-standby-management.md | 135 --
 .../0-starting-and-stopping-mogdb.md | 219 --
 .../1-routine-maintenance-check-items.md | 178 --
 ...0-data-security-maintenance-suggestions.md | 29 -
 .../routine-maintenance/11-log-reference.md | 153 --
 .../2-checking-os-parameters.md | 180 --
 .../3-checking-mogdb-health-status.md | 648 ------
 .../4-checking-database-performance.md | 85 -
 .../5-checking-and-deleting-logs.md | 164 --
 .../6-checking-time-consistency.md | 52 -
 ...g-the-number-of-application-connections.md | 133 --
 .../8-routinely-maintaining-tables.md | 551 -----
 .../9-routinely-recreating-an-index.md | 92 -
 .../exporting-and-viewing-the-wdr.md | 101 -
 .../routine-maintenance.md | 24 -
 .../routine-maintenance/slow-sql-diagnosis.md | 139 --
 .../using-the-gsql-client-for-connection.md | 212 --
 .../abo-optimizer/adaptive-plan-selection.md | 42 -
 ...haracteristic-description-abo-optimizer.md | 11 -
 .../intelligent-cardinality-estimation.md | 46 -
 .../ai-capabilities/ai-capabilities.md | 12 -
 ...ection-forecast-and-exception-detection.md | 45 -
 ...-cause-analysis-for-slow-sql-statements.md | 42 -
 .../3-index-recommendation.md | 44 -
 .../4-parameter-tuning-and-diagnosis.md | 50 -
 .../5-slow-sql-statement-discovery.md | 43 -
 .../characteristic-description-ai4db.md | 14 -
 .../db4ai-database-driven-ai.md | 43 -
 .../1-standard-sql.md | 42 -
 .../2-standard-development-interfaces.md | 40 -
 .../3-postgresql-api-compatibility.md | 40 -
 .../ECPG.md | 113 -
 .../MogDB-MySQL-compatibility.md | 30 -
 .../MogDB-Oracle-compatibility.md | 30 -
 .../application-development-interfaces.md | 15 -
 .../characteristic-description-overview.md | 179 --
 .../characteristic-description.md | 20 -
 .../add-rowtype-attribute-to-the-view.md | 65 -
 ...tions-distinct-performance-optimization.md | 52 -
 ...aggregate-functions-support-keep-clause.md | 103 -
 ...e-functions-support-scenario-extensions.md | 48 -
 .../compatibility/authid-current-user.md | 98 -
 .../compatibility/compatibility.md | 39 -
 ...h-mysql-alias-support-for-single-quotes.md | 71 -
 ...ate-current_time-keywords-as-field-name.md | 83 -
 .../compatibility/custom-type-array.md | 67 -
 .../for-update-supports-outer-join.md | 58 -
 .../compatibility/format-error-backtrace.md | 139 --
 .../compatibility/insert-on-conflict.md | 115 -
 .../mod-function-float-to-int.md | 44 -
 .../modify-table-log-property.md | 105 -
 .../mogdb-supports-insert-all.md | 178 --
 .../nesting-of-aggregate-functions.md | 112 -
 .../oracle-dblink-syntax-compatibility.md | 242 --
 .../order-by-group-by-scenario-expansion.md | 94 -
 .../compatibility/pivot-and-unpivot.md | 669 ------
 ...hen-creating-package-function-procedure.md | 41 -
 ...bypass-method-when-merge-into-hit-index.md | 91 -
 ...es-to-procedure-and-function-parameters.md | 80 -
 ...-constants-in-package-as-default-values.md | 143 --
 .../support-passing-the-count-attribute.md | 50 -
 .../compatibility/support-plpgsql-subtype.md | 188 --
 .../support-q-quote-escape-character.md | 296 ---
 ...g-two-date-types-to-return-numeric-type.md | 64 -
 ...ntheses-for-function-without-parameters.md | 56 -
 .../compatibility/support-table-function.md | 65 -
 ...the-same-name-after-the-end-with-oracle.md | 75 -
 .../compatibility/support-where-current-of.md | 58 -
 .../1-access-control-model.md | 45 -
 .../10-row-level-access-control.md | 47 -
 .../11-password-strength-verification.md | 75 -
 ...ity-query-in-a-fully-encrypted-database.md | 118 -
 .../13-ledger-database-mechanism.md | 48 -
 .../14-transparent-data-encryption.md | 57 -
 ...ation-of-control-and-access-permissions.md | 50 -
 .../3-database-encryption-authentication.md | 40 -
 .../4-data-encryption-and-storage.md | 52 -
 .../database-security/5-database-audit.md | 40 -
 .../6-network-communication-security.md | 48 -
 .../database-security/7-resource-label.md | 53 -
 .../database-security/8-unified-audit.md | 72 -
 .../9-dynamic-data-anonymization.md | 98 -
 .../database-security/database-security.md | 23 -
 ...ort-for-functions-and-stored-procedures.md | 42 -
 .../10-autonomous-transaction.md | 48 -
 .../11-global-temporary-table.md | 49 -
 .../12-pseudocolumn-rownum.md | 47 -
 .../13-stored-procedure-debugging.md | 42 -
 ...-load-balancing-and-readwrite-isolation.md | 40 -
 .../15-in-place-update-storage-engine.md | 40 -
 .../16-publication-subscription.md | 57 -
 .../17-foreign-key-lock-enhancement.md | 49 -
 .../18-data-compression-in-oltp-scenarios.md | 43 -
 .../19-transaction-async-submit.md | 31 -
 .../enterprise-level-features/2-sql-hints.md | 44 -
 .../20-copy-import-optimization.md | 95 -
 .../21-dynamic-partition-pruning.md | 32 -
 .../22-sql-running-status-observation.md | 107 -
 .../23-index-creation-parallel-control.md | 28 -
 .../24-brin-index.md | 261 ---
 .../25-bloom-index.md | 215 --
 .../3-full-text-indexing.md | 57 -
 .../4-copy-interface-for-error-tolerance.md | 40 -
 .../5-partitioning.md | 65 -
 ...support-for-advanced-analysis-functions.md | 61 -
 .../7-materialized-view.md | 40 -
 .../8-hyperloglog.md | 42 -
 .../9-creating-an-index-online.md | 45 -
 .../enterprise-level-features.md | 39 -
 .../event-trigger.md | 43 -
 .../index-support-fuzzy-matching.md | 51 -
 .../pruning-order-by-in-subqueries.md | 211 --
 .../scroll-cursor.md | 49 -
 ...for-pruning-subquery-projection-columns.md | 88 -
 .../high-availability/1-primary-standby.md | 46 -
 .../10-adding-or-deleting-a-standby-server.md | 63 -
 ...-entering-the-maximum-availability-mode.md | 46 -
 .../12-parallel-logical-decoding.md | 52 -
 .../high-availability/13-dcf.md | 54 -
 .../high-availability/14-cm.md | 67 -
 .../high-availability/15-global-syscache.md | 47 -
 ...-a-standby-node-to-build-a-standby-node.md | 40 -
 .../17-two-city-three-dc-dr.md | 47 -
 .../2-logical-replication.md | 41 -
 .../high-availability/4-logical-backup.md | 44 -
 .../high-availability/5-physical-backup.md | 56 -
 .../6-automatic-job-retry-upon-failure.md | 97 -
 .../high-availability/7-ultimate-rto.md | 43 -
 .../8-cascaded-standby-server.md | 49 -
 .../high-availability/9-delayed-replay.md | 50 -
 ...omponent-supporting-two-node-deployment.md | 34 -
 .../cm-dual-network-segment-deployment.md | 175 --
 .../high-availability/ddl-query-of-view.md | 87 -
 ...fficiency-of-logical-backup-and-restore.md | 89 -
 ...vailability-based-on-the-paxos-protocol.md | 41 -
 .../high-availability/high-availability.md | 30 -
 .../high-performance/1-cbo-optimizer.md | 36 -
 .../high-performance/10-xlog-no-lock-flush.md | 36 -
 .../11-parallel-page-based-redo-for-ustore.md | 36 -
 ...store-execution-to-vectorized-execution.md | 110 -
 .../high-performance/2-llvm.md | 40 -
 .../high-performance/3-vectorized-engine.md | 47 -
 .../4-hybrid-row-column-store.md | 72 -
 .../5-adaptive-compression.md | 42 -
 ...-kunpeng-numa-architecture-optimization.md | 40 -
 .../8-high-concurrency-of-thread-pools.md | 37 -
 .../9-smp-for-parallel-execution.md | 47 -
 .../adaptive-two-phase-aggregation.md | 150 --
 .../astore-row-level-compression.md | 95 -
 .../btree-index-compression.md | 193 --
 ...hancement-of-tracing-backend-key-thread.md | 32 -
 .../enhancement-of-wal-redo-performance.md | 52 -
 .../high-performance/high-performance.md | 30 -
 .../ock-accelerated-data-transmission.md | 36 -
 ...ock-scrlock-accelerate-distributed-lock.md | 36 -
 .../ordering-operator-optimization.md | 114 -
 .../high-performance/parallel-index-scan.md | 151 --
 .../parallel-query-optimization.md | 92 -
 .../high-performance/sql-bypass.md | 141 --
 .../high-performance/tracing-SQL-function.md | 40 -
 .../2-workload-diagnosis-report.md | 76 -
 .../maintainability/3-slow-sql-diagnosis.md | 176 --
 .../4-session-performance-diagnosis.md | 110 -
 .../5-system-kpi-aided-diagnosis.md | 70 -
 .../autonomous-transaction-management.md | 72 -
 .../maintainability/built-in-stack-tool.md | 186 --
 .../maintainability/corrupt-files-handling.md | 64 -
 .../maintainability/dcf-module-tracing.md | 34 -
 .../error-when-writing-illegal-characters.md | 93 -
 .../maintainability/extension-splitting.md | 70 -
 .../maintainability/fault-diagnosis.md | 34 -
 .../light-lock-export-and-analysis.md | 32 -
 .../maintainability/maintainability.md | 23 -
 .../maintainability/pageinspect-pagehack.md | 254 ---
 .../maintainability/sql-patch.md | 128 --
 ...a-distributed-database-using-kubernetes.md | 36 -
 .../distributed-analysis-capabilities.md | 36 -
 .../distributed-database-capability.md | 36 -
 .../middleware/middleware.md | 12 -
 ...ency-escape-at-the-infrastructure-layer.md | 42 -
 .../workload-management.md | 10 -
 .../cm-fault/cm-cluster-brain-split-fault.md | 274 ---
 .../cm-fault/cm-cluster-manual-failover.md | 86 -
 .../cm-fault/cm-fault.md | 11 -
 .../common-fault-locating-cases.md | 17 -
 ...e-dump-occurs-after-installation-on-x86.md | 26 -
 ...core-dump-occurs-due-to-full-disk-space.md | 22 -
 ...settings-of-guc-parameter-log-directory.md | 20 -
 ...e-dump-occurs-when-removeipc-is-enabled.md | 24 -
 .../core-fault-locating.md | 13 -
 .../after-you-run-the-du-command.md | 32 -
 .../disk-space-usage-reaches-the-threshold.md | 58 -
 ...or-no-space-left-on-device-is-displayed.md | 64 -
 .../file-is-damaged-in-the-xfs-file-system.md | 22 -
 .../file-system-disk-memory.md | 16 -
 .../insufficient-memory.md | 20 -
 .../shared-memory-leak.md | 73 -
 .../when-the-tpcc-is-running.md | 20 -
 .../index-fault/b-tree-index-faults.md | 68 -
 .../index-fault/index-fault.md | 12 -
 .../index-fault/reindexing-fails.md | 29 -
 ...hen-a-user-specifies-only-an-index-name.md | 47 -
 ...-error-occurs-during-integer-conversion.md | 24 -
 .../different-data-is-displayed.md | 32 -
 .../forcibly-terminating-a-session.md | 62 -
 .../permission-session-data-type.md | 12 -
 .../performance-deterioration.md | 26 -
 .../primary-node-is-hung-in-demoting.md | 24 -
 .../service-ha-concurrency/query-failure.md | 77 -
 .../service-ha-concurrency.md | 15 -
 .../service-startup-failure.md | 91 -
 .../standby-node-in-the-need-repair-state.md | 20 -
 .../too-many-clients-already.md | 50 -
 ...alyzing-the-status-of-a-query-statement.md | 57 -
 ...ng-whether-a-query-statement-is-blocked.md | 56 -
 .../lock-wait-timeout-is-displayed.md | 25 -
 .../sql-fault/low-query-efficiency.md | 32 -
 .../slow-response-to-a-query-statement.md | 48 -
 .../sql-fault/sql-fault.md | 14 -
 ...ed-when-the-table-partition-is-modified.md | 46 -
 .../table-partition-table.md | 11 -
 .../table-size-does-not-change.md | 39 -
 .../common-fault-locating-methods.md | 283 ---
 .../common-faults-and-identification.md | 11 -
 .../docs-mogdb/v5.2/communication-matrix.md | 28 -
 .../developer-guide/1-1-stored-procedure.md | 16 -
 ...-introduction-to-autonomous-transaction.md | 17 -
 ...ction-supporting-autonomous-transaction.md | 42 -
 ...edure-supporting-autonomous-transaction.md | 45 -
 .../autonomous-transaction/4-restrictions.md | 150 --
 ...block-supporting-autonomous-transaction.md | 36 -
 .../1-development-based-on-jdbc-overview.md | 10 -
 .../10-example-common-operations.md | 286 ---
 ...e-retrying-sql-queries-for-applications.md | 205 --
 ...-and-exporting-data-through-local-files.md | 119 -
 ...rating-data-from-a-my-database-to-mogdb.md | 98 -
 .../14-example-logic-replication-code.md | 182 --
 ...-to-the-database-in-different-scenarios.md | 60 -
 .../15-JDBC/1-java-sql-Connection.md | 67 -
 .../15-JDBC/10-javax-sql-DataSource.md | 21 -
 .../15-JDBC/11-javax-sql-PooledConnection.md | 19 -
 .../15-JDBC/12-javax-naming-Context.md | 25 -
 ...-javax-naming-spi-InitialContextFactory.md | 16 -
 .../15-JDBC/14-CopyManager.md | 47 -
 .../15-JDBC/2-java-sql-CallableStatement.md | 46 -
 .../15-JDBC/3-java-sql-DatabaseMetaData.md | 199 --
 .../15-JDBC/4-java-sql-Driver.md | 22 -
 .../15-JDBC/5-java-sql-PreparedStatement.md | 70 -
 .../15-JDBC/6-java-sql-ResultSet.md | 154 --
 .../15-JDBC/7-java-sql-ResultSetMetaData.md | 36 -
 .../15-JDBC/8-java-sql-Statement.md | 69 -
 .../9-javax-sql-ConnectionPoolDataSource.md | 17 -
 .../15-JDBC/jdbc-interface-reference.md | 25 -
 ...kage-driver-class-and-environment-class.md | 55 -
 .../3-development-process.md | 12 -
 .../4-loading-the-driver.md | 59 -
 .../5-connecting-to-a-database.md | 111 -
 .../6-connecting-to-a-database-using-ssl.md | 152 --
 .../7-running-sql-statements.md | 249 --
 .../8-processing-data-in-a-result-set.md | 84 -
 .../8.1-log-management.md | 231 --
 .../9-closing-a-connection.md | 17 -
 .../connecting-to-a-database-using-uds.md | 48 -
 .../development-based-on-jdbc.md | 31 -
 ...imary-and-backup-cluster-load-balancing.md | 107 -
 .../jdbc-based-common-parameter-reference.md | 114 -
 .../jdbc-release-notes.md | 159 --
 .../1-development-based-on-odbc.md | 42 -
 ...es-dependent-libraries-and-header-files.md | 14 -
 ...nfiguring-a-data-source-in-the-linux-os.md | 334 ---
 .../4-development-process.md | 44 -
 ...mple-common-functions-and-batch-binding.md | 440 ----
 ...pplication-scenarios-and-configurations.md | 496 ----
 .../6-ODBC/2-0-odbc-overview.md | 10 -
 .../6-ODBC/2-1-SQLAllocEnv.md | 10 -
 .../6-ODBC/2-10-SQLExecDirect.md | 48 -
 .../6-ODBC/2-11-SQLExecute.md | 44 -
 .../6-ODBC/2-12-SQLFetch.md | 43 -
 .../6-ODBC/2-13-SQLFreeStmt.md | 10 -
 .../6-ODBC/2-14-SQLFreeConnect.md | 10 -
 .../6-ODBC/2-15-SQLFreeHandle.md | 43 -
 .../6-ODBC/2-16-SQLFreeEnv.md | 10 -
 .../6-ODBC/2-17-SQLPrepare.md | 46 -
 .../6-ODBC/2-18-SQLGetData.md | 53 -
 .../6-ODBC/2-19-SQLGetDiagRec.md | 74 -
 .../6-ODBC/2-2-SQLAllocConnect.md | 10 -
 .../6-ODBC/2-20-SQLSetConnectAttr.md | 47 -
 .../6-ODBC/2-21-SQLSetEnvAttr.md | 47 -
 .../6-ODBC/2-22-SQLSetStmtAttr.md | 47 -
 .../6-ODBC/2-23-Examples.md | 347 ---
 .../6-ODBC/2-3-SQLAllocHandle.md | 45 -
 .../6-ODBC/2-4-SQLAllocStmt.md | 10 -
 .../6-ODBC/2-5-SQLBindCol.md | 51 -
 .../6-ODBC/2-6-SQLBindParameter.md | 59 -
 .../6-ODBC/2-7-SQLColAttribute.md | 53 -
 .../6-ODBC/2-8-SQLConnect.md | 54 -
 .../6-ODBC/2-9-SQLDisconnect.md | 41 -
 .../6-ODBC/odbc-interface-reference.md | 34 -
 .../odbc-release-notes.md | 14 -
 ...1-database-connection-control-functions.md | 20 -
 .../10-PQstatus.md | 64 -
 .../2-PQconnectdbParams.md | 42 -
 .../3-PQconnectdb.md | 39 -
 .../4-PQconninfoParse.md | 31 -
 .../5-PQconnectStart.md | 30 -
 .../6-PQerrorMessage.md | 34 -
 .../7-PQsetdbLogin.md | 51 -
 .../8-PQfinish.md | 34 -
 .../9-PQreset.md | 34 -
 .../1-PQclear.md | 34 -
 .../10-PQntuples.md | 34 -
 .../11-PQprepare.md | 50 -
 .../12-PQresultStatus.md | 72 -
 .../2-PQexec.md | 42 -
 .../3-PQexecParams.md | 44 -
 .../4-PQexecParamsBatch.md | 46 -
 .../5-PQexecPrepared.md | 42 -
 .../6-PQexecPreparedBatch.md | 44 -
 .../7-PQfname.md | 36 -
 .../8-PQgetvalue.md | 42 -
 .../9-PQnfields.md | 34 -
 .../database-statement-execution-functions.md | 23 -
 ...ons-for-asynchronous-command-processing.md | 23 -
 .../2-PQsendQuery.md | 39 -
 .../3-PQsendQueryParams.md | 52 -
 .../4-PQsendPrepare.md | 46 -
 .../5-PQsendQueryPrepared.md | 50 -
 .../6-PQflush.md | 38 -
 .../1-PQgetCancel.md | 38 -
 .../2-PQfreeCancel.md | 34 -
 .../3-PQcancel.md | 41 -
 ...tions-for-canceling-queries-in-progress.md | 14 -
 .../2-libpq/libpq-api-reference.md | 13 -
 .../dependent-header-files-of-libpq.md | 10 -
 .../development-based-on-libpq.md | 17 -
 .../development-process.md | 34 -
 .../libpq-example.md | 280 ---
 .../libpq-release-notes.md | 14 -
 .../link-parameters.md | 53 -
 .../1-psycopg-based-development.md | 31 -
 .../10.1-example-common-operations.md | 101 -
 .../1-psycopg2-connect.md | 42 -
 .../10-connection-close.md | 32 -
 .../2-connection-cursor.md | 37 -
 .../3-cursor-execute-query-vars-list.md | 35 -
 .../4-curosr-executemany-query-vars-list.md | 35 -
 .../5-connection-commit.md | 32 -
 .../6-connection-rollback.md | 32 -
 .../7-cursor-fetchone.md | 30 -
 .../8-cursor-fetchall.md | 30 -
 .../9-cursor-close.md | 30 -
 .../psycopg-api-reference.md | 21 -
 .../12-psycopg2-release-notes.md | 44 -
 .../2-psycopg-package.md | 30 -
 .../3.1-development-process.md | 12 -
 .../4-connecting-to-a-database.md | 123 -
 ...daptation-of-python-values-to-sql-types.md | 35 -
 .../6-new-features-in-mogdb.md | 100 -
 .../9-connecting-to-the-database-using-ssl.md | 62 -
 .../developer-guide/dev/5-commissioning.md | 40 -
 .../dev/application-development-tutorial.md | 15 -
 .../design-specification.md | 355 ---
 ...roduction-to-development-specifications.md | 26 -
 .../naming-specification.md | 79 -
 .../overview-of-development-specifications.md | 26 -
 .../postgresql-compatibility.md | 130 --
 .../query-operations.md | 76 -
 .../syntax-specification.md | 92 -
 .../v5.2/developer-guide/developer-guide.md | 20 -
 .../developer-guide/extension/extension.md | 17 -
 .../foreign-data-wrapper/1-oracle_fdw.md | 118 -
 .../foreign-data-wrapper/2-mysql_fdw.md | 83 -
 .../foreign-data-wrapper/3-postgres_fdw.md | 104 -
 .../extension/foreign-data-wrapper/dblink.md | 179 --
 .../foreign-data-wrapper/fdw-introduction.md | 16 -
 .../foreign-data-wrapper/file_fdw.md | 91 -
 .../extension/pg_bulkload-user-guide.md | 204 --
 .../extension/pg_prewarm-user-guide.md | 108 -
 .../extension/pg_repack-user-guide.md | 245 --
 .../extension/pg_trgm-user-guide.md | 135 --
 .../postgis-extension/postgis-extension.md | 12 -
 .../postgis-extension/postgis-overview.md | 24 -
 .../postgis-support-and-constraints.md | 57 -
 .../postgis-extension/using-postgis.md | 97 -
 .../extension/wal2json-user-guide.md | 309 ---
 .../v5.2/developer-guide/extension/whale.md | 267 ---
 .../logical-decoding/1-logical-decoding.md | 68 -
 ...cal-decoding-by-sql-function-interfaces.md | 85 -
 .../logical-decoding/logical-decoding.md | 11 -
 .../logical-replication.md | 11 -
 .../publication-subscription/architecture.md | 16 -
 .../configuration-settings.md | 17 -
 .../publication-subscription/conflicts.md | 14 -
 .../publication-subscription/monitoring.md | 14 -
 .../publication-subscription.md | 30 -
 .../publication-subscription/publications.md | 22 -
 .../publication-subscription/quick-setup.md | 60 -
 .../publication-subscription/restrictions.md | 24 -
 .../publication-subscription/security.md | 20 -
 .../publication-subscription/subscriptions.md | 28 -
 .../1-materialized-view-overview.md | 
15 - .../1-full-materialized-view-overview.md | 10 - .../2-full-materialized-view-usage.md | 81 - ...terialized-view-support-and-constraints.md | 22 - .../full-materialized-view.md | 12 - ...-incremental-materialized-view-overview.md | 10 - .../2-incremental-materialized-view-usage.md | 102 - ...terialized-view-support-and-constraints.md | 31 - .../incremental-materialized-view.md | 12 - .../assessment-tool.md | 139 -- .../dolphin-extension/dolphin-extension.md | 13 - .../dolphin-extension/dolphin-installation.md | 12 - .../dolphin-extension/dolphin-overview.md | 10 - .../dolphin-extension/dolphin-restrictions.md | 16 - .../dolphin-reset-parameters.md | 158 -- .../dolphin-syntax/dolphin-syntax.md | 15 - .../dolphin-syntax/guc-parameters.md | 691 ------ .../dolphin-column-name-identifiers.md | 48 - .../identifiers/dolphin-identifiers.md | 10 - .../data-types/dolphin-binary-types.md | 80 - .../data-types/dolphin-bit-string-types.md | 44 - .../data-types/dolphin-bool-types.md | 31 - .../data-types/dolphin-character-types.md | 77 - .../data-types/dolphin-data-types.md | 16 - .../data-types/dolphin-date-time-types.md | 250 -- .../data-types/dolphin-enumeration-types.md | 82 - .../data-types/dolphin-numeric-types.md | 196 -- .../sql-reference/dolphin-dcl-syntax.md | 89 - .../sql-reference/dolphin-ddl-syntax.md | 122 - .../sql-reference/dolphin-dml-syntax.md | 26 - .../sql-reference/dolphin-keywords.md | 41 - .../sql-reference/dolphin-sql-reference.md | 17 - .../dolphin-conditional-expressions.md | 80 - .../expressions/dolphin-expressions.md | 10 - .../dolphin-advisory-lock-functions.md | 244 -- .../dolphin-aggregate-functions.md | 73 - ...phin-arithmetic-functions-and-operators.md | 240 -- .../dolphin-assignment-operators.md | 12 - .../dolphin-b-compatible-database-lock.md | 79 - ...phin-bit-string-functions-and-operators.md | 115 - ...cter-processing-functions-and-operators.md | 993 -------- .../dolphin-comment-operators.md | 28 - ...hin-compatible-operators-and-operations.md | 327 --- ...olphin-conditional-expression-functions.md | 126 -- ...time-processing-functions-and-operators.md | 2008 ----------------- .../dolphin-functions-and-operators.md | 25 - ...phin-json-jsonb-functions-and-operators.md | 999 -------- .../dolphin-logical-operators.md | 87 - ...network-address-functions-and-operators.md | 172 -- .../dolphin-system-information-functions.md | 76 - .../dolphin-type-conversion-functions.md | 58 - .../sql-syntax/dolphin-alter-database.md | 57 - .../sql-syntax/dolphin-alter-function.md | 99 - .../sql-syntax/dolphin-alter-procedure.md | 93 - .../sql-syntax/dolphin-alter-server.md | 91 - .../dolphin-alter-table-partition.md | 381 ---- .../sql-syntax/dolphin-alter-table.md | 314 --- .../sql-syntax/dolphin-alter-tablespace.md | 171 -- .../sql-syntax/dolphin-alter-view.md | 160 -- .../sql-syntax/dolphin-analyze-analyse.md | 93 - .../sql-reference/sql-syntax/dolphin-ast.md | 43 - .../sql-syntax/dolphin-checksum-table.md | 114 - .../sql-syntax/dolphin-create-database.md | 67 - .../sql-syntax/dolphin-create-function.md | 173 -- .../sql-syntax/dolphin-create-index.md | 251 --- .../sql-syntax/dolphin-create-procedure.md | 137 -- .../sql-syntax/dolphin-create-server.md | 86 - .../sql-syntax/dolphin-create-table-as.md | 72 - .../dolphin-create-table-partition.md | 1320 ----------- .../sql-syntax/dolphin-create-table.md | 541 ----- .../sql-syntax/dolphin-create-tablespace.md | 58 - .../sql-syntax/dolphin-create-trigger.md | 449 ---- .../sql-syntax/dolphin-create-view.md | 137 -- 
.../sql-syntax/dolphin-describe-table.md | 137 -- .../sql-reference/sql-syntax/dolphin-do.md | 61 - .../sql-syntax/dolphin-drop-database.md | 48 - .../sql-syntax/dolphin-drop-index.md | 64 - .../sql-syntax/dolphin-drop-tablespace.md | 50 - .../sql-syntax/dolphin-execute.md | 63 - .../sql-syntax/dolphin-explain.md | 304 --- .../sql-syntax/dolphin-flush-binary-logs.md | 36 - .../sql-syntax/dolphin-grant-revoke-proxy.md | 113 - .../sql-reference/sql-syntax/dolphin-grant.md | 112 - .../sql-syntax/dolphin-insert.md | 229 -- .../sql-reference/sql-syntax/dolphin-kill.md | 146 -- .../sql-syntax/dolphin-load-data.md | 149 -- .../sql-syntax/dolphin-optimize-table.md | 74 - .../sql-syntax/dolphin-prepare.md | 70 - .../sql-syntax/dolphin-rename-table.md | 72 - .../sql-syntax/dolphin-rename-user.md | 45 - .../sql-syntax/dolphin-revoke.md | 114 - .../sql-syntax/dolphin-select-hint.md | 49 - .../sql-syntax/dolphin-select.md | 597 ----- .../sql-syntax/dolphin-set-charset.md | 70 - .../sql-syntax/dolphin-set-password.md | 69 - .../sql-syntax/dolphin-show-character-set.md | 52 - .../sql-syntax/dolphin-show-collation.md | 62 - .../sql-syntax/dolphin-show-columns.md | 132 -- .../dolphin-show-create-database.md | 43 - .../dolphin-show-create-function.md | 62 - .../dolphin-show-create-procedure.md | 70 - .../sql-syntax/dolphin-show-create-table.md | 49 - .../sql-syntax/dolphin-show-create-trigger.md | 46 - .../sql-syntax/dolphin-show-create-view.md | 49 - .../sql-syntax/dolphin-show-databases.md | 90 - .../dolphin-show-function-status.md | 61 - .../sql-syntax/dolphin-show-grants.md | 40 - .../sql-syntax/dolphin-show-index.md | 81 - .../sql-syntax/dolphin-show-master-status.md | 49 - .../sql-syntax/dolphin-show-plugins.md | 76 - .../sql-syntax/dolphin-show-privileges.md | 78 - .../dolphin-show-procedure-status.md | 51 - .../sql-syntax/dolphin-show-processlist.md | 95 - .../sql-syntax/dolphin-show-slave-hosts.md | 103 - .../sql-syntax/dolphin-show-status.md | 499 ---- .../sql-syntax/dolphin-show-table-status.md | 109 - .../sql-syntax/dolphin-show-tables.md | 88 - .../sql-syntax/dolphin-show-triggers.md | 78 - .../sql-syntax/dolphin-show-variables.md | 61 - .../sql-syntax/dolphin-show-warnings.md | 175 -- .../sql-syntax/dolphin-sql-syntax.md | 80 - .../sql-syntax/dolphin-update.md | 252 --- .../sql-syntax/dolphin-use-db-name.md | 78 - .../dolphin-assignment-statements.md | 57 - .../dolphin-basic-statements.md | 10 - .../dolphin-stored-procedures.md | 10 - .../system-views/dolphin-INDEX_STATISTIC.md | 29 - .../dolphin-PG_TYPE_NONSTRICT_BASIC_VALUE.md | 17 - .../system-views/dolphin-system-views.md | 11 - .../migrating-data/data-check.md | 273 --- .../migrating-data/full-migration.md | 117 - .../migrating-data/incremental-migration.md | 149 -- .../migrating-data-from-mysql-to-mogdb.md | 20 - .../migrating-data/quick-mysql-migration.md | 470 ---- .../migrating-data/reverse-migration.md | 149 -- .../mysql-compatible-description.md | 56 - .../partition-management.md | 11 - .../benefits-of-partition-pruning.md | 66 - .../dynamic-partition-pruning.md | 260 --- ...whether-partition-pruning-has-been-used.md | 12 - ...-that-can-be-used-for-partition-pruning.md | 33 - .../partition-pruning/partition-pruning.md | 14 - .../static-partition-pruning.md | 60 - ...ns-for-choosing-a-partitioning-strategy.md | 12 - .../when-to-use-hash-partitioning.md | 45 - .../when-to-use-list-partitioning.md | 37 - .../when-to-use-range-partitioning.md | 46 - .../plpgsql/1-1-plpgsql-overview.md | 41 - .../plpgsql/1-10-other-statements.md | 20 - 
.../developer-guide/plpgsql/1-11-cursors.md | 186 -- .../plpgsql/1-12-retry-management.md | 26 - .../developer-guide/plpgsql/1-13-debugging.md | 176 -- .../developer-guide/plpgsql/1-14-package.md | 21 - .../developer-guide/plpgsql/1-2-data-types.md | 10 - .../plpgsql/1-3-data-type-conversion.md | 42 - .../plpgsql/1-4-arrays-and-records.md | 979 -------- .../plpgsql/1-5-declare-syntax.md | 103 - .../plpgsql/1-6-basic-statements.md | 253 --- .../plpgsql/1-7-dynamic-statements.md | 166 -- .../plpgsql/1-8-control-statements.md | 870 ------- .../plpgsql/1-9-transaction-management.md | 428 ---- .../advanced-packages/advanced-packages.md | 12 - .../basic-interfaces/PKG_SERVICE.md | 387 ---- .../basic-interfaces/basic-interfaces.md | 10 - .../scheduled-jobs/pkg-service.md | 366 --- .../scheduled-jobs/scheduled-jobs.md | 10 - .../developer-guide/user-defined-functions.md | 16 - .../v5.2/faqs/application-development-faqs.md | 8 - .../faqs/deployment-and-maintenance-faqs.md | 332 --- product/zh/docs-mogdb/v5.2/faqs/faqs.md | 15 - .../v5.2/faqs/high-availability-faqs.md | 28 - .../zh/docs-mogdb/v5.2/faqs/migration-faqs.md | 21 - .../zh/docs-mogdb/v5.2/faqs/product-faqs.md | 153 -- .../zh/docs-mogdb/v5.2/faqs/upgrade-faqs.md | 16 - product/zh/docs-mogdb/v5.2/glossary.md | 145 -- .../cluster-management/cluster-management.md | 20 - .../cm-configuration-parameter/cm-cm_agent.md | 364 --- .../cm-cm_server.md | 503 ----- .../cm-configuration-parameter.md | 13 - .../feature-introduction.md | 792 ------- .../introduction-to-cm_ctl-tool.md | 419 ---- .../introduction-to-cm_persist.md | 49 - ...to-installation-and-uninstallation-tool.md | 67 - .../manual-configuration-of-vip.md | 174 -- .../cluster-management/safety-design.md | 81 - .../high-available-dcf.md | 213 -- .../high-available-guide.md | 11 - .../docker-installation.md | 445 ---- .../installation-guide/installation-guide.md | 14 - .../environment-requirement.md | 75 - .../installation-preparation.md | 11 - .../os-configuration.md | 277 --- .../installation-guide/manual-installation.md | 383 ---- .../ptk-based-installation.md | 208 -- .../recommended-parameter-settings.md | 234 -- .../v5.2/mogeaver/mogeaver-overview.md | 47 - .../v5.2/mogeaver/mogeaver-release-notes.md | 167 -- .../zh/docs-mogdb/v5.2/mogeaver/mogeaver.md | 11 - product/zh/docs-mogdb/v5.2/overview.md | 181 -- .../TPCC-performance-tuning-guide.md | 788 ------- .../performance-tuning/performance-tuning.md | 13 - .../experience-in-rewriting-sql-statements.md | 63 - .../sql-tuning/hint-based-tuning.md | 1106 --------- .../introduction-to-the-sql-execution-plan.md | 140 -- .../sql-tuning/query-execution-process.md | 79 - ...etting-key-parameters-during-sql-tuning.md | 27 - ...iewing-and-modifying-a-table-definition.md | 77 - .../sql-tuning/sql-tuning.md | 18 - .../sql-tuning/tuning-process.md | 39 - .../typical-sql-optimization-methods.md | 712 ------ .../sql-tuning/updating-statistics.md | 58 - .../system-tuning/configuring-llvm.md | 93 - .../system-tuning/configuring-smp.md | 111 - .../system-tuning/configuring-ustore.md | 194 -- .../configuring-vector-engine.md | 59 - .../system-tuning/optimizing-os-parameters.md | 125 - .../resource-load-management-overview.md | 26 - .../resource-load-management.md | 11 - .../creating-resource-pool.md | 152 -- .../enabling-resource-load-management.md | 55 - .../resource-management-preparations.md | 13 - .../resource-planning.md | 34 - .../setting-control-group.md | 211 -- .../system-tuning/system-tuning.md | 15 - 
.../v5.2/performance-tuning/wdr/wdr-report.md | 385 ---- .../wdr/wdr-snapshot-schema.md | 258 --- .../v5.2/performance-tuning/wdr/wdr.md | 12 - .../container-based-installation.md | 199 -- .../installation-on-a-single-node.md | 180 -- .../quick-start/mogdb-access/mogdb-access.md | 13 - .../use-cli-to-access-mogdb/gsql.md | 216 -- .../use-cli-to-access-mogdb/pgcli.md | 85 - .../use-cli-to-access-mogdb.md | 11 - .../use-gui-tools-to-access-mogdb/dbeaver.md | 64 - .../mogeaver-usage.md | 60 - .../use-gui-tools-to-access-mogdb.md | 11 - .../use-middleware-to-access-mogdb.md | 11 - ...-configures-mogdb-data-source-reference.md | 175 -- ...-configures-mogdb-data-source-reference.md | 210 -- .../adonet.md | 16 - .../c-cpp.md | 15 - .../go.md | 16 - .../java.md | 14 - .../nodejs.md | 16 - .../python.md | 18 - .../rust.md | 16 - ...se-programming-language-to-access-mogdb.md | 16 - .../v5.2/quick-start/mogdb-playground.md | 46 - .../zh/docs-mogdb/v5.2/quick-start/mogila.md | 406 ---- .../v5.2/quick-start/quick-start.md | 14 - .../GAUSS-00001-GAUSS-00100.md | 828 ------- .../GAUSS-00101-GAUSS-00200.md | 836 ------- .../GAUSS-00201-GAUSS-00300.md | 748 ------ .../GAUSS-00301-GAUSS-00400.md | 804 ------- .../GAUSS-00401-GAUSS-00500.md | 692 ------ .../GAUSS-00501-GAUSS-00600.md | 724 ------ .../GAUSS-00601-GAUSS-00700.md | 748 ------ .../GAUSS-00701-GAUSS-00800.md | 764 ------- .../GAUSS-00801-GAUSS-00900.md | 700 ------ .../GAUSS-00901-GAUSS-01000.md | 756 ------- .../GAUSS-01001-GAUSS-01100.md | 812 ------- .../GAUSS-01101-GAUSS-01200.md | 780 ------- .../GAUSS-01201-GAUSS-01300.md | 822 ------- .../GAUSS-01301-GAUSS-01400.md | 748 ------ .../GAUSS-01401-GAUSS-01500.md | 820 ------- .../GAUSS-01501-GAUSS-01600.md | 644 ------ .../GAUSS-01601-GAUSS-01700.md | 732 ------ .../GAUSS-01701-GAUSS-01800.md | 750 ------ .../GAUSS-01801-GAUSS-01900.md | 740 ------ .../GAUSS-01901-GAUSS-02000.md | 614 ----- .../GAUSS-02001-GAUSS-02100.md | 376 --- .../GAUSS-02101-GAUSS-02200.md | 580 ----- .../GAUSS-02201-GAUSS-02300.md | 556 ----- .../GAUSS-02301-GAUSS-02400.md | 759 ------- .../GAUSS-02401-GAUSS-02500.md | 708 ------ .../GAUSS-02501-GAUSS-02600.md | 772 ------- .../GAUSS-02601-GAUSS-02700.md | 764 ------- .../GAUSS-02701-GAUSS-02800.md | 732 ------ .../GAUSS-02801-GAUSS-02900.md | 780 ------- .../GAUSS-02901-GAUSS-03000.md | 780 ------- .../GAUSS-03001-GAUSS-03100.md | 812 ------- .../GAUSS-03101-GAUSS-03200.md | 828 ------- .../GAUSS-03201-GAUSS-03300.md | 820 ------- .../GAUSS-03301-GAUSS-03400.md | 828 ------- .../GAUSS-03401-GAUSS-03500.md | 668 ------ .../GAUSS-03501-GAUSS-03600.md | 700 ------ .../GAUSS-03601-GAUSS-03700.md | 748 ------ .../GAUSS-03701-GAUSS-03800.md | 700 ------ .../GAUSS-03801-GAUSS-03900.md | 644 ------ .../GAUSS-03901-GAUSS-04000.md | 644 ------ .../GAUSS-04001-GAUSS-04100.md | 724 ------ .../GAUSS-04101-GAUSS-04200.md | 478 ---- .../GAUSS-04201-GAUSS-04300.md | 692 ------ .../GAUSS-04301-GAUSS-04400.md | 444 ---- .../GAUSS-04401-GAUSS-04500.md | 457 ---- .../GAUSS-04501-GAUSS-04600.md | 644 ------ .../GAUSS-04601-GAUSS-04700.md | 614 ----- .../GAUSS-04701-GAUSS-04800.md | 559 ----- .../GAUSS-04801-GAUSS-04900.md | 844 ------- .../GAUSS-04901-GAUSS-05000.md | 860 ------- .../GAUSS-05001-GAUSS-05100.md | 868 ------- .../GAUSS-05101-GAUSS-05200.md | 604 ----- .../GAUSS-05201-GAUSS-05300.md | 868 ------- .../GAUSS-05301-GAUSS-05400.md | 860 ------- .../GAUSS-05401-GAUSS-05500.md | 860 ------- .../GAUSS-05501-GAUSS-05600.md | 868 ------- .../GAUSS-05601-GAUSS-05700.md | 868 ------- 
.../GAUSS-05701-GAUSS-05800.md | 868 ------- .../GAUSS-05801-GAUSS-05900.md | 698 ------ .../GAUSS-05901-GAUSS-06000.md | 764 ------- .../GAUSS-06001-GAUSS-06100.md | 852 ------- .../GAUSS-06101-GAUSS-06200.md | 860 ------- .../GAUSS-06201-GAUSS-06300.md | 924 -------- .../GAUSS-06301-GAUSS-06400.md | 860 ------- .../GAUSS-06401-GAUSS-06500.md | 788 ------- .../GAUSS-06501-GAUSS-06600.md | 868 ------- .../GAUSS-06601-GAUSS-06700.md | 868 ------- .../GAUSS-06701-GAUSS-06800.md | 868 ------- .../GAUSS-06801-GAUSS-06900.md | 868 ------- .../GAUSS-06901-GAUSS-07000.md | 868 ------- .../GAUSS-07001-GAUSS-07100.md | 868 ------- .../GAUSS-07101-GAUSS-07200.md | 868 ------- .../GAUSS-07201-GAUSS-07300.md | 868 ------- .../GAUSS-07301-GAUSS-07400.md | 868 ------- .../GAUSS-07401-GAUSS-07500.md | 632 ------ .../GAUSS-50000-GAUSS-50999.md | 1100 --------- .../GAUSS-51000-GAUSS-51999.md | 1252 ---------- .../GAUSS-52000-GAUSS-52999.md | 900 -------- .../GAUSS-53000-GAUSS-53699.md | 1402 ------------ .../class00-class21.md | 42 - .../class0A-class0Z.md | 27 - .../class22-class24.md | 78 - .../class25-class40.md | 69 - .../class2B-class2F.md | 22 - .../class3B-class3F.md | 19 - .../class42-class44.md | 74 - .../class53-class58.md | 46 - .../classCG-classTS.md | 103 - .../classF0-classP0.md | 20 - .../classXX-classYY.md | 45 - .../description-of-sql-error-codes.md | 18 - .../sqlstate-values-of-mogdb-cm-error-code.md | 27 - .../sqlstate-values-of-mogdb-error-code.md | 20 - .../error-code-reference.md | 90 - .../third-party-library-error-codes.md | 29 - .../error-log-reference.md | 10 - .../kernel-error-message.md | 1328 ----------- .../aggregate-functions.md | 947 -------- .../ai-feature-functions.md | 200 -- .../array-functions-and-operators.md | 518 ----- .../binary-string-functions-and-operators.md | 210 -- .../bit-string-functions-and-operators.md | 155 -- ...cter-processing-functions-and-operators.md | 1841 --------------- .../comparison-operators.md | 27 - .../conditional-expressions-functions.md | 181 -- ...a-damage-detection-and-repair-functions.md | 220 -- ...time-processing-functions-and-operators.md | 1396 ------------ .../dynamic-data-masking-functions.md | 68 - .../encrypted-equality-functions.md | 187 -- .../event-trigger-functions.md | 116 - .../fault-injection-system-function.md | 20 - .../functions-and-operators.md | 50 - .../geometric-functions-and-operators.md | 948 -------- .../global-syscache-feature-functions.md | 99 - .../global-temporary-table-functions.md | 132 -- .../functions-and-operators/hash-function.md | 590 ----- .../hll-functions-and-operators.md | 894 -------- .../internal-functions-1.md | 90 - .../internal-functions-2.md | 126 -- .../internal-functions/internal-functions.md | 12 - .../functions-and-operators/json-functions.md | 783 ------- .../ledger-database-functions.md | 96 - .../logical-operators.md | 26 - .../mathematical-functions-and-operators.md | 1202 ---------- .../mode-matching-operators.md | 230 -- ...network-address-functions-and-operators.md | 456 ---- .../obsolete-functions.md | 18 - .../other-system-functions-1.md | 270 --- .../other-system-functions-2.md | 618 ----- .../other-system-functions.md | 12 - .../prompt-message-function.md | 21 - .../range-functions-and-operators.md | 435 ---- .../security-functions.md | 346 --- .../sequence-functions.md | 166 -- .../set-returning-functions.md | 131 -- .../statistics-information-functions-1.md | 653 ------ .../statistics-information-functions-2.md | 666 ------ .../statistics-information-functions-3.md | 665 
------ .../statistics-information-functions.md | 12 - .../access-privilege-inquiry-function.md | 314 --- .../comment-information-functions.md | 40 - .../other-function.md | 22 - .../schema-visibility-inquiry-functions.md | 90 - .../session-information-functions.md | 641 ------ .../system-catalog-information-functions.md | 457 ---- .../system-information-functions.md | 16 - .../transaction-ids-and-snapshots.md | 329 --- .../advisory-lock-functions.md | 190 -- ...ackup-and-restoration-control-functions.md | 256 --- .../configuration-settings-functions.md | 62 - .../database-object-functions.md | 438 ---- .../logical-replication-functions.md | 597 ----- .../other-functions.md | 764 ------- .../row-store-compression-system-functions.md | 95 - .../segment-page-storage-functions.md | 113 - .../server-signal-functions.md | 66 - .../snapshot-synchronization-functions.md | 24 - .../system-management-functions.md | 21 - .../undo-system-functions.md | 273 --- .../universal-file-access-functions.md | 148 -- .../text-search-functions-and-operators.md | 535 ----- .../trigger-functions.md | 55 - .../type-conversion-functions-1.md | 723 ------ .../type-conversion-functions-2.md | 722 ------ .../type-conversion-functions.md | 12 - .../window-functions.md | 649 ------ .../functions-and-operators/xml-functions.md | 380 ---- .../guc-parameters/AI-features.md | 100 - .../guc-parameters/DCF-parameters-settings.md | 377 ---- .../guc-parameters/HyperLogLog.md | 104 - .../guc-parameters/MogDB-transaction.md | 151 -- .../guc-parameters/alarm-detection.md | 73 - .../guc-parameters/appendix.md | 30 - .../guc-parameters/auditing/audit-switch.md | 133 -- .../guc-parameters/auditing/auditing.md | 12 - .../auditing/operation-audit.md | 219 -- .../auditing/user-and-permission-audit.md | 108 - .../guc-parameters/automatic-vacuuming.md | 212 -- .../guc-parameters/backend-compression.md | 180 -- .../backup-and-restoration-parameter.md | 46 - .../guc-parameters/cm-parameters.md | 10 - .../communication-library-parameters.md | 126 -- .../connection-and-authentication.md | 12 - .../connection-settings.md | 275 --- .../security-and-authentication.md | 413 ---- .../connection-pool-parameters.md | 23 - .../guc-parameters/data-import-export.md | 89 - .../default-settings-of-client-connection.md | 12 - .../other-default-parameters.md | 54 - .../statement-behavior.md | 245 -- .../zone-and-formatting.md | 190 -- .../guc-parameters/delimiter.md | 18 - .../guc-parameters/developer-options.md | 480 ---- .../error-reporting-and-logging.md | 13 - .../logging-content.md | 373 --- .../logging-destination.md | 174 -- .../logging-time.md | 119 - .../using-csv-log-output.md | 88 - .../guc-parameters/fault-tolerance.md | 135 -- .../guc-parameters/file-location.md | 86 - .../guc-parameters/flashback.md | 66 - .../global-syscache-parameters.md | 39 - .../guc-parameters/global-temporary-table.md | 44 - .../guc-parameters/guc-parameter-list.md | 795 ------- .../guc-parameters/guc-parameter-usage.md | 18 - .../guc-user-defined-functions.md | 54 - .../ha-replication/ha-replication.md | 12 - .../ha-replication/primary-server.md | 326 --- .../ha-replication/sending-server.md | 257 --- .../ha-replication/standby-server.md | 191 -- .../guc-parameters/load-management.md | 439 ---- .../guc-parameters/lock-management.md | 157 -- .../miscellaneous-parameters.md | 371 --- ...multi-level-cache-management-parameters.md | 69 - ...o-efficient-data-compression-algorithms.md | 24 - .../query-planning/genetic-query-optimizer.md | 112 - .../optimizer-cost-constants.md 
| 155 -- .../optimizer-method-configuration.md | 409 ---- .../query-planning/other-optimizer-options.md | 863 ------- .../query-planning/query-planning.md | 21 - .../reference-guide/guc-parameters/query.md | 296 --- .../reference-guide-guc-parameters.md | 56 - ...on-parameters-of-two-database-instances.md | 18 - .../guc-parameters/reserved-parameters.md | 30 - .../asynchronous-io-operations.md | 151 -- .../resource-consumption/background-writer.md | 139 -- .../cost-based-vacuum-delay.md | 76 - .../resource-consumption/disk-space.md | 35 - .../kernel-resource-usage.md | 41 - .../resource-consumption/memory.md | 391 ---- .../resource-consumption.md | 15 - .../resource-pooling-parameters.md | 285 --- .../guc-parameters/rollback-parameters.md | 38 - .../guc-parameters/scheduled-task.md | 42 - .../guc-parameters/security-configuration.md | 80 - .../performance-statistics.md | 35 - .../query-and-index-statistics-collector.md | 152 -- .../statistics-during-the-database-running.md | 11 - .../system-performance-snapshot.md | 124 - .../guc-parameters/thread-pool.md | 112 - .../guc-parameters/upgrade-parameters.md | 24 - .../compatibility-with-earlier-versions.md | 149 -- .../platform-and-client-compatibility.md | 579 ----- .../version-and-platform-compatibility.md | 11 - .../guc-parameters/wait-events.md | 23 - .../write-ahead-log/archiving.md | 130 -- .../write-ahead-log/checkpoints.md | 131 -- .../write-ahead-log/log-replay.md | 185 -- .../write-ahead-log/settings.md | 337 --- .../write-ahead-log/write-ahead-log.md | 13 - ...-parameters-supported-by-standby-server.md | 25 - .../v5.2/reference-guide/reference-guide.md | 19 - .../schema/DB4AI-schema/DB4AI-schema.md | 22 - .../DB4AI-schema/DB4AI.ARCHIVE_SNAPSHOT.md | 18 - .../DB4AI-schema/DB4AI.CREATE_SNAPSHOT.md | 21 - .../DB4AI.CREATE_SNAPSHOT_INTERNAL.md | 21 - .../DB4AI.MANAGE_SNAPSHOT_INTERNAL.md | 19 - .../DB4AI-schema/DB4AI.PREPARE_SNAPSHOT.md | 21 - .../DB4AI.PREPARE_SNAPSHOT_INTERNAL.md | 27 - .../DB4AI-schema/DB4AI.PUBLISH_SNAPSHOT.md | 18 - .../DB4AI-schema/DB4AI.PURGE_SNAPSHOT.md | 18 - .../DB4AI.PURGE_SNAPSHOT_INTERNAL.md | 17 - .../DB4AI-schema/DB4AI.SAMPLE_SNAPSHOT.md | 22 - .../schema/DB4AI-schema/DB4AI.SNAPSHOT.md | 28 - .../schema/DBE_PERF/DBE_PERF.md | 33 - .../schema/DBE_PERF/cache-io/Cache-IO.md | 38 - .../cache-io/GLOBAL_STATIO_ALL_INDEXES.md | 23 - .../cache-io/GLOBAL_STATIO_ALL_SEQUENCES.md | 21 - .../cache-io/GLOBAL_STATIO_ALL_TABLES.md | 27 - .../cache-io/GLOBAL_STATIO_SYS_INDEXES.md | 23 - .../cache-io/GLOBAL_STATIO_SYS_SEQUENCES.md | 21 - .../cache-io/GLOBAL_STATIO_SYS_TABLES.md | 27 - .../cache-io/GLOBAL_STATIO_USER_INDEXES.md | 23 - .../cache-io/GLOBAL_STATIO_USER_SEQUENCES.md | 21 - .../cache-io/GLOBAL_STATIO_USER_TABLES.md | 27 - .../DBE_PERF/cache-io/GLOBAL_STAT_DB_CU.md | 20 - .../cache-io/GLOBAL_STAT_SESSION_CU.md | 18 - .../DBE_PERF/cache-io/STATIO_ALL_INDEXES.md | 22 - .../DBE_PERF/cache-io/STATIO_ALL_SEQUENCES.md | 20 - .../DBE_PERF/cache-io/STATIO_ALL_TABLES.md | 26 - .../DBE_PERF/cache-io/STATIO_SYS_INDEXES.md | 22 - .../DBE_PERF/cache-io/STATIO_SYS_SEQUENCES.md | 20 - .../DBE_PERF/cache-io/STATIO_SYS_TABLES.md | 26 - .../DBE_PERF/cache-io/STATIO_USER_INDEXES.md | 22 - .../cache-io/STATIO_USER_SEQUENCES.md | 20 - .../DBE_PERF/cache-io/STATIO_USER_TABLES.md | 26 - .../cache-io/SUMMARY_STATIO_ALL_INDEXES.md | 18 - .../cache-io/SUMMARY_STATIO_ALL_SEQUENCES.md | 19 - .../cache-io/SUMMARY_STATIO_ALL_TABLES.md | 25 - .../cache-io/SUMMARY_STATIO_SYS_INDEXES.md | 20 - .../cache-io/SUMMARY_STATIO_SYS_SEQUENCES.md | 19 - 
.../cache-io/SUMMARY_STATIO_SYS_TABLES.md | 25 - .../cache-io/SUMMARY_STATIO_USER_INDEXES.md | 20 - .../cache-io/SUMMARY_STATIO_USER_SEQUENCES.md | 19 - .../cache-io/SUMMARY_STATIO_USER_TABLES.md | 25 - .../DBE_PERF/configuration/CONFIG_SETTINGS.md | 31 - .../configuration/GLOBAL_CONFIG_SETTINGS.md | 32 - .../DBE_PERF/configuration/configuration.md | 11 - .../schema/DBE_PERF/file/FILE_IOSTAT.md | 28 - .../schema/DBE_PERF/file/FILE_REDO_IOSTAT.md | 22 - .../DBE_PERF/file/GLOBAL_FILE_IOSTAT.md | 29 - .../DBE_PERF/file/GLOBAL_FILE_REDO_IOSTAT.md | 23 - .../schema/DBE_PERF/file/GLOBAL_REL_IOSTAT.md | 20 - .../schema/DBE_PERF/file/LOCAL_REL_IOSTAT.md | 19 - .../DBE_PERF/file/SUMMARY_FILE_IOSTAT.md | 28 - .../DBE_PERF/file/SUMMARY_FILE_REDO_IOSTAT.md | 22 - .../DBE_PERF/file/SUMMARY_REL_IOSTAT.md | 19 - .../schema/DBE_PERF/file/file.md | 18 - .../GLOBAL_PLANCACHE_CLEAN.md | 10 - .../GLOBAL_PLANCACHE_STATUS.md | 23 - .../global-plancache/global-plancache.md | 13 - .../DBE_PERF/instance/GLOBAL_INSTANCE_TIME.md | 19 - .../schema/DBE_PERF/instance/INSTANCE_TIME.md | 29 - .../schema/DBE_PERF/instance/instance.md | 11 - .../schema/DBE_PERF/lock/GLOBAL_LOCKS.md | 29 - .../schema/DBE_PERF/lock/LOCKS.md | 34 - .../schema/DBE_PERF/lock/lock.md | 11 - .../memory/GLOBAL_MEMORY_NODE_DETAIL.md | 18 - .../memory/GLOBAL_SHARED_MEMORY_DETAIL.md | 22 - .../DBE_PERF/memory/MEMORY_NODE_DETAIL.md | 18 - .../schema/DBE_PERF/memory/memory-schema.md | 12 - .../object/GLOBAL_STAT_ALL_INDEXES.md | 24 - .../DBE_PERF/object/GLOBAL_STAT_ALL_TABLES.md | 37 - .../DBE_PERF/object/GLOBAL_STAT_BAD_BLOCK.md | 23 - .../DBE_PERF/object/GLOBAL_STAT_DATABASE.md | 35 - .../object/GLOBAL_STAT_DATABASE_CONFLICTS.md | 23 - .../object/GLOBAL_STAT_SYS_INDEXES.md | 24 - .../DBE_PERF/object/GLOBAL_STAT_SYS_TABLES.md | 37 - .../object/GLOBAL_STAT_USER_FUNCTIONS.md | 22 - .../object/GLOBAL_STAT_USER_INDEXES.md | 24 - .../object/GLOBAL_STAT_USER_TABLES.md | 37 - .../object/GLOBAL_STAT_XACT_ALL_TABLES.md | 27 - .../object/GLOBAL_STAT_XACT_SYS_TABLES.md | 27 - .../object/GLOBAL_STAT_XACT_USER_FUNCTIONS.md | 22 - .../object/GLOBAL_STAT_XACT_USER_TABLES.md | 27 - .../DBE_PERF/object/STAT_ALL_INDEXES.md | 23 - .../schema/DBE_PERF/object/STAT_ALL_TABLES.md | 36 - .../schema/DBE_PERF/object/STAT_BAD_BLOCK.md | 24 - .../schema/DBE_PERF/object/STAT_DATABASE.md | 34 - .../object/STAT_DATABASE_CONFLICTS.md | 22 - .../DBE_PERF/object/STAT_SYS_INDEXES.md | 23 - .../schema/DBE_PERF/object/STAT_SYS_TABLES.md | 36 - .../DBE_PERF/object/STAT_USER_FUNCTIONS.md | 21 - .../DBE_PERF/object/STAT_USER_INDEXES.md | 23 - .../DBE_PERF/object/STAT_USER_TABLES.md | 36 - .../DBE_PERF/object/STAT_XACT_ALL_TABLES.md | 26 - .../DBE_PERF/object/STAT_XACT_SYS_TABLES.md | 26 - .../object/STAT_XACT_USER_FUNCTIONS.md | 21 - .../DBE_PERF/object/STAT_XACT_USER_TABLES.md | 26 - .../object/SUMMARY_STAT_ALL_INDEXES.md | 21 - .../object/SUMMARY_STAT_ALL_TABLES.md | 35 - .../DBE_PERF/object/SUMMARY_STAT_BAD_BLOCK.md | 22 - .../DBE_PERF/object/SUMMARY_STAT_DATABASE.md | 33 - .../object/SUMMARY_STAT_DATABASE_CONFLICTS.md | 21 - .../object/SUMMARY_STAT_SYS_INDEXES.md | 21 - .../object/SUMMARY_STAT_SYS_TABLES.md | 35 - .../object/SUMMARY_STAT_USER_FUNCTIONS.md | 20 - .../object/SUMMARY_STAT_USER_INDEXES.md | 21 - .../object/SUMMARY_STAT_USER_TABLES.md | 35 - .../object/SUMMARY_STAT_XACT_ALL_TABLES.md | 25 - .../object/SUMMARY_STAT_XACT_SYS_TABLES.md | 25 - .../SUMMARY_STAT_XACT_USER_FUNCTIONS.md | 20 - .../object/SUMMARY_STAT_XACT_USER_TABLES.md | 25 - 
.../schema/DBE_PERF/object/object-schema.md | 51 - .../operator/GLOBAL_OPERATOR_HISTORY.md | 37 - .../operator/GLOBAL_OPERATOR_HISTORY_TABLE.md | 10 - .../operator/GLOBAL_OPERATOR_RUNTIME.md | 38 - .../DBE_PERF/operator/OPERATOR_HISTORY.md | 10 - .../operator/OPERATOR_HISTORY_TABLE.md | 37 - .../DBE_PERF/operator/OPERATOR_RUNTIME.md | 38 - .../DBE_PERF/operator/operator-schema.md | 15 - .../schema/DBE_PERF/os/GLOBAL_OS_RUNTIME.md | 21 - .../schema/DBE_PERF/os/GLOBAL_OS_THREADS.md | 20 - .../schema/DBE_PERF/os/OS_RUNTIME.md | 20 - .../schema/DBE_PERF/os/OS_THREADS.md | 20 - .../schema/DBE_PERF/os/os-schema.md | 13 - .../query/GLOBAL_SLOW_QUERY_HISTORY.md | 10 - .../DBE_PERF/query/GLOBAL_SLOW_QUERY_INFO.md | 10 - .../query/GLOBAL_STATEMENT_COMPLEX_HISTORY.md | 83 - .../GLOBAL_STATEMENT_COMPLEX_HISTORY_TABLE.md | 10 - .../query/GLOBAL_STATEMENT_COMPLEX_RUNTIME.md | 63 - .../DBE_PERF/query/GLOBAL_STATEMENT_COUNT.md | 41 - .../DBE_PERF/query/GS_SLOW_QUERY_HISTORY.md | 10 - .../DBE_PERF/query/GS_SLOW_QUERY_INFO.md | 43 - .../schema/DBE_PERF/query/STATEMENT.md | 81 - .../query/STATEMENT_COMPLEX_HISTORY.md | 10 - .../query/STATEMENT_COMPLEX_HISTORY_TABLE.md | 10 - .../query/STATEMENT_COMPLEX_RUNTIME.md | 63 - .../schema/DBE_PERF/query/STATEMENT_COUNT.md | 44 - .../DBE_PERF/query/STATEMENT_HISTORY_query.md | 98 - .../STATEMENT_RESPONSETIME_PERCENTILE.md | 17 - .../STATEMENT_WLMSTAT_COMPLEX_RUNTIME.md | 38 - .../DBE_PERF/query/SUMMARY_STATEMENT.md | 56 - .../DBE_PERF/query/SUMMARY_STATEMENT_COUNT.md | 40 - .../schema/DBE_PERF/query/query-schema.md | 27 - .../schema/DBE_PERF/rto/RTO-RPO.md | 12 - .../schema/DBE_PERF/rto/global_rto_status.md | 17 - .../global_streaming_hadr_rto_and_rpo_stat.md | 23 - .../rto/gs_hadr_local_rto_and_rpo_stat.md | 27 - .../session-thread/GLOBAL_SESSION_MEMORY.md | 20 - .../GLOBAL_SESSION_MEMORY_DETAIL.md | 24 - .../session-thread/GLOBAL_SESSION_STAT.md | 21 - .../GLOBAL_SESSION_STAT_ACTIVITY.md | 37 - .../session-thread/GLOBAL_SESSION_TIME.md | 20 - .../GLOBAL_THREADPOOL_STATUS.md | 10 - .../GLOBAL_THREAD_WAIT_STATUS.md | 31 - .../session-thread/LOCAL_ACTIVE_SESSION.md | 47 - .../session-thread/LOCAL_THREADPOOL_STATUS.md | 24 - .../session-thread/SESSION_CPU_RUNTIME.md | 25 - .../DBE_PERF/session-thread/SESSION_MEMORY.md | 19 - .../session-thread/SESSION_MEMORY_DETAIL.md | 23 - .../session-thread/SESSION_MEMORY_RUNTIME.md | 25 - .../DBE_PERF/session-thread/SESSION_STAT.md | 20 - .../session-thread/SESSION_STAT_ACTIVITY.md | 36 - .../DBE_PERF/session-thread/SESSION_TIME.md | 19 - .../STATEMENT_IOSTAT_COMPLEX_RUNTIME.md | 24 - .../session-thread/THREAD_WAIT_STATUS.md | 27 - .../DBE_PERF/session-thread/session-thread.md | 27 - .../GLOBAL_TRANSACTIONS_PREPARED_XACTS.md | 20 - .../GLOBAL_TRANSACTIONS_RUNNING_XACTS.md | 25 - .../SUMMARY_TRANSACTIONS_PREPARED_XACTS.md | 20 - .../SUMMARY_TRANSACTIONS_RUNNING_XACTS.md | 25 - .../TRANSACTIONS_PREPARED_XACTS.md | 20 - .../transaction/TRANSACTIONS_RUNNING_XACTS.md | 25 - .../transaction/transaction-schema.md | 15 - .../schema/DBE_PERF/utility/BGWRITER_STAT.md | 26 - .../DBE_PERF/utility/CLASS_VITAL_INFO.md | 19 - .../DBE_PERF/utility/GLOBAL_BGWRITER_STAT.md | 27 - .../utility/GLOBAL_CANDIDATE_STATUS.md | 22 - .../DBE_PERF/utility/GLOBAL_CKPT_STATUS.md | 22 - .../utility/GLOBAL_DOUBLE_WRITE_STATUS.md | 27 - .../utility/GLOBAL_GET_BGWRITER_STATUS.md | 21 - .../utility/GLOBAL_PAGEWRITER_STATUS.md | 23 - .../utility/GLOBAL_RECORD_RESET_TIME.md | 17 - .../utility/GLOBAL_RECOVERY_STATUS.md | 24 - .../DBE_PERF/utility/GLOBAL_REDO_STATUS.md 
| 38 - .../utility/GLOBAL_REPLICATION_SLOTS.md | 26 - .../utility/GLOBAL_REPLICATION_STAT.md | 31 - .../utility/GLOBAL_SINGLE_FLUSH_DW_STATUS.md | 21 - .../DBE_PERF/utility/REPLICATION_SLOTS.md | 25 - .../DBE_PERF/utility/REPLICATION_STAT.md | 30 - .../DBE_PERF/utility/SUMMARY_USER_LOGIN.md | 20 - .../schema/DBE_PERF/utility/USER_LOGIN.md | 20 - .../schema/DBE_PERF/utility/utility.md | 27 - .../wait-events/GLOBAL_WAIT_EVENTS.md | 25 - .../DBE_PERF/wait-events/WAIT_EVENTS.md | 25 - .../wait-events/dbe-perf-wait-events.md | 11 - .../WLM_USER_RESOURCE_CONFIG.md | 25 - .../WLM_USER_RESOURCE_RUNTIME.md | 26 - .../workload-manager/workload-manager.md | 11 - .../workload/GLOBAL_USER_TRANSACTION.md | 29 - .../workload/GLOBAL_WORKLOAD_TRANSACTION.md | 29 - .../workload/SUMMARY_WORKLOAD_SQL_COUNT.md | 24 - .../SUMMARY_WORKLOAD_SQL_ELAPSE_TIME.md | 33 - .../workload/SUMMARY_WORKLOAD_TRANSACTION.md | 28 - .../DBE_PERF/workload/USER_TRANSACTION.md | 28 - .../DBE_PERF/workload/WORKLOAD_SQL_COUNT.md | 23 - .../workload/WORKLOAD_SQL_ELAPSE_TIME.md | 32 - .../DBE_PERF/workload/WORKLOAD_TRANSACTION.md | 28 - .../DBE_PERF/workload/workload-schema.md | 18 - .../DBE_PLDEBUGGER-schema.md | 191 -- .../DBE_PLDEBUGGER.abort.md | 23 - .../DBE_PLDEBUGGER.add_breakpoint.md | 18 - .../DBE_PLDEBUGGER.attach.md | 23 - .../DBE_PLDEBUGGER.backtrace.md | 20 - .../DBE_PLDEBUGGER.continue.md | 26 - .../DBE_PLDEBUGGER.delete_breakpoint.md | 17 - .../DBE_PLDEBUGGER.disable_breakpoint.md | 17 - .../DBE_PLDEBUGGER.enable_breakpoint.md | 17 - .../DBE_PLDEBUGGER.finish.md | 19 - .../DBE_PLDEBUGGER.info_breakpoints.md | 20 - .../DBE_PLDEBUGGER.info_code.md | 19 - .../DBE_PLDEBUGGER.info_locals.md | 21 - .../DBE_PLDEBUGGER.local_debug_server_info.md | 18 - .../DBE_PLDEBUGGER.next.md | 19 - .../DBE_PLDEBUGGER.print_var.md | 22 - .../DBE_PLDEBUGGER.set_var.md | 18 - .../DBE_PLDEBUGGER.step.md | 19 - .../DBE_PLDEBUGGER.turn_off.md | 24 - .../DBE_PLDEBUGGER.turn_on.md | 25 - .../DBE_PLDEVELOPER.gs_errors.md | 24 - .../DBE_PLDEVELOPER.gs_source.md | 36 - .../schema/DBE_PLDEVELOPER/DBE_PLDEVELOPER.md | 13 - .../DBE_SQL_UTIL-Schema.md | 17 - .../DBE_SQL_UTIL.create_abort_sql_patch.md | 22 - .../DBE_SQL_UTIL.create_hint_sql_patch.md | 22 - .../DBE_SQL_UTIL.disable_sql_patch.md | 19 - .../DBE_SQL_UTIL.drop_sql_patch.md | 19 - .../DBE_SQL_UTIL.enable_sql_patch.md | 19 - .../DBE_SQL_UTIL.show_sql_patch.md | 22 - .../INFORMATION_SCHEMA_CATALOG_NAME.md | 16 - .../_PG_FOREIGN_DATA_WRAPPERS.md | 22 - .../information-schema/_PG_FOREIGN_SERVERS.md | 24 - .../information-schema/_PG_FOREIGN_TABLES.md | 22 - .../_PG_FOREIGN_TABLE_COLUMNS.md | 19 - .../information-schema/_PG_USER_MAPPINGS.md | 22 - .../information-schema/information-schema.md | 25 - .../v5.2/reference-guide/schema/schema.md | 34 - .../reference-guide/sql-reference/alias.md | 54 - .../appendix/extended-functions.md | 36 - .../sql-reference/appendix/extended-syntax.md | 52 - .../gin-indexes/gin-indexes-introduction.md | 16 - .../appendix/gin-indexes/gin-indexes.md | 13 - .../gin-indexes/gin-tips-and-tricks.md | 26 - .../appendix/gin-indexes/implementation.md | 24 - .../appendix/gin-indexes/scalability.md | 50 - .../appendix/sql-reference-appendix.md | 12 - .../sql-reference/constant-and-macro.md | 24 - .../sql-reference/dcl-syntax-overview.md | 50 - .../sql-reference/ddl-syntax-overview.md | 228 -- .../sql-reference/dml-syntax-overview.md | 51 - .../expressions/array-expressions.md | 93 - .../expressions/condition-expressions.md | 225 -- .../sql-reference/expressions/expressions.md | 14 - 
.../expressions/row-expressions.md | 28 - .../expressions/simple-expressions.md | 147 -- .../expressions/subquery-expressions.md | 144 -- .../additional-features.md | 13 - .../gathering-document-statistics.md | 50 - .../manipulating-queries.md | 52 - .../manipulating-tsvector.md | 31 - .../additional-features/rewriting-queries.md | 68 - .../configuration-examples.md | 116 - .../controlling-text-search.md | 13 - .../highlighting-results.md | 66 - .../parsing-documents.md | 52 - .../parsing-queries.md | 74 - .../ranking-search-results.md | 108 - .../dictionaries/dictionaries-overview.md | 36 - .../dictionaries/dictionaries.md | 16 - .../dictionaries/ispell-dictionary.md | 49 - .../dictionaries/simple-dictionary.md | 64 - .../dictionaries/snowball-dictionary.md | 14 - .../dictionaries/stop-words.md | 33 - .../dictionaries/synonym-dictionary.md | 116 - .../dictionaries/thesaurus-dictionary.md | 92 - .../full-text-search/full-text-search.md | 18 - .../introduction/basic-text-matching.md | 56 - .../introduction/configurations.md | 21 - .../introduction/full-text-retrieval.md | 38 - .../full-text-search-introduction.md | 13 - .../introduction/what-is-a-document.md | 33 - .../full-text-search/limitations.md | 16 - .../sql-reference/full-text-search/parser.md | 108 - .../constraints-on-index-use.md | 44 - .../tables-and-indexes/creating-an-index.md | 65 - .../tables-and-indexes/searching-a-table.md | 91 - .../tables-and-indexes/tables-and-indexes.md | 12 - .../testing-a-configuration.md | 65 - .../testing-a-dictionary.md | 29 - .../testing-a-parser.md | 74 - .../testing-and-debugging-text-search.md | 12 - .../sql-reference/keywords/keywords-1.md | 451 ---- .../sql-reference/keywords/keywords-2.md | 489 ---- .../sql-reference/keywords/keywords.md | 12 - .../sql-reference/mogdb-sql.md | 37 - .../sql-reference/ordinary-table.md | 52 - .../sql-reference/partition-table.md | 1249 ---------- .../sql-reference-anonymous-block.md | 77 - .../sql-reference/sql-reference-contraints.md | 152 -- .../sql-reference/sql-reference-cursor.md | 348 --- .../sql-reference/sql-reference-index.md | 159 -- .../sql-reference/sql-reference-llvm.md | 90 - .../sql-reference/sql-reference-lock.md | 76 - .../sql-reference/sql-reference-trigger.md | 158 -- .../sql-reference/sql-reference.md | 33 - .../sql-reference/sub-query.md | 181 -- .../sql-reference/system-operation.md | 38 - .../transaction/sql-reference-transaction.md | 12 - .../transaction/transaction-auto-commit.md | 167 -- .../transaction/transaction-control.md | 30 - .../transaction/transaction-management.md | 173 -- .../sql-reference/type-base-value.md | 83 - .../type-conversion/functions.md | 103 - .../type-conversion/operators.md | 75 - .../type-conversion-overview.md | 52 - .../type-conversion/type-conversion.md | 14 - .../union-case-and-related-constructs.md | 205 -- .../type-conversion/value-storage.md | 36 - .../v5.2/reference-guide/sql-syntax/ABORT.md | 77 - .../sql-syntax/ALTER-AGGREGATE.md | 77 - .../sql-syntax/ALTER-AUDIT-POLICY.md | 102 - .../sql-syntax/ALTER-DATA-SOURCE.md | 115 - .../sql-syntax/ALTER-DATABASE.md | 147 -- .../sql-syntax/ALTER-DEFAULT-PRIVILEGES.md | 209 -- .../sql-syntax/ALTER-DIRECTORY.md | 48 - .../sql-syntax/ALTER-EVENT-TRIGGER.md | 57 - .../reference-guide/sql-syntax/ALTER-EVENT.md | 85 - .../sql-syntax/ALTER-EXTENSION.md | 155 -- .../sql-syntax/ALTER-FOREIGN-DATA-WRAPPER.md | 67 - .../sql-syntax/ALTER-FOREIGN-TABLE.md | 119 - .../sql-syntax/ALTER-FUNCTION.md | 206 -- .../sql-syntax/ALTER-GLOBAL-CONFIGURATION.md | 54 - 
.../reference-guide/sql-syntax/ALTER-GROUP.md | 61 - .../reference-guide/sql-syntax/ALTER-INDEX.md | 161 -- .../sql-syntax/ALTER-LANGUAGE.md | 37 - .../sql-syntax/ALTER-LARGE-OBJECT.md | 42 - .../sql-syntax/ALTER-MASKING-POLICY.md | 141 -- .../sql-syntax/ALTER-MATERIALIZED-VIEW.md | 80 - .../sql-syntax/ALTER-OPERATOR.md | 60 - .../sql-syntax/ALTER-PACKAGE.md | 50 - .../sql-syntax/ALTER-PROCEDURE.md | 196 -- .../sql-syntax/ALTER-PUBLICATION.md | 85 - .../sql-syntax/ALTER-RESOURCE-LABEL.md | 71 - .../sql-syntax/ALTER-RESOURCE-POOL.md | 124 - .../reference-guide/sql-syntax/ALTER-ROLE.md | 141 -- .../ALTER-ROW-LEVEL-SECURITY-POLICY.md | 115 - .../reference-guide/sql-syntax/ALTER-RULE.md | 46 - .../sql-syntax/ALTER-SCHEMA.md | 112 - .../sql-syntax/ALTER-SEQUENCE.md | 139 -- .../sql-syntax/ALTER-SERVER.md | 107 - .../sql-syntax/ALTER-SESSION.md | 91 - .../sql-syntax/ALTER-SUBSCRIPTION.md | 111 - .../sql-syntax/ALTER-SYNONYM.md | 75 - .../sql-syntax/ALTER-SYSTEM-KILL-SESSION.md | 56 - .../sql-syntax/ALTER-SYSTEM-SET.md | 80 - .../sql-syntax/ALTER-TABLE-PARTITION.md | 300 --- .../sql-syntax/ALTER-TABLE-SUBPARTITION.md | 219 -- .../reference-guide/sql-syntax/ALTER-TABLE.md | 779 ------- .../sql-syntax/ALTER-TABLESPACE.md | 128 -- .../ALTER-TEXT-SEARCH-CONFIGURATION.md | 181 -- .../ALTER-TEXT-SEARCH-DICTIONARY.md | 104 - .../sql-syntax/ALTER-TRIGGER.md | 55 - .../reference-guide/sql-syntax/ALTER-TYPE.md | 178 -- .../sql-syntax/ALTER-USER-MAPPING.md | 74 - .../reference-guide/sql-syntax/ALTER-USER.md | 124 - .../reference-guide/sql-syntax/ALTER-VIEW.md | 159 -- .../sql-syntax/ANALYZE-ANALYSE.md | 191 -- .../v5.2/reference-guide/sql-syntax/BEGIN.md | 63 - .../v5.2/reference-guide/sql-syntax/CALL.md | 85 - .../reference-guide/sql-syntax/CHECKPOINT.md | 37 - .../sql-syntax/CLEAN-CONNECTION.md | 77 - .../v5.2/reference-guide/sql-syntax/CLOSE.md | 46 - .../reference-guide/sql-syntax/CLUSTER.md | 123 - .../reference-guide/sql-syntax/COMMENT.md | 160 -- .../reference-guide/sql-syntax/COMMIT-END.md | 73 - .../sql-syntax/COMMIT-PREPARED.md | 50 - .../reference-guide/sql-syntax/CONNECT-BY.md | 210 -- .../v5.2/reference-guide/sql-syntax/COPY.md | 668 ------ .../sql-syntax/CREATE-AGGREGATE.md | 83 - .../sql-syntax/CREATE-AUDIT-POLICY.md | 109 - .../reference-guide/sql-syntax/CREATE-CAST.md | 87 - .../sql-syntax/CREATE-CLIENT-MASTER-KEY.md | 62 - .../CREATE-COLUMN-ENCRYPTION-KEY.md | 61 - .../sql-syntax/CREATE-DATA-SOURCE.md | 97 - .../sql-syntax/CREATE-DATABASE.md | 235 -- .../sql-syntax/CREATE-DIRECTORY.md | 59 - .../sql-syntax/CREATE-EVENT-TRIGGER.md | 107 - .../sql-syntax/CREATE-EVENT.md | 103 - .../sql-syntax/CREATE-EXTENSION.md | 60 - .../sql-syntax/CREATE-FOREIGN-DATA-WRAPPER.md | 53 - .../sql-syntax/CREATE-FOREIGN-TABLE.md | 182 -- .../sql-syntax/CREATE-FUNCTION.md | 326 --- .../sql-syntax/CREATE-GROUP.md | 67 - .../CREATE-INCREMENTAL-MATERIALIZED-VIEW.md | 72 - .../sql-syntax/CREATE-INDEX.md | 553 ----- .../sql-syntax/CREATE-LANGUAGE.md | 72 - .../sql-syntax/CREATE-MASKING-POLICY.md | 112 - .../sql-syntax/CREATE-MATERIALIZED-VIEW.md | 78 - .../sql-syntax/CREATE-MODEL.md | 86 - .../sql-syntax/CREATE-OPERATOR.md | 118 - .../sql-syntax/CREATE-PACKAGE.md | 115 - .../sql-syntax/CREATE-PROCEDURE.md | 152 -- .../sql-syntax/CREATE-PUBLICATION.md | 73 - .../sql-syntax/CREATE-RESOURCE-LABEL.md | 86 - .../sql-syntax/CREATE-RESOURCE-POOL.md | 151 -- .../reference-guide/sql-syntax/CREATE-ROLE.md | 298 --- .../CREATE-ROW-LEVEL-SECURITY-POLICY.md | 176 -- .../reference-guide/sql-syntax/CREATE-RULE.md | 79 - 
.../sql-syntax/CREATE-SCHEMA.md | 110 - .../sql-syntax/CREATE-SEQUENCE.md | 150 -- .../sql-syntax/CREATE-SERVER.md | 131 -- .../sql-syntax/CREATE-SUBSCRIPTION.md | 121 - .../sql-syntax/CREATE-SYNONYM.md | 116 - .../sql-syntax/CREATE-TABLE-AS.md | 238 -- .../sql-syntax/CREATE-TABLE-PARTITION.md | 1047 --------- .../sql-syntax/CREATE-TABLE-SUBPARTITION.md | 1087 --------- .../sql-syntax/CREATE-TABLE.md | 1191 ---------- .../sql-syntax/CREATE-TABLESPACE.md | 133 -- .../CREATE-TEXT-SEARCH-CONFIGURATION.md | 97 - .../CREATE-TEXT-SEARCH-DICTIONARY.md | 153 -- .../sql-syntax/CREATE-TRIGGER.md | 401 ---- .../reference-guide/sql-syntax/CREATE-TYPE.md | 312 --- .../sql-syntax/CREATE-USER-MAPPING.md | 86 - .../reference-guide/sql-syntax/CREATE-USER.md | 135 -- .../reference-guide/sql-syntax/CREATE-VIEW.md | 119 - .../CREATE-WEAK-PASSWORD-DICTIONARY.md | 55 - .../v5.2/reference-guide/sql-syntax/CURSOR.md | 78 - .../reference-guide/sql-syntax/DEALLOCATE.md | 39 - .../reference-guide/sql-syntax/DECLARE.md | 102 - .../v5.2/reference-guide/sql-syntax/DELETE.md | 175 -- .../reference-guide/sql-syntax/DELIMITER.md | 59 - .../v5.2/reference-guide/sql-syntax/DO.md | 56 - .../sql-syntax/DROP-AGGREGATE.md | 57 - .../sql-syntax/DROP-AUDIT-POLICY.md | 39 - .../reference-guide/sql-syntax/DROP-CAST.md | 57 - .../sql-syntax/DROP-CLIENT-MASTER-KEY.md | 51 - .../sql-syntax/DROP-COLUMN-ENCRYPTION-KEY.md | 44 - .../sql-syntax/DROP-DATA-SOURCE.md | 58 - .../sql-syntax/DROP-DATABASE.md | 49 - .../sql-syntax/DROP-DIRECTORY.md | 45 - .../sql-syntax/DROP-EVENT-TRIGGER.md | 48 - .../reference-guide/sql-syntax/DROP-EVENT.md | 39 - .../sql-syntax/DROP-EXTENSION.md | 52 - .../sql-syntax/DROP-FOREIGN-DATA-WRAPPER.md | 42 - .../sql-syntax/DROP-FOREIGN-TABLE.md | 45 - .../sql-syntax/DROP-FUNCTION.md | 82 - .../sql-syntax/DROP-GLOBAL-CONFIGURATION.md | 28 - .../reference-guide/sql-syntax/DROP-GROUP.md | 33 - .../reference-guide/sql-syntax/DROP-INDEX.md | 59 - .../sql-syntax/DROP-LANGUAGE.md | 49 - .../sql-syntax/DROP-MASKING-POLICY.md | 45 - .../sql-syntax/DROP-MATERIALIZED-VIEW.md | 49 - .../reference-guide/sql-syntax/DROP-MODEL.md | 35 - .../sql-syntax/DROP-OPERATOR.md | 11 - .../reference-guide/sql-syntax/DROP-OWNED.md | 41 - .../sql-syntax/DROP-PACKAGE.md | 31 - .../sql-syntax/DROP-PROCEDURE.md | 55 - .../sql-syntax/DROP-PUBLICATION.md | 45 - .../sql-syntax/DROP-RESOURCE-LABEL.md | 45 - .../sql-syntax/DROP-RESOURCE-POOL.md | 45 - .../reference-guide/sql-syntax/DROP-ROLE.md | 43 - .../DROP-ROW-LEVEL-SECURITY-POLICY.md | 58 - .../reference-guide/sql-syntax/DROP-RULE.md | 48 - .../reference-guide/sql-syntax/DROP-SCHEMA.md | 52 - .../sql-syntax/DROP-SEQUENCE.md | 56 - .../reference-guide/sql-syntax/DROP-SERVER.md | 42 - .../sql-syntax/DROP-SUBSCRIPTION.md | 42 - .../sql-syntax/DROP-SYNONYM.md | 46 - .../reference-guide/sql-syntax/DROP-TABLE.md | 56 - .../sql-syntax/DROP-TABLESPACE.md | 47 - .../DROP-TEXT-SEARCH-CONFIGURATION.md | 49 - .../sql-syntax/DROP-TEXT-SEARCH-DICTIONARY.md | 58 - .../sql-syntax/DROP-TRIGGER.md | 60 - .../reference-guide/sql-syntax/DROP-TYPE.md | 49 - .../sql-syntax/DROP-USER-MAPPING.md | 39 - .../reference-guide/sql-syntax/DROP-USER.md | 60 - .../reference-guide/sql-syntax/DROP-VIEW.md | 48 - .../DROP-WEAK-PASSWORD-DICTIONARY.md | 35 - .../sql-syntax/EXECUTE-DIRECT.md | 84 - .../reference-guide/sql-syntax/EXECUTE.md | 60 - .../sql-syntax/EXPLAIN-PLAN.md | 85 - .../reference-guide/sql-syntax/EXPLAIN.md | 244 -- .../v5.2/reference-guide/sql-syntax/FETCH.md | 219 -- .../v5.2/reference-guide/sql-syntax/GRANT.md | 
650 ------ .../v5.2/reference-guide/sql-syntax/INSERT.md | 277 --- .../v5.2/reference-guide/sql-syntax/LOCK.md | 137 -- .../reference-guide/sql-syntax/MERGE-INTO.md | 163 -- .../v5.2/reference-guide/sql-syntax/MOVE.md | 82 - .../reference-guide/sql-syntax/PREDICT-BY.md | 48 - .../sql-syntax/PREPARE-TRANSACTION.md | 44 - .../reference-guide/sql-syntax/PREPARE.md | 47 - .../v5.2/reference-guide/sql-syntax/PURGE.md | 117 - .../sql-syntax/REASSIGN-OWNED.md | 40 - .../REFRESH-INCREMENTAL-MATERIALIZED-VIEW.md | 47 - .../sql-syntax/REFRESH-MATERIALIZED-VIEW.md | 51 - .../reference-guide/sql-syntax/REINDEX.md | 165 -- .../sql-syntax/RELEASE-SAVEPOINT.md | 70 - .../v5.2/reference-guide/sql-syntax/RESET.md | 69 - .../v5.2/reference-guide/sql-syntax/REVOKE.md | 271 --- .../sql-syntax/ROLLBACK-PREPARED.md | 34 - .../sql-syntax/ROLLBACK-TO-SAVEPOINT.md | 60 - .../reference-guide/sql-syntax/ROLLBACK.md | 45 - .../reference-guide/sql-syntax/SAVEPOINT.md | 99 - .../reference-guide/sql-syntax/SELECT-INTO.md | 91 - .../v5.2/reference-guide/sql-syntax/SELECT.md | 946 -------- .../sql-syntax/SET-CONSTRAINTS.md | 60 - .../reference-guide/sql-syntax/SET-ROLE.md | 75 - .../sql-syntax/SET-SESSION-AUTHORIZATION.md | 79 - .../sql-syntax/SET-TRANSACTION.md | 79 - .../v5.2/reference-guide/sql-syntax/SET.md | 268 --- .../reference-guide/sql-syntax/SHOW-EVENTS.md | 46 - .../v5.2/reference-guide/sql-syntax/SHOW.md | 52 - .../v5.2/reference-guide/sql-syntax/SHRINK.md | 59 - .../reference-guide/sql-syntax/SHUTDOWN.md | 51 - .../reference-guide/sql-syntax/SNAPSHOT.md | 111 - .../sql-syntax/START-TRANSACTION.md | 89 - .../sql-syntax/TIMECAPSULE-TABLE.md | 129 -- .../reference-guide/sql-syntax/TRUNCATE.md | 131 -- .../v5.2/reference-guide/sql-syntax/UPDATE.md | 207 -- .../v5.2/reference-guide/sql-syntax/VACUUM.md | 118 - .../v5.2/reference-guide/sql-syntax/VALUES.md | 68 - .../reference-guide/sql-syntax/sql-syntax.md | 225 -- .../supported-data-types/HLL.md | 210 -- .../supported-data-types/binary-data-types.md | 54 - .../supported-data-types/bit-string-types.md | 51 - .../boolean-data-types.md | 59 - .../character-data-types.md | 90 - .../data-type-used-by-the-ledger-database.md | 31 - ...-types-supported-by-column-store-tables.md | 233 -- .../supported-data-types/date-time-types.md | 406 ---- .../supported-data-types/geometric.md | 118 - .../supported-data-types/json-types.md | 154 -- .../supported-data-types/monetary.md | 34 - .../supported-data-types/network-address.md | 67 - .../numeric-data-types.md | 263 --- .../object-identifier-types.md | 90 - .../supported-data-types/pseudo-types.md | 66 - .../supported-data-types/range.md | 163 -- .../supported-data-types/set-type.md | 86 - .../supported-data-types.md | 31 - .../supported-data-types/text-search-types.md | 168 -- .../supported-data-types/uuid-type.md | 27 - .../supported-data-types/xml-type.md | 68 - ...iew-of-system-catalogs-and-system-views.md | 22 - .../system-catalogs-and-system-views.md | 15 - .../system-catalogs/GS_ASP.md | 46 - .../system-catalogs/GS_AUDITING_POLICY.md | 20 - .../GS_AUDITING_POLICY_ACCESS.md | 20 - .../GS_AUDITING_POLICY_FILTERS.md | 21 - .../GS_AUDITING_POLICY_PRIVILEGES.md | 20 - .../system-catalogs/GS_CLIENT_GLOBAL_KEYS.md | 21 - .../GS_CLIENT_GLOBAL_KEYS_ARGS.md | 20 - .../system-catalogs/GS_COLUMN_KEYS.md | 23 - .../system-catalogs/GS_COLUMN_KEYS_ARGS.md | 20 - .../system-catalogs/GS_DB_PRIVILEGE.md | 19 - .../system-catalogs/GS_ENCRYPTED_COLUMNS.md | 23 - .../system-catalogs/GS_ENCRYPTED_PROC.md | 20 - 
.../system-catalogs/GS_GLOBAL_CHAIN.md | 25 - .../system-catalogs/GS_GLOBAL_CONFIG.md | 17 - .../system-catalogs/GS_MASKING_POLICY.md | 20 - .../GS_MASKING_POLICY_ACTIONS.md | 21 - .../GS_MASKING_POLICY_FILTERS.md | 21 - .../system-catalogs/GS_MATVIEW.md | 21 - .../system-catalogs/GS_MATVIEW_DEPENDENCY.md | 20 - .../system-catalogs/GS_MODEL_WAREHOUSE.md | 38 - .../system-catalogs/GS_OPT_MODEL.md | 33 - .../system-catalogs/GS_PACKAGE.md | 24 - .../system-catalogs/GS_POLICY_LABEL.md | 24 - .../system-catalogs/GS_RECYCLEBIN.md | 35 - .../system-catalogs/GS_TXN_SNAPSHOT.md | 19 - .../system-catalogs/GS_UID.md | 17 - .../GS_WLM_EC_OPERATOR_INFO.md | 29 - .../GS_WLM_INSTANCE_HISTORY.md | 30 - .../system-catalogs/GS_WLM_OPERATOR_INFO.md | 37 - .../GS_WLM_PLAN_ENCODING_TABLE.md | 23 - .../GS_WLM_PLAN_OPERATOR_INFO.md | 32 - .../GS_WLM_SESSION_QUERY_INFO_ALL.md | 102 - .../GS_WLM_USER_RESOURCE_HISTORY.md | 33 - .../system-catalogs/PGXC_CLASS.md | 25 - .../system-catalogs/PGXC_GROUP.md | 24 - .../system-catalogs/PGXC_NODE.md | 32 - .../system-catalogs/PGXC_SLICE.md | 30 - .../system-catalogs/PG_AGGREGATE.md | 25 - .../system-catalogs/PG_AM.md | 47 - .../system-catalogs/PG_AMOP.md | 28 - .../system-catalogs/PG_AMPROC.md | 23 - .../PG_APP_WORKLOADGROUP_MAPPING.md | 18 - .../system-catalogs/PG_ATTRDEF.md | 23 - .../system-catalogs/PG_ATTRIBUTE.md | 39 - .../system-catalogs/PG_AUTHID.md | 45 - .../system-catalogs/PG_AUTH_HISTORY.md | 19 - .../system-catalogs/PG_AUTH_MEMBERS.md | 19 - .../system-catalogs/PG_CAST.md | 22 - .../system-catalogs/PG_CLASS.md | 78 - .../system-catalogs/PG_COLLATION.md | 24 - .../system-catalogs/PG_CONSTRAINT.md | 48 - .../system-catalogs/PG_CONVERSION.md | 23 - .../system-catalogs/PG_DATABASE.md | 31 - .../system-catalogs/PG_DB_ROLE_SETTING.md | 18 - .../system-catalogs/PG_DEFAULT_ACL.md | 20 - .../system-catalogs/PG_DEPEND.md | 32 - .../system-catalogs/PG_DESCRIPTION.md | 21 - .../system-catalogs/PG_DIRECTORY.md | 20 - .../system-catalogs/PG_ENUM.md | 23 - .../system-catalogs/PG_EVENT_TRIGGER.md | 21 - .../system-catalogs/PG_EXTENSION.md | 23 - .../PG_EXTENSION_DATA_SOURCE.md | 22 - .../PG_FOREIGN_DATA_WRAPPER.md | 22 - .../system-catalogs/PG_FOREIGN_SERVER.md | 23 - .../system-catalogs/PG_FOREIGN_TABLE.md | 19 - .../system-catalogs/PG_HASHBUCKET.md | 21 - .../system-catalogs/PG_INDEX.md | 35 - .../system-catalogs/PG_INHERITS.md | 18 - .../system-catalogs/PG_JOB.md | 36 - .../system-catalogs/PG_JOB_PROC.md | 19 - .../system-catalogs/PG_LANGUAGE.md | 24 - .../system-catalogs/PG_LARGEOBJECT.md | 22 - .../PG_LARGEOBJECT_METADATA.md | 18 - .../system-catalogs/PG_NAMESPACE.md | 23 - .../system-catalogs/PG_OBJECT.md | 30 - .../system-catalogs/PG_OPCLASS.md | 28 - .../system-catalogs/PG_OPERATOR.md | 30 - .../system-catalogs/PG_OPFAMILY.md | 24 - .../system-catalogs/PG_PARTITION.md | 47 - .../system-catalogs/PG_PLTEMPLATE.md | 23 - .../system-catalogs/PG_PROC.md | 55 - .../system-catalogs/PG_PUBLICATION.md | 21 - .../system-catalogs/PG_PUBLICATION_REL.md | 18 - .../system-catalogs/PG_RANGE.md | 23 - .../system-catalogs/PG_REPLICATION_ORIGIN.md | 17 - .../system-catalogs/PG_RESOURCE_POOL.md | 31 - .../system-catalogs/PG_REWRITE.md | 24 - .../system-catalogs/PG_RLSPOLICY.md | 22 - .../system-catalogs/PG_SECLABEL.md | 22 - .../system-catalogs/PG_SET.md | 19 - .../system-catalogs/PG_SHDEPEND.md | 45 - .../system-catalogs/PG_SHDESCRIPTION.md | 22 - .../system-catalogs/PG_SHSECLABEL.md | 25 - .../system-catalogs/PG_STATISTIC.md | 30 - .../system-catalogs/PG_STATISTIC_EXT.md | 30 - 
.../system-catalogs/PG_SUBSCRIPTION.md | 26 - .../system-catalogs/PG_SUBSCRIPTION_REL.md | 22 - .../system-catalogs/PG_SYNONYM.md | 21 - .../system-catalogs/PG_TABLESPACE.md | 22 - .../system-catalogs/PG_TRIGGER.md | 35 - .../system-catalogs/PG_TS_CONFIG.md | 23 - .../system-catalogs/PG_TS_CONFIG_MAP.md | 19 - .../system-catalogs/PG_TS_DICT.md | 23 - .../system-catalogs/PG_TS_PARSER.md | 23 - .../system-catalogs/PG_TS_TEMPLATE.md | 20 - .../system-catalogs/PG_TYPE.md | 46 - .../system-catalogs/PG_USER_MAPPING.md | 21 - .../system-catalogs/PG_USER_STATUS.md | 23 - .../system-catalogs/PG_WORKLOAD_GROUP.md | 19 - .../system-catalogs/PLAN_TABLE_DATA.md | 32 - .../system-catalogs/STATEMENT_HISTORY.md | 73 - .../system-catalogs/system-catalogs.md | 120 - .../system-views/GET_GLOBAL_PREPARED_XACTS.md | 10 - .../GS_ASYNC_SUBMIT_SESSIONS_STATUS.md | 25 - .../system-views/GS_AUDITING.md | 22 - .../system-views/GS_AUDITING_ACCESS.md | 22 - .../system-views/GS_AUDITING_PRIVILEGE.md | 22 - .../system-views/GS_CLUSTER_RESOURCE_INFO.md | 22 - .../system-views/GS_COMPRESSION.md | 24 - .../system-views/GS_DB_PRIVILEGES.md | 18 - .../system-views/GS_FILE_STAT.md | 28 - .../system-views/GS_GSC_MEMORY_DETAIL.md | 19 - .../system-views/GS_INSTANCE_TIME.md | 29 - .../system-views/GS_LABELS.md | 19 - .../system-views/GS_LSC_MEMORY_DETAIL.md | 24 - .../system-views/GS_MASKING.md | 19 - .../system-views/GS_MATVIEWS.md | 21 - .../system-views/GS_OS_RUN_INFO.md | 20 - .../system-views/GS_REDO_STAT.md | 22 - .../system-views/GS_SESSION_CPU_STATISTICS.md | 25 - .../system-views/GS_SESSION_MEMORY.md | 19 - .../system-views/GS_SESSION_MEMORY_CONTEXT.md | 25 - .../system-views/GS_SESSION_MEMORY_DETAIL.md | 27 - .../GS_SESSION_MEMORY_STATISTICS.md | 25 - .../system-views/GS_SESSION_STAT.md | 20 - .../system-views/GS_SESSION_TIME.md | 19 - .../system-views/GS_SHARED_MEMORY_DETAIL.md | 21 - .../system-views/GS_SQL_COUNT.md | 45 - .../system-views/GS_STAT_SESSION_CU.md | 18 - .../system-views/GS_THREAD_MEMORY_CONTEXT.md | 26 - .../system-views/GS_TOTAL_MEMORY_DETAIL.md | 18 - .../system-views/GS_WLM_CGROUP_INFO.md | 24 - .../GS_WLM_EC_OPERATOR_STATISTICS.md | 25 - .../system-views/GS_WLM_OPERATOR_HISTORY.md | 12 - .../GS_WLM_OPERATOR_STATISTICS.md | 38 - .../GS_WLM_PLAN_OPERATOR_HISTORY.md | 35 - .../GS_WLM_REBUILD_USER_RESOURCE_POOL.md | 16 - .../system-views/GS_WLM_RESOURCE_POOL.md | 25 - .../system-views/GS_WLM_SESSION_HISTORY.md | 83 - .../system-views/GS_WLM_SESSION_INFO.md | 12 - .../system-views/GS_WLM_SESSION_INFO_ALL.md | 102 - .../system-views/GS_WLM_SESSION_STATISTICS.md | 64 - .../system-views/GS_WLM_USER_INFO.md | 25 - .../system-views/MPP_TABLES.md | 21 - .../system-views/PATCH_INFORMATION_TABLE.md | 20 - .../system-views/PGXC_PREPARED_XACTS.md | 16 - .../system-views/PG_AVAILABLE_EXTENSIONS.md | 19 - .../PG_AVAILABLE_EXTENSION_VERSIONS.md | 23 - .../system-views/PG_COMM_DELAY.md | 22 - .../system-views/PG_COMM_RECV_STREAM.md | 32 - .../system-views/PG_COMM_SEND_STREAM.md | 32 - .../system-views/PG_COMM_STATUS.md | 28 - .../system-views/PG_CONTROL_GROUP_CONFIG.md | 16 - .../system-views/PG_CURSORS.md | 21 - .../system-views/PG_EXT_STATS.md | 28 - .../system-views/PG_GET_INVALID_BACKENDS.md | 20 - .../PG_GET_SENDERS_CATCHUP_TIME.md | 23 - .../system-views/PG_GROUP.md | 18 - .../system-views/PG_GTT_ATTACHED_PIDS.md | 19 - .../system-views/PG_GTT_RELSTATS.md | 23 - .../system-views/PG_GTT_STATS.md | 29 - .../system-views/PG_INDEXES.md | 20 - .../system-views/PG_LOCKS.md | 34 - .../system-views/PG_NODE_ENV.md | 22 
 .../system-views/PG_OS_THREADS.md | 20 -
 .../system-views/PG_PREPARED_STATEMENTS.md | 20 -
 .../system-views/PG_PREPARED_XACTS.md | 20 -
 .../system-views/PG_PUBLICATION_TABLES.md | 18 -
 .../PG_REPLICATION_ORIGIN_STATUS.md | 19 -
 .../system-views/PG_REPLICATION_SLOTS.md | 26 -
 .../system-views/PG_RLSPOLICIES.md | 22 -
 .../system-views/PG_ROLES.md | 42 -
 .../system-views/PG_RULES.md | 19 -
 .../system-views/PG_RUNNING_XACTS.md | 28 -
 .../system-views/PG_SECLABELS.md | 23 -
 .../system-views/PG_SESSION_IOSTAT.md | 27 -
 .../system-views/PG_SESSION_WLMSTAT.md | 39 -
 .../system-views/PG_SETTINGS.md | 31 -
 .../system-views/PG_SHADOW.md | 35 -
 .../system-views/PG_STATIO_ALL_INDEXES.md | 22 -
 .../system-views/PG_STATIO_ALL_SEQUENCES.md | 20 -
 .../system-views/PG_STATIO_ALL_TABLES.md | 26 -
 .../system-views/PG_STATIO_SYS_INDEXES.md | 22 -
 .../system-views/PG_STATIO_SYS_SEQUENCES.md | 20 -
 .../system-views/PG_STATIO_SYS_TABLES.md | 26 -
 .../system-views/PG_STATIO_USER_INDEXES.md | 22 -
 .../system-views/PG_STATIO_USER_SEQUENCES.md | 20 -
 .../system-views/PG_STATIO_USER_TABLES.md | 26 -
 .../system-views/PG_STATS.md | 30 -
 .../system-views/PG_STAT_ACTIVITY.md | 38 -
 .../system-views/PG_STAT_ACTIVITY_NG.md | 36 -
 .../system-views/PG_STAT_ALL_INDEXES.md | 25 -
 .../system-views/PG_STAT_ALL_TABLES.md | 37 -
 .../system-views/PG_STAT_BAD_BLOCK.md | 24 -
 .../system-views/PG_STAT_BGWRITER.md | 26 -
 .../system-views/PG_STAT_DATABASE.md | 34 -
 .../PG_STAT_DATABASE_CONFLICTS.md | 22 -
 .../system-views/PG_STAT_REPLICATION.md | 30 -
 .../system-views/PG_STAT_SUBSCRIPTION.md | 23 -
 .../system-views/PG_STAT_SYS_INDEXES.md | 23 -
 .../system-views/PG_STAT_SYS_TABLES.md | 37 -
 .../system-views/PG_STAT_USER_FUNCTIONS.md | 21 -
 .../system-views/PG_STAT_USER_INDEXES.md | 23 -
 .../system-views/PG_STAT_USER_TABLES.md | 37 -
 .../system-views/PG_STAT_XACT_ALL_TABLES.md | 26 -
 .../system-views/PG_STAT_XACT_SYS_TABLES.md | 26 -
 .../PG_STAT_XACT_USER_FUNCTIONS.md | 21 -
 .../system-views/PG_STAT_XACT_USER_TABLES.md | 26 -
 .../system-views/PG_TABLES.md | 25 -
 .../system-views/PG_TDE_INFO.md | 18 -
 .../system-views/PG_THREAD_WAIT_STATUS.md | 291 ---
 .../system-views/PG_TIMEZONE_ABBREVS.md | 18 -
 .../system-views/PG_TIMEZONE_NAMES.md | 19 -
 .../system-views/PG_TOTAL_MEMORY_DETAIL.md | 18 -
 .../PG_TOTAL_USER_RESOURCE_INFO.md | 32 -
 .../PG_TOTAL_USER_RESOURCE_INFO_OID.md | 32 -
 .../system-views/PG_USER.md | 34 -
 .../system-views/PG_USER_MAPPINGS.md | 23 -
 .../system-views/PG_VARIABLE_INFO.md | 26 -
 .../system-views/PG_VIEWS.md | 19 -
 .../system-views/PG_WLM_STATISTICS.md | 24 -
 .../system-views/PLAN_TABLE.md | 31 -
 .../system-views/system-views.md | 129 --
 .../viewing-system-catalogs.md | 139 --
 .../reference-guide/tool-reference/FAQ.md | 133 --
 .../tool-reference/client-tool/client-tool.md | 12 -
 .../client-tool/gsql/client-tool-gsql.md | 18 -
 .../client-tool/gsql/command-reference.md | 68 -
 .../client-tool/gsql/gsql-faq.md | 221 --
 .../client-tool/gsql/gsql-introduction.md | 142 --
 .../client-tool/gsql/gsql-release-notes.md | 14 -
 .../gsql/meta-command-reference.md | 230 --
 .../gsql/obtaining-help-information.md | 77 -
 .../client-tool/gsql/usage-guidelines.md | 201 --
 .../functions-of-mogdb-executable-scripts.md | 54 -
 .../tool-reference/server-tools/gs_cgroup.md | 444 ----
 .../tool-reference/server-tools/gs_check.md | 1161 ----------
 .../tool-reference/server-tools/gs_checkos.md | 223 --
 .../server-tools/gs_checkperf.md | 298 ---
 .../server-tools/gs_collector.md | 227 --
 .../tool-reference/server-tools/gs_dump.md | 573 -----
 .../tool-reference/server-tools/gs_dumpall.md | 292 ---
 .../tool-reference/server-tools/gs_encrypt.md | 136 --
 .../tool-reference/server-tools/gs_guc.md | 398 ----
 .../tool-reference/server-tools/gs_om.md | 398 ----
 .../server-tools/gs_plan_simulator.md | 117 -
 .../tool-reference/server-tools/gs_restore.md | 405 ----
 .../tool-reference/server-tools/gs_sdr.md | 425 ----
 .../tool-reference/server-tools/gs_ssh.md | 76 -
 .../tool-reference/server-tools/gs_watch.md | 52 -
 .../server-tools/server-tools.md | 26 -
 ...ogs-and-views-supported-by-gs_collector.md | 375 ---
 .../tool-reference/tool-overview.md | 35 -
 .../tool-reference/tool-reference.md | 18 -
 .../dsscmd.md | 386 ----
 .../dssserver.md | 296 ---
 .../gs_backup.md | 151 --
 .../gs_basebackup.md | 164 --
 .../gs_ctl.md | 183 --
 .../gs_dropnode.md | 125 -
 .../gs_expansion.md | 152 --
 .../gs_initdb.md | 132 --
 .../gs_install.md | 127 --
 .../gs_postuninstall.md | 119 -
 .../gs_preinstall.md | 309 ---
 .../gs_probackup.md | 786 -------
 .../gs_sshexkey.md | 135 --
 .../gs_tar.md | 49 -
 .../gs_uninstall.md | 80 -
 .../gs_upgradectl.md | 272 ---
 .../gstrace.md | 313 ---
 .../kadmin-local.md | 16 -
 .../kdb5_util.md | 16 -
 .../kdestroy.md | 16 -
 .../kinit.md | 16 -
 .../klist.md | 16 -
 .../krb5kdc.md | 16 -
 .../mogdb.md | 177 --
 .../pg_archivecleanup.md | 58 -
 .../pg_config.md | 118 -
 .../pg_controldata.md | 70 -
 .../pg_recvlogical.md | 157 --
 .../pg_resetxlog.md | 104 -
 .../tools-used-in-the-internal-system/pscp.md | 72 -
 .../tools-used-in-the-internal-system/pssh.md | 68 -
 .../tools-used-in-the-internal-system.md | 43 -
 .../transfer.py.md | 60 -
 .../v5.2/security-guide/security-guide.md | 10 -
 .../1-client-access-authentication.md | 820 -------
 .../2-managing-users-and-their-permissions.md | 1136 ----------
 .../security/3-configuring-database-audit.md | 470 ----
 .../4-setting-encrypted-equality-query.md | 332 ---
 .../security/5-setting-a-ledger-database.md | 412 ----
 .../security/6-transparent-data-encryption.md | 88 -
 .../security/database-security-management.md | 15 -
 .../zh/docs-mogdb/v5.2/source-code-parsing.md | 98 -
 product/zh/docs-mogdb/v5.2/toc.md | 1917 ----------------
 product/zh/docs-mogdb/v5.2/toc_about.md | 18 -
 product/zh/docs-mogdb/v5.2/toc_ai-features.md | 89 -
 .../v5.2/toc_characteristic_description.md | 169 --
 .../toc_common-faults-and-identification.md | 49 -
 .../v5.2/toc_communication-matrix.md | 5 -
 .../docs-mogdb/v5.2/toc_datatypes-and-sql.md | 313 ---
 product/zh/docs-mogdb/v5.2/toc_dev.md | 322 ---
 product/zh/docs-mogdb/v5.2/toc_error.md | 102 -
 .../v5.2/toc_extension-referecne.md | 24 -
 product/zh/docs-mogdb/v5.2/toc_faqs.md | 12 -
 product/zh/docs-mogdb/v5.2/toc_glossary.md | 5 -
 .../zh/docs-mogdb/v5.2/toc_high_available.md | 17 -
 product/zh/docs-mogdb/v5.2/toc_install.md | 14 -
 product/zh/docs-mogdb/v5.2/toc_manage.md | 83 -
 .../v5.2/toc_parameters-and-tools.md | 396 ----
 product/zh/docs-mogdb/v5.2/toc_performance.md | 33 -
 product/zh/docs-mogdb/v5.2/toc_quickstart.md | 29 -
 product/zh/docs-mogdb/v5.2/toc_secure.md | 12 -
 .../v5.2/toc_system-catalogs-and-functions.md | 313 ---
 product/zh/docs-mogdb/v5.2/toc_upgrade.md | 10 -
 .../v5.2/upgrade-guide/1-upgrade-overview.md | 30 -
 .../upgrade-guide/2-read-before-upgrade.md | 72 -
 .../v5.2/upgrade-guide/3-in-place-upgrade.md | 128 --
 .../v5.2/upgrade-guide/4-rolling-upgrade.md | 114 -
 .../v5.2/upgrade-guide/upgrade-guide.md | 13 -
 3903 files changed, 482024 deletions(-)
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai-feature.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/abo-optimizer.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-best-practices.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-prerequisites.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/ai4db-adaptive-plan-selection.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/ai4db-intelligent-cardinality-estimation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-best-practices.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-prerequisites.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-1-x-tuner-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-2-preparations.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-3-examples.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-4-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-5-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-6-Troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-1-single-query-index-recommendation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-2-virtual-index.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-3-workload-level-index-recommendation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-1-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-2-environment-deployment.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-3-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-4-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-5-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-6-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-1-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-2-environment-deployment.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-3-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-4-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-5-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-6-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-1-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-2-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-3-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-4-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-5-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-1-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-2-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-3-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-4-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-5-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-1-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-2-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-3-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-4-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-5-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/ai-sub-functions-of-the-dbmind.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-environment-deployment.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/index-advisor-index-recommendation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/single-query-index-recommendation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/virtual-index.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/workload-level-index-recommendation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-environment-deployment.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-examples.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-parameter-optimization-and-diagnosis.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-preparations.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/ai4db-autonomous-database-o&m.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/components-that-support-dbmind.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-environment-deployment.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-troubleshooting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-usage-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/1-service.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/2-component.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/3-set.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/component.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/dbmind-mode.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/service.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/set.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/db4ai/db4ai.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/db4ai-query-for-model-training-and-prediction.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/db4ai-snapshots-for-data-version-management.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/full-process-ai.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/plpython-fenced-mode.md
 delete mode 100644 product/en/docs-mogdb/v5.2/AI-features/db4ai/native-db4ai-engine.md
 delete mode 100644 product/en/docs-mogdb/v5.2/_index.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/MogDB-compared-to-openGauss.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/about-mogdb.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/mogdb-new-feature/release-note.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/2-docker-based-mogdb.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/DBMS-RANDOM.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/compat-tools.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mog_filedump.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mog_xlogdump.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mogdb-monitor.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/open-source-components.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/terms-of-use.md
 delete mode 100644 product/en/docs-mogdb/v5.2/about-mogdb/usage-limitations.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/administrator-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/backup-and-restoration-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/backup-and-restoration.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/flashback-restoration.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/logical-backup-and-restoration.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/physical-backup-and-restoration.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/column-store-tables-management.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/common-primary-backup-deployment-scenarios.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/database-deployment-scenario.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-architecture.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-current-architectural-feature-constraints.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-developer-environment-deployment-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/two-city-three-dc-dr.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/1-using-gs_dump-and-gs_dumpall-to-export-data-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/2-exporting-a-single-database.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/3-exporting-all-databases.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/4-data-export-by-a-user-without-required-permissions.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/exporting-data.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-and-exporting-data.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/1-import-modes.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/10-managing-concurrent-write-operations.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/2-running-the-INSERT-statement-to-insert-data.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/3-running-the-COPY-FROM-STDIN-statement-to-import-data.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/4-using-a-gsql-meta-command-to-import-data.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/5-using-gs_restore-to-import-data.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/6-updating-data-in-a-table.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/7-deep-copy.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/8-ANALYZE-table.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/9-doing-VACUUM-to-a-table.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/importing-data.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/localization/character-set-support.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/localization/collation-support.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/localization/locale-support.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/localization/localization.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/1-mot-introduction.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/2-mot-features-and-benefits.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/3-mot-key-technologies.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/4-mot-usage-scenarios.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/5-mot-performance-benchmarks.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/introducing-mot.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/1-using-mot-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/2-mot-preparation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/3-mot-deployment.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/4-mot-usage.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/5-mot-administration.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/6-mot-sample-tpcc-benchmark.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/using-mot.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-1.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-2.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-3.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-4.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-5.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-6.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-7.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-8.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-9.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/concepts-of-mot.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/1-references.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/2-glossary.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/mot-appendix.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/mot-engine.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/primary-and-standby-management.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/0-starting-and-stopping-mogdb.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/1-routine-maintenance-check-items.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/10-data-security-maintenance-suggestions.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/11-log-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/2-checking-os-parameters.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/3-checking-mogdb-health-status.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/4-checking-database-performance.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/5-checking-and-deleting-logs.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/6-checking-time-consistency.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/7-checking-the-number-of-application-connections.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/8-routinely-maintaining-tables.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/9-routinely-recreating-an-index.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/exporting-and-viewing-the-wdr.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/routine-maintenance.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/slow-sql-diagnosis.md
 delete mode 100644 product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/using-the-gsql-client-for-connection.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/adaptive-plan-selection.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/characteristic-description-abo-optimizer.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/intelligent-cardinality-estimation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai-capabilities.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/1-database-metric-collection-forecast-and-exception-detection.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/2-root-cause-analysis-for-slow-sql-statements.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/3-index-recommendation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/4-parameter-tuning-and-diagnosis.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/5-slow-sql-statement-discovery.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/characteristic-description-ai4db.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/db4ai-database-driven-ai.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/1-standard-sql.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/2-standard-development-interfaces.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/3-postgresql-api-compatibility.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/ECPG.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/MogDB-MySQL-compatibility.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/MogDB-Oracle-compatibility.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/application-development-interfaces.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/characteristic-description-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/characteristic-description.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/add-rowtype-attribute-to-the-view.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-distinct-performance-optimization.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-support-keep-clause.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-support-scenario-extensions.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/compatibility.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/compatible-with-mysql-alias-support-for-single-quotes.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/current_date-current_time-keywords-as-field-name.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/custom-type-array.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/for-update-supports-outer-join.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/format-error-backtrace.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/mogdb-supports-insert-all.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/oracle-dblink-syntax-compatibility.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/remove-type-conversion-hint-when-creating-package-function-procedure.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-bypass-method-when-merge-into-hit-index.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-for-adding-nocopy-attributes-to-procedure-and-function-parameters.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-for-constants-in-package-as-default-values.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-passing-the-count-attribute.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-plpgsql-subtype.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-q-quote-escape-character.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-subtracting-two-date-types-to-return-numeric-type.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-synonym-calls-without-parentheses-for-function-without-parameters.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-table-function.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-to-keep-the-same-name-after-the-end-with-oracle.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-where-current-of.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/1-access-control-model.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/10-row-level-access-control.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/11-password-strength-verification.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/12-equality-query-in-a-fully-encrypted-database.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/13-ledger-database-mechanism.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/14-transparent-data-encryption.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/2-separation-of-control-and-access-permissions.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/3-database-encryption-authentication.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/4-data-encryption-and-storage.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/5-database-audit.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/6-network-communication-security.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/7-resource-label.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/8-unified-audit.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/9-dynamic-data-anonymization.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/database-security/database-security.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/1-support-for-functions-and-stored-procedures.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/10-autonomous-transaction.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/11-global-temporary-table.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/12-pseudocolumn-rownum.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/13-stored-procedure-debugging.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/14-jdbc-client-load-balancing-and-readwrite-isolation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/15-in-place-update-storage-engine.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/16-publication-subscription.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/17-foreign-key-lock-enhancement.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/18-data-compression-in-oltp-scenarios.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/19-transaction-async-submit.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/2-sql-hints.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/20-copy-import-optimization.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/21-dynamic-partition-pruning.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/22-sql-running-status-observation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/23-index-creation-parallel-control.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/24-brin-index.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/25-bloom-index.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/3-full-text-indexing.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/4-copy-interface-for-error-tolerance.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/5-partitioning.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/6-support-for-advanced-analysis-functions.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/7-materialized-view.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/8-hyperloglog.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/9-creating-an-index-online.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/enterprise-level-features.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/event-trigger.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/1-primary-standby.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/10-adding-or-deleting-a-standby-server.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/11-delaying-entering-the-maximum-availability-mode.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/12-parallel-logical-decoding.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/13-dcf.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/14-cm.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/15-global-syscache.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/16-using-a-standby-node-to-build-a-standby-node.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/17-two-city-three-dc-dr.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/2-logical-replication.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/4-logical-backup.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/5-physical-backup.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/6-automatic-job-retry-upon-failure.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/7-ultimate-rto.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/8-cascaded-standby-server.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/9-delayed-replay.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/cm-cluster-management-component-supporting-two-node-deployment.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/ddl-query-of-view.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/high-availability-based-on-the-paxos-protocol.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-availability/high-availability.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/1-cbo-optimizer.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/10-xlog-no-lock-flush.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/11-parallel-page-based-redo-for-ustore.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/12-row-store-execution-to-vectorized-execution.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/2-llvm.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/3-vectorized-engine.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/4-hybrid-row-column-store.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/5-adaptive-compression.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/7-kunpeng-numa-architecture-optimization.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/8-high-concurrency-of-thread-pools.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/9-smp-for-parallel-execution.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/adaptive-two-phase-aggregation.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/astore-row-level-compression.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/btree-index-compression.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/enhancement-of-tracing-backend-key-thread.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/enhancement-of-wal-redo-performance.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/high-performance.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ock-accelerated-data-transmission.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ock-scrlock-accelerate-distributed-lock.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ordering-operator-optimization.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/parallel-index-scan.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/parallel-query-optimization.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/sql-bypass.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/high-performance/tracing-SQL-function.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/2-workload-diagnosis-report.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/3-slow-sql-diagnosis.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/4-session-performance-diagnosis.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/5-system-kpi-aided-diagnosis.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/built-in-stack-tool.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/dcf-module-tracing.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/error-when-writing-illegal-characters.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/extension-splitting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/fault-diagnosis.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/light-lock-export-and-analysis.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/maintainability.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/pageinspect-pagehack.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/maintainability/sql-patch.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/middleware/deploying-a-distributed-database-using-kubernetes.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/middleware/distributed-analysis-capabilities.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/middleware/distributed-database-capability.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/middleware/middleware.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/workload-management/high-latency-escape-at-the-infrastructure-layer.md
 delete mode 100644 product/en/docs-mogdb/v5.2/characteristic-description/workload-management/workload-management.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-cluster-brain-split-fault.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-cluster-manual-failover.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-fault.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/common-fault-locating-cases.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-after-installation-on-x86.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-due-to-full-disk-space.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-due-to-incorrect-settings-of-guc-parameter-log-directory.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-when-removeipc-is-enabled.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-fault-locating.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/after-you-run-the-du-command.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/disk-space-usage-reaches-the-threshold.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/error-no-space-left-on-device-is-displayed.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/file-is-damaged-in-the-xfs-file-system.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/file-system-disk-memory.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/insufficient-memory.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/shared-memory-leak.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/when-the-tpcc-is-running.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/b-tree-index-faults.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/index-fault.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/reindexing-fails.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/when-a-user-specifies-only-an-index-name.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/an-error-occurs-during-integer-conversion.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/different-data-is-displayed.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/forcibly-terminating-a-session.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/permission-session-data-type.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/performance-deterioration.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/primary-node-is-hung-in-demoting.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/query-failure.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/service-ha-concurrency.md
 delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/service-startup-failure.md
product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/service-startup-failure.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/standby-node-in-the-need-repair-state.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/too-many-clients-already.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/analyzing-the-status-of-a-query-statement.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/analyzing-whether-a-query-statement-is-blocked.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/lock-wait-timeout-is-displayed.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/low-query-efficiency.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/slow-response-to-a-query-statement.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/sql-fault.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/an-error-is-reported-when-the-table-partition-is-modified.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/table-partition-table.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/table-size-does-not-change.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-methods.md delete mode 100644 product/en/docs-mogdb/v5.2/common-faults-and-identification/common-faults-and-identification.md delete mode 100644 product/en/docs-mogdb/v5.2/communication-matrix.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/1-1-stored-procedure.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/1-introduction-to-autonomous-transaction.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/2-function-supporting-autonomous-transaction.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/3-stored-procedure-supporting-autonomous-transaction.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/4-restrictions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/anonymous-block-supporting-autonomous-transaction.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/1-development-based-on-jdbc-overview.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/10-example-common-operations.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/11-example-retrying-sql-queries-for-applications.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/12-example-importing-and-exporting-data-through-local-files.md delete mode 100644 
product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/13-example-2-migrating-data-from-a-my-database-to-mogdb.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/14-example-logic-replication-code.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/14.1-example-parameters-for-connecting-to-the-database-in-different-scenarios.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/1-java-sql-Connection.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/10-javax-sql-DataSource.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/11-javax-sql-PooledConnection.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/12-javax-naming-Context.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/13-javax-naming-spi-InitialContextFactory.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/14-CopyManager.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/2-java-sql-CallableStatement.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/3-java-sql-DatabaseMetaData.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/4-java-sql-Driver.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/5-java-sql-PreparedStatement.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/6-java-sql-ResultSet.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/7-java-sql-ResultSetMetaData.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/8-java-sql-Statement.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/9-javax-sql-ConnectionPoolDataSource.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/jdbc-interface-reference.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/2-jdbc-package-driver-class-and-environment-class.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/3-development-process.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/4-loading-the-driver.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/5-connecting-to-a-database.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/6-connecting-to-a-database-using-ssl.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/7-running-sql-statements.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/8-processing-data-in-a-result-set.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/8.1-log-management.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/9-closing-a-connection.md delete mode 
100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/connecting-to-a-database-using-uds.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/development-based-on-jdbc.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/example-jdbc-primary-and-backup-cluster-load-balancing.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/jdbc-based-common-parameter-reference.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/jdbc-release-notes.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/1-development-based-on-odbc.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/2-odbc-packages-dependent-libraries-and-header-files.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/3-configuring-a-data-source-in-the-linux-os.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/4-development-process.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/5-example-common-functions-and-batch-binding.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/5.1-typical-application-scenarios-and-configurations.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-0-odbc-overview.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-1-SQLAllocEnv.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-10-SQLExecDirect.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-11-SQLExecute.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-12-SQLFetch.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-13-SQLFreeStmt.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-14-SQLFreeConnect.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-15-SQLFreeHandle.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-16-SQLFreeEnv.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-17-SQLPrepare.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-18-SQLGetData.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-19-SQLGetDiagRec.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-2-SQLAllocConnect.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-20-SQLSetConnectAttr.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-21-SQLSetEnvAttr.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-22-SQLSetStmtAttr.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-23-Examples.md delete mode 
100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-3-SQLAllocHandle.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-4-SQLAllocStmt.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-5-SQLBindCol.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-6-SQLBindParameter.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-7-SQLColAttribute.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-8-SQLConnect.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-9-SQLDisconnect.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/odbc-interface-reference.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/1-database-connection-control-functions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/10-PQstatus.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/2-PQconnectdbParams.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/3-PQconnectdb.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/4-PQconninfoParse.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/5-PQconnectStart.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/6-PQerrorMessage.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/7-PQsetdbLogin.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/8-PQfinish.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/9-PQreset.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/1-PQclear.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/10-PQntuples.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/11-PQprepare.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/12-PQresultStatus.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/2-PQexec.md delete mode 100644 
product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/3-PQexecParams.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/4-PQexecParamsBatch.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/5-PQexecPrepared.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/6-PQexecPreparedBatch.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/7-PQfname.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/8-PQgetvalue.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/9-PQnfields.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/database-statement-execution-functions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/1-functions-for-asynchronous-command-processing.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/2-PQsendQuery.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/3-PQsendQueryParams.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/4-PQsendPrepare.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/5-PQsendQueryPrepared.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/6-PQflush.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/1-PQgetCancel.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/2-PQfreeCancel.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/3-PQcancel.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/functions-for-canceling-queries-in-progress.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/libpq-api-reference.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/dependent-header-files-of-libpq.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/development-based-on-libpq.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/development-process.md 
delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/libpq-example.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/link-parameters.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/1-psycopg-based-development.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/1-psycopg2-connect.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/10-connection-close.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/2-connection-cursor.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/3-cursor-execute-query-vars-list.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/4-curosr-executemany-query-vars-list.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/5-connection-commit.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/6-connection-rollback.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/7-cursor-fetchone.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/8-cursor-fetchall.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/9-cursor-close.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/psycopg-api-reference.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/12-psycopg2-release-notes.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/2-psycopg-package.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/3.1-development-process.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/4-connecting-to-a-database.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/5-adaptation-of-python-values-to-sql-types.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/6-new-features-in-mogdb.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/9-connecting-to-the-database-using-ssl.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/5-commissioning.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/application-development-tutorial.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/design-specification.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/introduction-to-development-specifications.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/naming-specification.md delete 
mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/overview-of-development-specifications.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/postgresql-compatibility.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/query-operations.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/syntax-specification.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/developer-guide.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/extension.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/1-oracle_fdw.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/2-mysql_fdw.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/3-postgres_fdw.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/dblink.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/fdw-introduction.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/file_fdw.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/pg_bulkload-user-guide.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/pg_prewarm-user-guide.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/pg_repack-user-guide.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/pg_trgm-user-guide.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-extension.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-overview.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-support-and-constraints.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/using-postgis.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/wal2json-user-guide.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/extension/whale.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/1-logical-decoding.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/2-logical-decoding-by-sql-function-interfaces.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/logical-decoding.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-replication.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/architecture.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/configuration-settings.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/conflicts.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/monitoring.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/publication-subscription.md delete mode 100644 
product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/publications.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/quick-setup.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/restrictions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/security.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/subscriptions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/materialized-view/1-materialized-view-overview.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/1-full-materialized-view-overview.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/2-full-materialized-view-usage.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/3-full-materialized-view-support-and-constraints.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/full-materialized-view.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/1-incremental-materialized-view-overview.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/2-incremental-materialized-view-usage.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/3-incremental-materialized-view-support-and-constraints.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/incremental-materialized-view.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/assessment-tool.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-extension.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-installation.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-overview.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-restrictions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/dolphin-reset-parameters.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/dolphin-syntax.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/guc-parameters.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/identifiers/dolphin-column-name-identifiers.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/identifiers/dolphin-identifiers.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-binary-types.md delete mode 100644 
product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-bit-string-types.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-bool-types.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-character-types.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-data-types.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-date-time-types.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-enumeration-types.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-numeric-types.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-dcl-syntax.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-ddl-syntax.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-dml-syntax.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-keywords.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-sql-reference.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/expressions/dolphin-conditional-expressions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/expressions/dolphin-expressions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-advisory-lock-functions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-aggregate-functions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-arithmetic-functions-and-operators.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-assignment-operators.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-b-compatible-database-lock.md delete mode 100644 
product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-bit-string-functions-and-operators.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-character-processing-functions-and-operators.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-comment-operators.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-compatible-operators-and-operations.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-conditional-expression-functions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-date-and-time-processing-functions-and-operators.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-functions-and-operators.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-json-jsonb-functions-and-operators.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-logical-operators.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-network-address-functions-and-operators.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-system-information-functions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-type-conversion-functions.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-database.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-function.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-procedure.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-server.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table-partition.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table.md 
delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-tablespace.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-view.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-analyze-analyse.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-ast.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-checksum-table.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-database.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-function.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-index.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-procedure.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-server.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table-as.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table-partition.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-tablespace.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-trigger.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-view.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-describe-table.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-do.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-database.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-index.md delete mode 
100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-tablespace.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-execute.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-explain.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-flush-binary-logs.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-grant-revoke-proxy.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-grant.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-insert.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-kill.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-load-data.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-optimize-table.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-prepare.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-rename-table.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-rename-user.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-revoke.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-select-hint.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-select.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-set-charset.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-set-password.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-character-set.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-collation.md delete mode 100644 
product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-columns.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-database.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-function.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-procedure.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-table.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-trigger.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-view.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-databases.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-function-status.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-grants.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-index.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-master-status.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-plugins.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-privileges.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-procedure-status.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-processlist.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-slave-hosts.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-status.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-table-status.md delete mode 100644 
product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-tables.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-triggers.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-variables.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-warnings.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-sql-syntax.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-update.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-use-db_name.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/basic-statements/dolphin-assignment-statements.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/basic-statements/dolphin-basic-statements.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/dolphin-stored-procedures.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-INDEX_STATISTIC.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-PG_TYPE_NONSTRICT_BASIC_VALUE.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-system-views.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/mysql-compatible-description.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-management.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/benefits-of-partition-pruning.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/dynamic-partition-pruning.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/how-to-identify-whether-partition-pruning-has-been-used.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/information-that-can-be-used-for-partition-pruning.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/partition-pruning.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/static-partition-pruning.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/recommendations-for-choosing-a-partitioning-strategy.md delete mode 100644 
product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-hash-partitioning.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-list-partitioning.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-range-partitioning.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-1-plpgsql-overview.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-10-other-statements.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-11-cursors.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-12-retry-management.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-13-debugging.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-14-package.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-2-data-types.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-3-data-type-conversion.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-4-arrays-and-records.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-5-declare-syntax.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-6-basic-statements.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-7-dynamic-statements.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-8-control-statements.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-9-transaction-management.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/advanced-packages/advanced-packages.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/advanced-packages/basic-interfaces/PKG_SERVICE.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/plpgsql/advanced-packages/basic-interfaces/basic-interfaces.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/scheduled-jobs/pkg-service.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/scheduled-jobs/scheduled-jobs.md delete mode 100644 product/en/docs-mogdb/v5.2/developer-guide/user-defined-functions.md delete mode 100644 product/en/docs-mogdb/v5.2/faqs/application-development-faqs.md delete mode 100644 product/en/docs-mogdb/v5.2/faqs/deployment-and-maintenance-faqs.md delete mode 100644 product/en/docs-mogdb/v5.2/faqs/faqs.md delete mode 100644 product/en/docs-mogdb/v5.2/faqs/high-availability-faqs.md delete mode 100644 product/en/docs-mogdb/v5.2/faqs/migration-faqs.md delete mode 100644 product/en/docs-mogdb/v5.2/faqs/product-faqs.md delete mode 100644 product/en/docs-mogdb/v5.2/faqs/upgrade-faqs.md delete mode 100644 product/en/docs-mogdb/v5.2/glossary.md delete mode 100644 product/en/docs-mogdb/v5.2/high-available-guide/cluster-management/cluster-management.md delete mode 100644 product/en/docs-mogdb/v5.2/high-available-guide/cluster-management/cm-configuration-parameter/cm-cm_agent.md delete mode 100644 product/en/docs-mogdb/v5.2/high-available-guide/cluster-management/cm-configuration-parameter/cm-cm_server.md delete mode 100644 product/en/docs-mogdb/v5.2/high-available-guide/cluster-management/cm-configuration-parameter/cm-configuration-parameter.md delete mode 100644 
product/en/docs-mogdb/v5.2/high-available-guide/cluster-management/feature-introduction.md
delete mode 100644 product/en/docs-mogdb/v5.2/high-available-guide/cluster-management/introduction-to-cm_ctl-tool.md
delete mode 100644 product/en/docs-mogdb/v5.2/high-available-guide/cluster-management/introduction-to-cm_persist.md
delete mode 100644 product/en/docs-mogdb/v5.2/high-available-guide/cluster-management/introduction-to-installation-and-uninstallation-tool.md
delete mode 100644 product/en/docs-mogdb/v5.2/high-available-guide/cluster-management/manual-configuration-of-vip.md
delete mode 100644 product/en/docs-mogdb/v5.2/high-available-guide/cluster-management/safety-design.md
delete mode 100644 product/en/docs-mogdb/v5.2/high-available-guide/high-available-dcf.md
delete mode 100644 product/en/docs-mogdb/v5.2/high-available-guide/high-available-guide.md
delete mode 100644 product/en/docs-mogdb/v5.2/installation-guide/docker-installation/docker-installation.md
delete mode 100644 product/en/docs-mogdb/v5.2/installation-guide/installation-guide.md
delete mode 100644 product/en/docs-mogdb/v5.2/installation-guide/installation-preparation/environment-requirement.md
delete mode 100644 product/en/docs-mogdb/v5.2/installation-guide/installation-preparation/installation-preparation.md
delete mode 100644 product/en/docs-mogdb/v5.2/installation-guide/installation-preparation/os-configuration.md
delete mode 100644 product/en/docs-mogdb/v5.2/installation-guide/manual-installation.md
delete mode 100644 product/en/docs-mogdb/v5.2/installation-guide/ptk-based-installation.md
delete mode 100644 product/en/docs-mogdb/v5.2/installation-guide/recommended-parameter-settings.md
delete mode 100644 product/en/docs-mogdb/v5.2/mogeaver/mogeaver-overview.md
delete mode 100644 product/en/docs-mogdb/v5.2/mogeaver/mogeaver-release-notes.md
delete mode 100644 product/en/docs-mogdb/v5.2/mogeaver/mogeaver.md
delete mode 100644 product/en/docs-mogdb/v5.2/overview.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/TPCC-performance-tuning-guide.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/performance-tuning.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/sql-tuning/experience-in-rewriting-sql-statements.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/sql-tuning/hint-based-tuning.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/sql-tuning/query-execution-process.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/sql-tuning/resetting-key-parameters-during-sql-tuning.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/sql-tuning/reviewing-and-modifying-a-table-definition.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/sql-tuning/sql-tuning.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/sql-tuning/tuning-process.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/sql-tuning/typical-sql-optimization-methods.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/sql-tuning/updating-statistics.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/configuring-llvm.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/configuring-smp.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/configuring-ustore.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/configuring-vector-engine.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/optimizing-os-parameters.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-load-management-overview.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-load-management.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/creating-resource-pool.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/enabling-resource-load-management.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-management-preparations.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-planning.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/setting-control-group.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/system-tuning/system-tuning.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/wdr/wdr-report.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/wdr/wdr-snapshot-schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/performance-tuning/wdr/wdr.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/container-based-installation.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/installation-on-a-single-node.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/mogdb-access.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-cli-to-access-mogdb/gsql.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-cli-to-access-mogdb/pgcli.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-cli-to-access-mogdb/use-cli-to-access-mogdb.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-gui-tools-to-access-mogdb/dbeaver.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-gui-tools-to-access-mogdb/mogeaver-usage.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-gui-tools-to-access-mogdb/use-gui-tools-to-access-mogdb.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-middleware-to-access-mogdb/use-middleware-to-access-mogdb.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-middleware-to-access-mogdb/weblogic-configures-mogdb-data-source-reference.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-middleware-to-access-mogdb/websphere-configures-mogdb-data-source-reference.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/adonet.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/c-cpp.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/go.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/java.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/nodejs.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/python.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/rust.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/use-programming-language-to-access-mogdb.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogdb-playground.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/mogila.md
delete mode 100644 product/en/docs-mogdb/v5.2/quick-start/quick-start.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00001-GAUSS-00100.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00101-GAUSS-00200.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00201-GAUSS-00300.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00301-GAUSS-00400.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00401-GAUSS-00500.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00501-GAUSS-00600.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00601-GAUSS-00700.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00701-GAUSS-00800.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00801-GAUSS-00900.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00901-GAUSS-01000.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01001-GAUSS-01100.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01101-GAUSS-01200.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01201-GAUSS-01300.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01301-GAUSS-01400.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01401-GAUSS-01500.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01501-GAUSS-01600.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01601-GAUSS-01700.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01701-GAUSS-01800.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01801-GAUSS-01900.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01901-GAUSS-02000.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02001-GAUSS-02100.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02101-GAUSS-02200.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02201-GAUSS-02300.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02301-GAUSS-02400.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02401-GAUSS-02500.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02501-GAUSS-02600.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02601-GAUSS-02700.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02701-GAUSS-02800.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02801-GAUSS-02900.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02901-GAUSS-03000.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03001-GAUSS-03100.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03101-GAUSS-03200.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03201-GAUSS-03300.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03301-GAUSS-03400.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03401-GAUSS-03500.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03501-GAUSS-03600.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03601-GAUSS-03700.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03701-GAUSS-03800.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03801-GAUSS-03900.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03901-GAUSS-04000.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04001-GAUSS-04100.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04101-GAUSS-04200.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04201-GAUSS-04300.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04301-GAUSS-04400.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04401-GAUSS-04500.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04501-GAUSS-04600.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04601-GAUSS-04700.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04701-GAUSS-04800.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04801-GAUSS-04900.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04901-GAUSS-05000.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05001-GAUSS-05100.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05101-GAUSS-05200.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05201-GAUSS-05300.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05301-GAUSS-05400.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05401-GAUSS-05500.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05501-GAUSS-05600.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05601-GAUSS-05700.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05701-GAUSS-05800.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05801-GAUSS-05900.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05901-GAUSS-06000.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06001-GAUSS-06100.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06101-GAUSS-06200.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06201-GAUSS-06300.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06301-GAUSS-06400.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06401-GAUSS-06500.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06501-GAUSS-06600.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06601-GAUSS-06700.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06701-GAUSS-06800.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06801-GAUSS-06900.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06901-GAUSS-07000.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-07001-GAUSS-07100.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-07101-GAUSS-07200.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-07201-GAUSS-07300.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-07301-GAUSS-07400.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-07401-GAUSS-07500.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-50000-GAUSS-50999.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-51000-GAUSS-51999.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-52000-GAUSS-52999.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-53000-GAUSS-53699.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class00-class21.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class0A-class0Z.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class22-class24.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class25-class40.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class2B-class2F.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class3B-class3F.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class42-class44.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class53-class58.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/classCG-classTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/classF0-classP0.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/classXX-classYY.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/description-of-sql-error-codes.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/sqlstate-values-of-mogdb-cm-error-code.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/sqlstate-values-of-mogdb-error-code.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/error-code-reference.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-code-reference/third-party-library-error-codes.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-log-reference/error-log-reference.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/error-log-reference/kernel-error-message.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/aggregate-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/ai-feature-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/array-functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/binary-string-functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/bit-string-functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/character-processing-functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/comparison-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/conditional-expressions-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/data-damage-detection-and-repair-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/date-and-time-processing-functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/dynamic-data-masking-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/encrypted-equality-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/event-trigger-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/fault-injection-system-function.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/geometric-functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/global-syscache-feature-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/global-temporary-table-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/hash-function.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/hll-functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/internal-functions/internal-functions-1.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/internal-functions/internal-functions-2.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/internal-functions/internal-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/json-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/ledger-database-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/logical-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/mathematical-functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/mode-matching-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/network-address-functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/obsolete-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/other-system-functions/other-system-functions-1.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/other-system-functions/other-system-functions-2.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/other-system-functions/other-system-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/prompt-message-function.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/range-functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/security-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/sequence-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/set-returning-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-1.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-2.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-3.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/access-privilege-inquiry-function.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/comment-information-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/guc-value-inquiry-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/other-function.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/schema-visibility-inquiry-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/session-information-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/system-catalog-information-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/system-information-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/transaction-ids-and-snapshots.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/advisory-lock-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/backup-and-restoration-control-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/configuration-settings-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/database-object-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/logical-replication-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/other-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/row-store-compression-system-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/segment-page-storage-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/server-signal-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/snapshot-synchronization-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/system-management-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/undo-system-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/universal-file-access-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/text-search-functions-and-operators.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/trigger-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/type-conversion-functions/type-conversion-functions-1.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/type-conversion-functions/type-conversion-functions-2.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/type-conversion-functions/type-conversion-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/window-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/functions-and-operators/xml-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/AI-features.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/DCF-parameters-settings.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/HyperLogLog.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/MogDB-transaction.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/alarm-detection.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/appendix.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/auditing/audit-switch.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/auditing/auditing.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/auditing/operation-audit.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/auditing/user-and-permission-audit.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/automatic-vacuuming.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/backend-compression.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/backup-and-restoration-parameter.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/cm-parameters.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/connection-and-authentication/communication-library-parameters.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/connection-and-authentication/connection-and-authentication.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/connection-and-authentication/connection-settings.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/connection-pool-parameters.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/default-settings-of-client-connection/default-settings-of-client-connection.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/default-settings-of-client-connection/other-default-parameters.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/developer-options.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/error-reporting-and-logging/error-reporting-and-logging.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/error-reporting-and-logging/logging-time.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/error-reporting-and-logging/using-csv-log-output.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/fault-tolerance.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/file-location.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/flashback.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/global-syscache-parameters.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/global-temporary-table.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/guc-parameter-list.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/guc-parameter-usage.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/guc-user-defined-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/ha-replication/ha-replication.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/ha-replication/primary-server.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/ha-replication/sending-server.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/ha-replication/standby-server.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/load-management.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/lock-management.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/miscellaneous-parameters.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/mot.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/parameters-related-to-efficient-data-compression-algorithms.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/query-planning/other-optimizer-options.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/query-planning/query-planning.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/query.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/reference-guide-guc-parameters.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/replication-parameters-of-two-database-instances.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/reserved-parameters.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/background-writer.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/cost-based-vacuum-delay.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/disk-space.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/kernel-resource-usage.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/memory.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/resource-consumption.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/rollback-parameters.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/scheduled-task.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/security-configuration.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/statistics-during-the-database-running/performance-statistics.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/statistics-during-the-database-running/statistics-during-the-database-running.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/system-performance-snapshot.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/thread-pool.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/upgrade-parameters.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/version-and-platform-compatibility/version-and-platform-compatibility.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/wait-events.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/write-ahead-log/archiving.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/write-ahead-log/checkpoints.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/write-ahead-log/log-replay.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/write-ahead-log/settings.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/write-ahead-log/write-ahead-log.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/guc-parameters/writer-statement-parameters-supported-by-standby-server.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/reference-guide.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI-schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.ARCHIVE_SNAPSHOT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.CREATE_SNAPSHOT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.CREATE_SNAPSHOT_INTERNAL.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.MANAGE_SNAPSHOT_INTERNAL.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.PREPARE_SNAPSHOT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.PREPARE_SNAPSHOT_INTERNAL.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.PUBLISH_SNAPSHOT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.PURGE_SNAPSHOT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.PURGE_SNAPSHOT_INTERNAL.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.SAMPLE_SNAPSHOT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.SNAPSHOT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/DBE_PERF.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/Cache-IO.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_SEQUENCES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_SYS_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_SYS_SEQUENCES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_SYS_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_USER_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_USER_SEQUENCES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_USER_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STAT_DB_CU.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STAT_SESSION_CU.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_ALL_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_ALL_SEQUENCES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_ALL_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_SYS_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_SYS_SEQUENCES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_SYS_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_USER_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_USER_SEQUENCES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_USER_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_SEQUENCES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_SYS_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_SYS_SEQUENCES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_SYS_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_USER_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_USER_SEQUENCES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_USER_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/configuration/CONFIG_SETTINGS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/configuration/GLOBAL_CONFIG_SETTINGS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/configuration/configuration.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/FILE_IOSTAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/FILE_REDO_IOSTAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/GLOBAL_FILE_IOSTAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/GLOBAL_FILE_REDO_IOSTAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/GLOBAL_REL_IOSTAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/LOCAL_REL_IOSTAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/SUMMARY_FILE_IOSTAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/SUMMARY_FILE_REDO_IOSTAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/SUMMARY_REL_IOSTAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/file.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/global-plancache/GLOBAL_PLANCACHE_CLEAN.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/global-plancache/GLOBAL_PLANCACHE_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/global-plancache/global-plancache.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/instance/GLOBAL_INSTANCE_TIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/instance/INSTANCE_TIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/instance/instance.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/lock/GLOBAL_LOCKS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/lock/LOCKS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/lock/lock.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/memory/GLOBAL_MEMORY_NODE_DETAIL.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/memory/GLOBAL_SHARED_MEMORY_DETAIL.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/memory/GS_SHARED_MEMORY_DETAIL.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/memory/MEMORY_NODE_DETAIL.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/memory/memory-schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_ALL_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_ALL_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_BAD_BLOCK.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_DATABASE.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_DATABASE_CONFLICTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_SYS_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_SYS_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_USER_FUNCTIONS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_USER_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_USER_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_XACT_ALL_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_XACT_SYS_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_XACT_USER_FUNCTIONS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_XACT_USER_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_ALL_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_ALL_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_BAD_BLOCK.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_DATABASE.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_DATABASE_CONFLICTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_SYS_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_SYS_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_USER_FUNCTIONS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_USER_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_USER_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_XACT_ALL_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_XACT_SYS_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_XACT_USER_FUNCTIONS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_XACT_USER_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_ALL_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_ALL_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_BAD_BLOCK.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_DATABASE.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_DATABASE_CONFLICTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_SYS_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_SYS_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_USER_FUNCTIONS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_USER_INDEXES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_USER_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_XACT_ALL_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_XACT_SYS_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_XACT_USER_FUNCTIONS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_XACT_USER_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/object-schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/GLOBAL_OPERATOR_HISTORY.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/GLOBAL_OPERATOR_HISTORY_TABLE.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/GLOBAL_OPERATOR_RUNTIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/OPERATOR_HISTORY.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/OPERATOR_HISTORY_TABLE.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/OPERATOR_RUNTIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/operator-schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/os/GLOBAL_OS_RUNTIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/os/GLOBAL_OS_THREADS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/os/OS_RUNTIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/os/OS_THREADS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/os/os-schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_SLOW_QUERY_HISTORY.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_SLOW_QUERY_INFO.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_STATEMENT_COMPLEX_HISTORY.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_STATEMENT_COMPLEX_HISTORY_TABLE.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_STATEMENT_COMPLEX_RUNTIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_STATEMENT_COUNT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GS_SLOW_QUERY_HISTORY.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GS_SLOW_QUERY_INFO.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_RUNTIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_COUNT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_HISTORY_query.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_RESPONSETIME_PERCENTILE.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_WLMSTAT_COMPLEX_RUNTIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/SUMMARY_STATEMENT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/SUMMARY_STATEMENT_COUNT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/query-schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/rto/RTO-RPO.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/rto/global_rto_status.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/rto/global_streaming_hadr_rto_and_rpo_stat.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/rto/gs_hadr_local_rto_and_rpo_stat.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_SESSION_MEMORY.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_SESSION_MEMORY_DETAIL.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_SESSION_STAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_SESSION_STAT_ACTIVITY.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_SESSION_TIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_THREADPOOL_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_THREAD_WAIT_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/LOCAL_ACTIVE_SESSION.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/LOCAL_THREADPOOL_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_CPU_RUNTIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_MEMORY.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_MEMORY_DETAIL.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_MEMORY_RUNTIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_STAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_STAT_ACTIVITY.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_TIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/STATEMENT_IOSTAT_COMPLEX_RUNTIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/THREAD_WAIT_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/session-thread.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/GLOBAL_TRANSACTIONS_PREPARED_XACTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/GLOBAL_TRANSACTIONS_RUNNING_XACTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/SUMMARY_TRANSACTIONS_PREPARED_XACTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/SUMMARY_TRANSACTIONS_RUNNING_XACTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/TRANSACTIONS_PREPARED_XACTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/TRANSACTIONS_RUNNING_XACTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/transaction-schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/BGWRITER_STAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/CLASS_VITAL_INFO.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_BGWRITER_STAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_CANDIDATE_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_CKPT_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_DOUBLE_WRITE_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_GET_BGWRITER_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_PAGEWRITER_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_RECORD_RESET_TIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_RECOVERY_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_REDO_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_REPLICATION_SLOTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_REPLICATION_STAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_SINGLE_FLUSH_DW_STATUS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/REPLICATION_SLOTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/REPLICATION_STAT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/SUMMARY_USER_LOGIN.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/USER_LOGIN.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/utility.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/wait-events/GLOBAL_WAIT_EVENTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/wait-events/WAIT_EVENTS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/wait-events/dbe-perf-wait-events.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload-manager/WLM_USER_RESOURCE_CONFIG.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload-manager/WLM_USER_RESOURCE_RUNTIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload-manager/workload-manager.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/GLOBAL_USER_TRANSACTION.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/GLOBAL_WORKLOAD_TRANSACTION.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_SQL_COUNT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_SQL_ELAPSE_TIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_TRANSACTION.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/USER_TRANSACTION.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/WORKLOAD_SQL_COUNT.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/WORKLOAD_SQL_ELAPSE_TIME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/WORKLOAD_TRANSACTION.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/workload-schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER-schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.abort.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.add_breakpoint.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.attach.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.backtrace.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.continue.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.delete_breakpoint.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.disable_breakpoint.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.enable_breakpoint.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.finish.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.info_breakpoints.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.info_code.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.info_locals.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.local_debug_server_info.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.next.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.print_var.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.set_var.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.step.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.turn_off.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.turn_on.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEVELOPER/DBE_PLDEVELOPER.gs_errors.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEVELOPER/DBE_PLDEVELOPER.gs_source.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEVELOPER/DBE_PLDEVELOPER.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL-Schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.create_abort_sql_patch.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.create_hint_sql_patch.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.disable_sql_patch.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.drop_sql_patch.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.enable_sql_patch.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.show_sql_patch.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/information-schema/INFORMATION_SCHEMA_CATALOG_NAME.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/information-schema/_PG_FOREIGN_DATA_WRAPPERS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/information-schema/_PG_FOREIGN_SERVERS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/information-schema/_PG_FOREIGN_TABLES.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/information-schema/_PG_FOREIGN_TABLE_COLUMNS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/information-schema/_PG_USER_MAPPINGS.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/information-schema/information-schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/schema/schema.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/alias.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/appendix.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/extended-functions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/extended-syntax.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes-introduction.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/gin-indexes/gin-tips-and-tricks.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/gin-indexes/implementation.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/gin-indexes/scalability.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/constant-and-macro.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/controlling-transactions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/dcl-syntax-overview.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/ddl-syntax-overview.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/dml-syntax-overview.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/array-expressions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/condition-expressions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/expressions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/row-expressions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/simple-expressions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/subquery-expressions.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/additional-features/additional-features.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/additional-features/gathering-document-statistics.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/additional-features/manipulating-queries.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/additional-features/manipulating-tsvector.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/additional-features/rewriting-queries.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/configuration-examples.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/controlling-text-search/controlling-text-search.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/controlling-text-search/highlighting-results.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/controlling-text-search/parsing-documents.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/controlling-text-search/parsing-queries.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/controlling-text-search/ranking-search-results.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries-overview.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/ispell-dictionary.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/simple-dictionary.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/snowball-dictionary.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/stop-words.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/synonym-dictionary.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/thesaurus-dictionary.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/full-text-search.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/introduction/basic-text-matching.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/introduction/configurations.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/introduction/full-text-retrieval.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/introduction/full-text-search-introduction.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/introduction/what-is-a-document.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/limitations.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/parser.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/tables-and-indexes/constraints-on-index-use.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/tables-and-indexes/creating-an-index.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/tables-and-indexes/searching-a-table.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/tables-and-indexes/tables-and-indexes.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-configuration.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-dictionary.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-parser.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-and-debugging-text-search.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/keywords/keywords-1.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/keywords/keywords-2.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/keywords/keywords.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/mogdb-sql.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/ordinary-table.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/partition-table.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-anonymous-block.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-contraints.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-cursor.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-index.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-llvm.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-lock.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-trigger.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/sub-query.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/system-operation.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/transaction/sql-reference-transaction.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/transaction/transaction-control.md
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/transaction/transaction-management.md
delete mode 100644
product/en/docs-mogdb/v5.2/reference-guide/sql-reference/type-base-value.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/functions.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/operators.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/type-conversion-overview.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/type-conversion.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/union-case-and-related-constructs.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/value-storage.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ABORT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-AGGREGATE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-AUDIT-POLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-DATA-SOURCE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-DATABASE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-DEFAULT-PRIVILEGES.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-DIRECTORY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-EVENT-TRIGGER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-EVENT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-EXTENSION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-FOREIGN-DATA-WRAPPER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-FOREIGN-TABLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-FUNCTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-GLOBAL-CONFIGURATION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-GROUP.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-INDEX.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-LANGUAGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-LARGE-OBJECT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-MASKING-POLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-MATERIALIZED-VIEW.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-OPERATOR.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-PACKAGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-PROCEDURE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-PUBLICATION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-RESOURCE-LABEL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-RESOURCE-POOL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-ROLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-ROW-LEVEL-SECURITY-POLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-RULE.md delete mode 100644 
product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SCHEMA.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SEQUENCE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SERVER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SESSION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SUBSCRIPTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SYNONYM.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SYSTEM-KILL-SESSION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SYSTEM-SET.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TABLE-SUBPARTITION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TABLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TABLESPACE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TEXT-SEARCH-CONFIGURATION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TEXT-SEARCH-DICTIONARY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TRIGGER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TYPE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-USER-MAPPING.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-USER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-VIEW.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ANALYZE-ANALYSE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/BEGIN.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CALL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CHECKPOINT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CLEAN-CONNECTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CLOSE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CLUSTER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/COMMENT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/COMMIT-END.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/COMMIT-PREPARED.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CONNECT-BY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/COPY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-AGGREGATE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-AUDIT-POLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-CAST.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-CLIENT-MASTER-KEY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-COLUMN-ENCRYPTION-KEY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-DATA-SOURCE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-DATABASE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-DIRECTORY.md 
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-EVENT-TRIGGER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-EVENT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-EXTENSION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-FOREIGN-DATA-WRAPPER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-FOREIGN-TABLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-FUNCTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-GROUP.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-INCREMENTAL-MATERIALIZED-VIEW.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-INDEX.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-LANGUAGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-MASKING-POLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-MATERIALIZED-VIEW.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-MODEL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-OPERATOR.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-PACKAGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-PROCEDURE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-PUBLICATION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-RESOURCE-LABEL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-RESOURCE-POOL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-ROLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-ROW-LEVEL-SECURITY-POLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-RULE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-SCHEMA.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-SEQUENCE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-SERVER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-SUBSCRIPTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-SYNONYM.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TABLE-AS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TABLE-PARTITION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TABLE-SUBPARTITION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TABLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TABLESPACE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TEXT-SEARCH-CONFIGURATION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TEXT-SEARCH-DICTIONARY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TRIGGER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TYPE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-USER-MAPPING.md delete mode 100644 
product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-USER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-VIEW.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-WEAK-PASSWORD-DICTIONARY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/CURSOR.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DEALLOCATE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DECLARE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DELETE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DELIMITER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DO.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-AGGREGATE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-AUDIT-POLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-CAST.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-CLIENT-MASTER-KEY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-COLUMN-ENCRYPTION-KEY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-DATA-SOURCE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-DATABASE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-DIRECTORY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-EVENT-TRIGGER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-EVENT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-EXTENSION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-FOREIGN-DATA-WRAPPER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-FOREIGN-TABLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-FUNCTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-GLOBAL-CONFIGURATION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-GROUP.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-INDEX.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-LANGUAGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-MASKING-POLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-MATERIALIZED-VIEW.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-MODEL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-OPERATOR.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-OWNED.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-PACKAGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-PROCEDURE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-PUBLICATION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-RESOURCE-LABEL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-RESOURCE-POOL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-ROLE.md delete mode 100644 
product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-ROW-LEVEL-SECURITY-POLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-RULE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-SCHEMA.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-SEQUENCE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-SERVER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-SUBSCRIPTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-SYNONYM.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TABLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TABLESPACE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TEXT-SEARCH-CONFIGURATION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TEXT-SEARCH-DICTIONARY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TRIGGER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TYPE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-USER-MAPPING.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-USER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-VIEW.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-WEAK-PASSWORD-DICTIONARY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/EXECUTE-DIRECT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/EXECUTE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/EXPLAIN-PLAN.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/EXPLAIN.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/FETCH.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/GRANT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/INSERT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/LOCK.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/MERGE-INTO.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/MOVE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/PREDICT-BY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/PREPARE-TRANSACTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/PREPARE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/PURGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/REASSIGN-OWNED.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/REFRESH-INCREMENTAL-MATERIALIZED-VIEW.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/REFRESH-MATERIALIZED-VIEW.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/REINDEX.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/RELEASE-SAVEPOINT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/RESET.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/REVOKE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ROLLBACK-PREPARED.md delete mode 100644 
product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ROLLBACK-TO-SAVEPOINT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/ROLLBACK.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SAVEPOINT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SELECT-INTO.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SELECT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SET-CONSTRAINTS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SET-ROLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SET-SESSION-AUTHORIZATION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SET-TRANSACTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SET.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SHOW-EVENTS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SHOW.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SHRINK.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SHUTDOWN.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/SNAPSHOT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/START-TRANSACTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/TIMECAPSULE-TABLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/TRUNCATE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/UPDATE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/VACUUM.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/VALUES.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/sql-syntax/sql-syntax.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/HLL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/binary-data-types.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/bit-string-types.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/boolean-data-types.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/character-data-types.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/data-type-used-by-the-ledger-database.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/data-types-supported-by-column-store-tables.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/date-time-types.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/geometric.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/json-types.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/monetary.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/network-address.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/numeric-data-types.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/object-identifier-types.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/pseudo-types.md delete mode 100644 
product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/range.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/set-type.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/supported-data-types.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/text-search-types.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/uuid-type.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/supported-data-types/xml-type.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/overview-of-system-catalogs-and-system-views.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs-and-system-views.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_ASP.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_AUDITING_POLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_AUDITING_POLICY_ACCESS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_AUDITING_POLICY_FILTERS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_AUDITING_POLICY_PRIVILEGES.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_CLIENT_GLOBAL_KEYS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_CLIENT_GLOBAL_KEYS_ARGS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_COLUMN_KEYS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_COLUMN_KEYS_ARGS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_DB_PRIVILEGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_ENCRYPTED_COLUMNS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_ENCRYPTED_PROC.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_GLOBAL_CHAIN.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_GLOBAL_CONFIG.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MASKING_POLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MASKING_POLICY_ACTIONS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MASKING_POLICY_FILTERS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MATVIEW.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MATVIEW_DEPENDENCY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MODEL_WAREHOUSE.md delete mode 100644 
product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_OPT_MODEL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_PACKAGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_POLICY_LABEL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_RECYCLEBIN.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_TXN_SNAPSHOT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_UID.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_EC_OPERATOR_INFO.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_INSTANCE_HISTORY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_OPERATOR_INFO.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_PLAN_ENCODING_TABLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_PLAN_OPERATOR_INFO.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_SESSION_QUERY_INFO_ALL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_USER_RESOURCE_HISTORY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PGXC_CLASS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PGXC_GROUP.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PGXC_NODE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PGXC_SLICE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AGGREGATE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AM.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AMOP.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AMPROC.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_APP_WORKLOADGROUP_MAPPING.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_ATTRDEF.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_ATTRIBUTE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AUTHID.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AUTH_HISTORY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AUTH_MEMBERS.md delete mode 100644 
product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_CAST.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_CLASS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_COLLATION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_CONSTRAINT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_CONVERSION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DATABASE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DB_ROLE_SETTING.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DEFAULT_ACL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DEPEND.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DESCRIPTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DIRECTORY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_ENUM.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_EVENT_TRIGGER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_EXTENSION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_EXTENSION_DATA_SOURCE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_FOREIGN_DATA_WRAPPER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_FOREIGN_SERVER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_FOREIGN_TABLE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_HASHBUCKET.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_INDEX.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_INHERITS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_JOB.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_JOB_PROC.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_LANGUAGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_LARGEOBJECT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_LARGEOBJECT_METADATA.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_NAMESPACE.md delete mode 100644 
product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_OBJECT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_OPCLASS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_OPERATOR.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_OPFAMILY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_PARTITION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_PLTEMPLATE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_PROC.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_PUBLICATION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_PUBLICATION_REL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_RANGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_REPLICATION_ORIGIN.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_RESOURCE_POOL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_REWRITE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_RLSPOLICY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SECLABEL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SET.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SHDEPEND.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SHDESCRIPTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SHSECLABEL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_STATISTIC.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_STATISTIC_EXT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SUBSCRIPTION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SUBSCRIPTION_REL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SYNONYM.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TABLESPACE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TRIGGER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_CONFIG.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_CONFIG_MAP.md 
delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_DICT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_PARSER.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_TEMPLATE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TYPE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_USER_MAPPING.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_USER_STATUS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_WORKLOAD_GROUP.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PLAN_TABLE_DATA.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/STATEMENT_HISTORY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/system-catalogs.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GET_GLOBAL_PREPARED_XACTS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_ASYNC_SUBMIT_SESSIONS_STATUS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_AUDITING.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_AUDITING_ACCESS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_AUDITING_PRIVILEGE.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_CLUSTER_RESOURCE_INFO.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_COMPRESSION.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_DB_PRIVILEGES.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_FILE_STAT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_GSC_MEMORY_DETAIL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_INSTANCE_TIME.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_LABELS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_LSC_MEMORY_DETAIL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_MASKING.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_MATVIEWS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_OS_RUN_INFO.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_REDO_STAT.md delete mode 100644 
product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_CPU_STATISTICS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_CONTEXT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_STATISTICS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_STAT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_TIME.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SQL_COUNT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_STAT_SESSION_CU.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_THREAD_MEMORY_CONTEXT.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_TOTAL_MEMORY_DETAIL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_CGROUP_INFO.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_EC_OPERATOR_STATISTICS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_OPERATOR_HISTORY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_OPERATOR_STATISTICS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_PLAN_OPERATOR_HISTORY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_REBUILD_USER_RESOURCE_POOL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_RESOURCE_POOL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_HISTORY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_INFO.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_INFO_ALL.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_STATISTICS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_USER_INFO.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/MPP_TABLES.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PGXC_PREPARED_XACTS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_AVAILABLE_EXTENSIONS.md delete mode 100644 
product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_AVAILABLE_EXTENSION_VERSIONS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_COMM_DELAY.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_COMM_RECV_STREAM.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_COMM_SEND_STREAM.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_COMM_STATUS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_CONTROL_GROUP_CONFIG.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_CURSORS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_EXT_STATS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GET_INVALID_BACKENDS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GET_SENDERS_CATCHUP_TIME.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GROUP.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GTT_ATTACHED_PIDS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GTT_RELSTATS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GTT_STATS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_INDEXES.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_LOCKS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_NODE_ENV.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_OS_THREADS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_PREPARED_STATEMENTS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_PREPARED_XACTS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_PUBLICATION_TABLES.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_REPLICATION_ORIGIN_STATUS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_REPLICATION_SLOTS.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_RLSPOLICIES.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_ROLES.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_RULES.md delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_RUNNING_XACTS.md delete mode 100644 
product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_SECLABELS.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_SESSION_IOSTAT.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_SESSION_WLMSTAT.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_SETTINGS.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_SHADOW.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_ALL_INDEXES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_ALL_SEQUENCES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_ALL_TABLES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_SYS_INDEXES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_SYS_SEQUENCES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_SYS_TABLES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_USER_INDEXES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_USER_SEQUENCES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_USER_TABLES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATS.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_ACTIVITY.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_ACTIVITY_NG.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_ALL_INDEXES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_ALL_TABLES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_BAD_BLOCK.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_BGWRITER.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_DATABASE.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_DATABASE_CONFLICTS.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_REPLICATION.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_SUBSCRIPTION.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_SYS_INDEXES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_SYS_TABLES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_USER_FUNCTIONS.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_USER_INDEXES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_USER_TABLES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_XACT_ALL_TABLES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_XACT_SYS_TABLES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_XACT_USER_FUNCTIONS.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_XACT_USER_TABLES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TABLES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TDE_INFO.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_THREAD_WAIT_STATUS.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TIMEZONE_ABBREVS.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TIMEZONE_NAMES.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TOTAL_MEMORY_DETAIL.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TOTAL_USER_RESOURCE_INFO.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TOTAL_USER_RESOURCE_INFO_OID.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_USER.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_USER_MAPPINGS.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_VARIABLE_INFO.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_VIEWS.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_WLM_STATISTICS.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PLAN_TABLE.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/system-views.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/viewing-system-catalogs.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/FAQ.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/client-tool.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/client-tool-gsql.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/gsql-faq.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/gsql-introduction.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/meta-command-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/obtaining-help-information.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/usage-guidelines.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/functions-of-mogdb-executable-scripts.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_cgroup.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_check.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_checkos.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_checkperf.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_collector.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_dump.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_dumpall.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_encrypt.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_guc.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_om.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_plan_simulator.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_restore.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_sdr.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_ssh.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_watch.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/server-tools.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/system-catalogs-and-views-supported-by-gs_collector.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tool-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tool-reference.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/dsscmd.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/dssserver.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_backup.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_basebackup.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_ctl.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_dropnode.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_expansion.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_initdb.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_install.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_postuninstall.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_preinstall.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_probackup.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_sshexkey.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_tar.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_uninstall.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_upgradectl.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gstrace.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/kadmin-local.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/kdb5_util.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/kdestroy.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/kinit.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/klist.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/krb5kdc.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/mogdb.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pg_archivecleanup.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pg_config.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pg_controldata.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pg_recvlogical.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pg_resetxlog.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pscp.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pssh.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/tools-used-in-the-internal-system.md
 delete mode 100644 product/en/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/transfer.py.md
 delete mode 100644 product/en/docs-mogdb/v5.2/security-guide/security-guide.md
 delete mode 100644 product/en/docs-mogdb/v5.2/security-guide/security/1-client-access-authentication.md
 delete mode 100644 product/en/docs-mogdb/v5.2/security-guide/security/2-managing-users-and-their-permissions.md
 delete mode 100644 product/en/docs-mogdb/v5.2/security-guide/security/3-configuring-database-audit.md
 delete mode 100644 product/en/docs-mogdb/v5.2/security-guide/security/4-setting-encrypted-equality-query.md
 delete mode 100644 product/en/docs-mogdb/v5.2/security-guide/security/5-setting-a-ledger-database.md
 delete mode 100644 product/en/docs-mogdb/v5.2/security-guide/security/6-transparent-data-encryption.md
 delete mode 100644 product/en/docs-mogdb/v5.2/security-guide/security/database-security-management.md
 delete mode 100644 product/en/docs-mogdb/v5.2/source-code-parsing.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_about.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_ai-features.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_characteristic_description.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_common-faults-and-identification.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_communication-matrix.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_datatypes-and-sql.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_dev.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_error.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_extension-referecne.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_faqs.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_glossary.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_high_available.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_install.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_manage.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_parameters-and-tools.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_performance.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_quickstart.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_secure.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_system-catalogs-and-functions.md
 delete mode 100644 product/en/docs-mogdb/v5.2/toc_upgrade.md
 delete mode 100644 product/en/docs-mogdb/v5.2/upgrade-guide/1-upgrade-overview.md
 delete mode 100644 product/en/docs-mogdb/v5.2/upgrade-guide/2-read-before-upgrade.md
 delete mode 100644 product/en/docs-mogdb/v5.2/upgrade-guide/3-in-place-upgrade.md
 delete mode 100644 product/en/docs-mogdb/v5.2/upgrade-guide/4-rolling-upgrade.md
 delete mode 100644 product/en/docs-mogdb/v5.2/upgrade-guide/upgrade-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai-feature.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/abo-optimizer.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-best-practices.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-prerequisites.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-troubleshooting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-usage-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/ai4db-adaptive-plan-selection.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/ai4db-intelligent-cardinality-estimation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-best-practices.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-prerequisites.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-troubleshooting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-usage-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/ai-sub-functions-of-the-dbmind.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-command-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-multi-metric-correlation-analysis.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-obtaining-help-information.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-troubleshooting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-usage-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-command-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-obtaining-help-information.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-troubleshooting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-usage-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-command-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-environment-deployment.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-obtaining-help-information.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-troubleshooting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-usage-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/index-advisor-index-recommendation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/single-query-index-recommendation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/virtual-index.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/workload-level-index-recommendation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-command-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-environment-deployment.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-obtaining-help-information.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-troubleshooting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-usage-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-command-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-obtaining-help-information.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-troubleshooting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-usage-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-command-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-obtaining-help-information.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-troubleshooting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-usage-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-command-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-examples.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-obtaining-help-information.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-parameter-optimization-and-diagnosis.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-preparations.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-troubleshooting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/ai4db-autonomous-database-o&m.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/components-that-support-dbmind.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-command-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-environment-deployment.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-obtaining-help-information.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-troubleshooting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-usage-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/component.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/dbmind-mode.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/service.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/set.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/db4ai/db4ai.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/db4ai-query-for-model-training-and-prediction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/db4ai-snapshots-for-data-version-management.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/full-process-ai.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/plpython-fenced-mode.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/AI-features/db4ai/native-db4ai-engine.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/_index.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/MogDB-compared-to-openGauss.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/about-mogdb.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/mogdb-new-feature/release-note.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/open-source-components/2-docker-based-mogdb.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/open-source-components/DBMS-RANDOM.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/open-source-components/compat-tools.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/open-source-components/mog_filedump.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/open-source-components/mog_xlogdump.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/open-source-components/mogdb-monitor.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/open-source-components/open-source-components.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/terms-of-use.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/about-mogdb/usage-limitations.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/administrator-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/backup-and-restoration-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/backup-and-restoration.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/flashback-restoration.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/logical-backup-and-restoration.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/physical-backup-and-restoration.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/column-store-tables-management.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/common-primary-backup-deployment-scenarios.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/database-deployment-scenario.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-architecture.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-current-architectural-feature-constraints.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-developer-environment-deployment-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/two-city-three-dc-dr.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/1-using-gs_dump-and-gs_dumpall-to-export-data-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/2-exporting-a-single-database.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/3-exporting-all-databases.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/4-data-export-by-a-user-without-required-permissions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/exporting-data.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-and-exporting-data.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/1-import-modes.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/10-managing-concurrent-write-operations.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/2-running-the-INSERT-statement-to-insert-data.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/3-running-the-COPY-FROM-STDIN-statement-to-import-data.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/4-using-a-gsql-meta-command-to-import-data.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/5-using-gs_restore-to-import-data.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/6-updating-data-in-a-table.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/7-deep-copy.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/8-ANALYZE-table.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/9-doing-VACUUM-to-a-table.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/importing-data.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/localization/character-set-support.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/localization/collation-support.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/localization/locale-support.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/localization/localization.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/1-mot-introduction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/2-mot-features-and-benefits.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/3-mot-key-technologies.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/4-mot-usage-scenarios.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/5-mot-performance-benchmarks.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/introducing-mot.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/1-using-mot-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/2-mot-preparation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/3-mot-deployment.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/4-mot-usage.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/5-mot-administration.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/6-mot-sample-tpcc-benchmark.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/using-mot.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-1.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-2.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-3.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-4.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-5.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-6.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-7.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-8.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-9.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/concepts-of-mot.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/1-references.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/2-glossary.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/mot-appendix.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/mot-engine/mot-engine.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/primary-and-standby-management.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/0-starting-and-stopping-mogdb.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/1-routine-maintenance-check-items.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/10-data-security-maintenance-suggestions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/11-log-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/2-checking-os-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/3-checking-mogdb-health-status.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/4-checking-database-performance.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/5-checking-and-deleting-logs.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/6-checking-time-consistency.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/7-checking-the-number-of-application-connections.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/8-routinely-maintaining-tables.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/9-routinely-recreating-an-index.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/exporting-and-viewing-the-wdr.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/routine-maintenance.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/slow-sql-diagnosis.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/administrator-guide/routine-maintenance/using-the-gsql-client-for-connection.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/adaptive-plan-selection.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/characteristic-description-abo-optimizer.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/intelligent-cardinality-estimation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai-capabilities.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/1-database-metric-collection-forecast-and-exception-detection.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/2-root-cause-analysis-for-slow-sql-statements.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/3-index-recommendation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/4-parameter-tuning-and-diagnosis.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/5-slow-sql-statement-discovery.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/characteristic-description-ai4db.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/ai-capabilities/db4ai-database-driven-ai.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/1-standard-sql.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/2-standard-development-interfaces.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/3-postgresql-api-compatibility.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/ECPG.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/MogDB-MySQL-compatibility.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/MogDB-Oracle-compatibility.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/application-development-interfaces.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/characteristic-description-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/characteristic-description.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/add-rowtype-attribute-to-the-view.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-distinct-performance-optimization.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-support-keep-clause.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-support-scenario-extensions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/authid-current-user.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/compatibility.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/compatible-with-mysql-alias-support-for-single-quotes.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/current_date-current_time-keywords-as-field-name.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/custom-type-array.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/for-update-supports-outer-join.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/format-error-backtrace.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/insert-on-conflict.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/mod-function-float-to-int.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/modify-table-log-property.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/mogdb-supports-insert-all.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/nesting-of-aggregate-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/oracle-dblink-syntax-compatibility.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/order-by-group-by-scenario-expansion.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/pivot-and-unpivot.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/remove-type-conversion-hint-when-creating-package-function-procedure.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/support-bypass-method-when-merge-into-hit-index.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/support-for-adding-nocopy-attributes-to-procedure-and-function-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/support-for-constants-in-package-as-default-values.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/support-passing-the-count-attribute.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/support-plpgsql-subtype.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/support-q-quote-escape-character.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/support-subtracting-two-date-types-to-return-numeric-type.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/support-synonym-calls-without-parentheses-for-function-without-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/support-table-function.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/support-to-keep-the-same-name-after-the-end-with-oracle.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/compatibility/support-where-current-of.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/1-access-control-model.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/10-row-level-access-control.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/11-password-strength-verification.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/12-equality-query-in-a-fully-encrypted-database.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/13-ledger-database-mechanism.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/14-transparent-data-encryption.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/2-separation-of-control-and-access-permissions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/3-database-encryption-authentication.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/4-data-encryption-and-storage.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/5-database-audit.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/6-network-communication-security.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/7-resource-label.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/8-unified-audit.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/9-dynamic-data-anonymization.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/database-security/database-security.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/1-support-for-functions-and-stored-procedures.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/10-autonomous-transaction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/11-global-temporary-table.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/12-pseudocolumn-rownum.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/13-stored-procedure-debugging.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/14-jdbc-client-load-balancing-and-readwrite-isolation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/15-in-place-update-storage-engine.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/16-publication-subscription.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/17-foreign-key-lock-enhancement.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/18-data-compression-in-oltp-scenarios.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/19-transaction-async-submit.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/2-sql-hints.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/20-copy-import-optimization.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/21-dynamic-partition-pruning.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/22-sql-running-status-observation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/23-index-creation-parallel-control.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/24-brin-index.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/25-bloom-index.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/3-full-text-indexing.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/4-copy-interface-for-error-tolerance.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/5-partitioning.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/6-support-for-advanced-analysis-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/7-materialized-view.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/8-hyperloglog.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/9-creating-an-index-online.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/enterprise-level-features.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/event-trigger.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/index-support-fuzzy-matching.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/pruning-order-by-in-subqueries.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/scroll-cursor.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/support-for-pruning-subquery-projection-columns.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/1-primary-standby.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/10-adding-or-deleting-a-standby-server.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/11-delaying-entering-the-maximum-availability-mode.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/12-parallel-logical-decoding.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/13-dcf.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/14-cm.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/15-global-syscache.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/16-using-a-standby-node-to-build-a-standby-node.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/17-two-city-three-dc-dr.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/2-logical-replication.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/4-logical-backup.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/5-physical-backup.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/6-automatic-job-retry-upon-failure.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/7-ultimate-rto.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/8-cascaded-standby-server.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/9-delayed-replay.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/cm-cluster-management-component-supporting-two-node-deployment.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/cm-dual-network-segment-deployment.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/ddl-query-of-view.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/enhanced-efficiency-of-logical-backup-and-restore.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/high-availability-based-on-the-paxos-protocol.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-availability/high-availability.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/1-cbo-optimizer.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/10-xlog-no-lock-flush.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/11-parallel-page-based-redo-for-ustore.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/12-row-store-execution-to-vectorized-execution.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/2-llvm.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/3-vectorized-engine.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/4-hybrid-row-column-store.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/5-adaptive-compression.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/7-kunpeng-numa-architecture-optimization.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/8-high-concurrency-of-thread-pools.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/9-smp-for-parallel-execution.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/adaptive-two-phase-aggregation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/astore-row-level-compression.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/btree-index-compression.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/enhancement-of-tracing-backend-key-thread.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/enhancement-of-wal-redo-performance.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/high-performance.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/ock-accelerated-data-transmission.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/ock-scrlock-accelerate-distributed-lock.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/ordering-operator-optimization.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/parallel-index-scan.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/parallel-query-optimization.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/sql-bypass.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/high-performance/tracing-SQL-function.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/2-workload-diagnosis-report.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/3-slow-sql-diagnosis.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/4-session-performance-diagnosis.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/5-system-kpi-aided-diagnosis.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/autonomous-transaction-management.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/built-in-stack-tool.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/corrupt-files-handling.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/dcf-module-tracing.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/error-when-writing-illegal-characters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/extension-splitting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/fault-diagnosis.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/light-lock-export-and-analysis.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/maintainability.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/pageinspect-pagehack.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/maintainability/sql-patch.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/middleware/deploying-a-distributed-database-using-kubernetes.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/middleware/distributed-analysis-capabilities.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/middleware/distributed-database-capability.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/middleware/middleware.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/workload-management/high-latency-escape-at-the-infrastructure-layer.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/characteristic-description/workload-management/workload-management.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-cluster-brain-split-fault.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-cluster-manual-failover.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-fault.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/common-fault-locating-cases.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-after-installation-on-x86.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-due-to-full-disk-space.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-due-to-incorrect-settings-of-guc-parameter-log-directory.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-when-removeipc-is-enabled.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-fault-locating.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/after-you-run-the-du-command.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/disk-space-usage-reaches-the-threshold.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/error-no-space-left-on-device-is-displayed.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/file-is-damaged-in-the-xfs-file-system.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/file-system-disk-memory.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/insufficient-memory.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/shared-memory-leak.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/when-the-tpcc-is-running.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/b-tree-index-faults.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/index-fault.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/reindexing-fails.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/when-a-user-specifies-only-an-index-name.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/an-error-occurs-during-integer-conversion.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/different-data-is-displayed.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/forcibly-terminating-a-session.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/permission-session-data-type.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/performance-deterioration.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/primary-node-is-hung-in-demoting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/query-failure.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/service-ha-concurrency.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/service-startup-failure.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/standby-node-in-the-need-repair-state.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/too-many-clients-already.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/analyzing-the-status-of-a-query-statement.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/analyzing-whether-a-query-statement-is-blocked.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/lock-wait-timeout-is-displayed.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/low-query-efficiency.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/slow-response-to-a-query-statement.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/sql-fault.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/an-error-is-reported-when-the-table-partition-is-modified.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/table-partition-table.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/table-size-does-not-change.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-methods.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/common-faults-and-identification/common-faults-and-identification.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/communication-matrix.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/1-1-stored-procedure.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/autonomous-transaction/1-introduction-to-autonomous-transaction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/autonomous-transaction/2-function-supporting-autonomous-transaction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/autonomous-transaction/3-stored-procedure-supporting-autonomous-transaction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/autonomous-transaction/4-restrictions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/autonomous-transaction/anonymous-block-supporting-autonomous-transaction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/1-development-based-on-jdbc-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/10-example-common-operations.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/11-example-retrying-sql-queries-for-applications.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/12-example-importing-and-exporting-data-through-local-files.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/13-example-2-migrating-data-from-a-my-database-to-mogdb.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/14-example-logic-replication-code.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/14.1-example-parameters-for-connecting-to-the-database-in-different-scenarios.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/1-java-sql-Connection.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/10-javax-sql-DataSource.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/11-javax-sql-PooledConnection.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/12-javax-naming-Context.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/13-javax-naming-spi-InitialContextFactory.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/14-CopyManager.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/2-java-sql-CallableStatement.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/3-java-sql-DatabaseMetaData.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/4-java-sql-Driver.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/5-java-sql-PreparedStatement.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/6-java-sql-ResultSet.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/7-java-sql-ResultSetMetaData.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/8-java-sql-Statement.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/9-javax-sql-ConnectionPoolDataSource.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/jdbc-interface-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/2-jdbc-package-driver-class-and-environment-class.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/3-development-process.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/4-loading-the-driver.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/5-connecting-to-a-database.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/6-connecting-to-a-database-using-ssl.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/7-running-sql-statements.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/8-processing-data-in-a-result-set.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/8.1-log-management.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/9-closing-a-connection.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/connecting-to-a-database-using-uds.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/development-based-on-jdbc.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/example-jdbc-primary-and-backup-cluster-load-balancing.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/jdbc-based-common-parameter-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/jdbc-release-notes.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/1-development-based-on-odbc.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/2-odbc-packages-dependent-libraries-and-header-files.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/3-configuring-a-data-source-in-the-linux-os.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/4-development-process.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/5-example-common-functions-and-batch-binding.md
 delete mode 100644
product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/5.1-typical-application-scenarios-and-configurations.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-0-odbc-overview.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-1-SQLAllocEnv.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-10-SQLExecDirect.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-11-SQLExecute.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-12-SQLFetch.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-13-SQLFreeStmt.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-14-SQLFreeConnect.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-15-SQLFreeHandle.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-16-SQLFreeEnv.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-17-SQLPrepare.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-18-SQLGetData.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-19-SQLGetDiagRec.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-2-SQLAllocConnect.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-20-SQLSetConnectAttr.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-21-SQLSetEnvAttr.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-22-SQLSetStmtAttr.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-23-Examples.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-3-SQLAllocHandle.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-4-SQLAllocStmt.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-5-SQLBindCol.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-6-SQLBindParameter.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-7-SQLColAttribute.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-8-SQLConnect.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-9-SQLDisconnect.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/odbc-interface-reference.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/odbc-release-notes.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/1-database-connection-control-functions.md delete mode 100644 
product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/10-PQstatus.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/2-PQconnectdbParams.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/3-PQconnectdb.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/4-PQconninfoParse.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/5-PQconnectStart.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/6-PQerrorMessage.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/7-PQsetdbLogin.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/8-PQfinish.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/9-PQreset.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/1-PQclear.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/10-PQntuples.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/11-PQprepare.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/12-PQresultStatus.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/2-PQexec.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/3-PQexecParams.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/4-PQexecParamsBatch.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/5-PQexecPrepared.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/6-PQexecPreparedBatch.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/7-PQfname.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/8-PQgetvalue.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/9-PQnfields.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/database-statement-execution-functions.md 
delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/1-functions-for-asynchronous-command-processing.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/2-PQsendQuery.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/3-PQsendQueryParams.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/4-PQsendPrepare.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/5-PQsendQueryPrepared.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/6-PQflush.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/1-PQgetCancel.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/2-PQfreeCancel.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/3-PQcancel.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/functions-for-canceling-queries-in-progress.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/libpq-api-reference.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/dependent-header-files-of-libpq.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/development-based-on-libpq.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/development-process.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/libpq-example.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/libpq-release-notes.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/link-parameters.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/1-psycopg-based-development.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/1-psycopg2-connect.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/10-connection-close.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/2-connection-cursor.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/3-cursor-execute-query-vars-list.md delete mode 100644 
product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/4-curosr-executemany-query-vars-list.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/5-connection-commit.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/6-connection-rollback.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/7-cursor-fetchone.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/8-cursor-fetchall.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/9-cursor-close.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/psycopg-api-reference.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/12-psycopg2-release-notes.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/2-psycopg-package.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/3.1-development-process.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/4-connecting-to-a-database.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/5-adaptation-of-python-values-to-sql-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/6-new-features-in-mogdb.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/9-connecting-to-the-database-using-ssl.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/5-commissioning.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/application-development-tutorial.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/development-specifications/design-specification.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/development-specifications/introduction-to-development-specifications.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/development-specifications/naming-specification.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/development-specifications/overview-of-development-specifications.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/development-specifications/postgresql-compatibility.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/development-specifications/query-operations.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/dev/development-specifications/syntax-specification.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/developer-guide.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/extension.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/1-oracle_fdw.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/2-mysql_fdw.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/3-postgres_fdw.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/dblink.md delete mode 
100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/fdw-introduction.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/file_fdw.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/pg_bulkload-user-guide.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/pg_prewarm-user-guide.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/pg_repack-user-guide.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/pg_trgm-user-guide.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-extension.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-overview.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-support-and-constraints.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/using-postgis.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/wal2json-user-guide.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/extension/whale.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/1-logical-decoding.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/2-logical-decoding-by-sql-function-interfaces.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/logical-decoding.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/logical-replication.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/architecture.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/configuration-settings.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/conflicts.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/monitoring.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/publication-subscription.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/publications.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/quick-setup.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/restrictions.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/security.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/subscriptions.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/materialized-view/1-materialized-view-overview.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/1-full-materialized-view-overview.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/2-full-materialized-view-usage.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/3-full-materialized-view-support-and-constraints.md delete mode 100644 
product/zh/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/full-materialized-view.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/1-incremental-materialized-view-overview.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/2-incremental-materialized-view-usage.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/3-incremental-materialized-view-support-and-constraints.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/incremental-materialized-view.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/assessment-tool.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-extension.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-installation.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-overview.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-restrictions.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/dolphin-reset-parameters.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/dolphin-syntax.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/guc-parameters.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/identifiers/dolphin-column-name-identifiers.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/identifiers/dolphin-identifiers.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-binary-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-bit-string-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-bool-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-character-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-data-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-date-time-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-enumeration-types.md delete mode 100644 
product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-numeric-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-dcl-syntax.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-ddl-syntax.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-dml-syntax.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-keywords.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-sql-reference.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/expressions/dolphin-conditional-expressions.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/expressions/dolphin-expressions.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-advisory-lock-functions.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-aggregate-functions.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-arithmetic-functions-and-operators.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-assignment-operators.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-b-compatible-database-lock.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-bit-string-functions-and-operators.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-character-processing-functions-and-operators.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-comment-operators.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-compatible-operators-and-operations.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-conditional-expression-functions.md delete mode 100644 
product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-date-and-time-processing-functions-and-operators.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-functions-and-operators.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-json-jsonb-functions-and-operators.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-logical-operators.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-network-address-functions-and-operators.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-system-information-functions.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-type-conversion-functions.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-database.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-function.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-procedure.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-server.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table-partition.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-tablespace.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-view.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-analyze-analyse.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-ast.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-checksum-table.md delete mode 100644 
product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-database.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-function.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-index.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-procedure.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-server.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table-as.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table-partition.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-tablespace.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-trigger.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-view.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-describe-table.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-do.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-database.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-index.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-tablespace.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-execute.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-explain.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-flush-binary-logs.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-grant-revoke-proxy.md delete mode 100644 
product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-grant.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-insert.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-kill.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-load-data.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-optimize-table.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-prepare.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-rename-table.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-rename-user.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-revoke.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-select-hint.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-select.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-set-charset.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-set-password.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-character-set.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-collation.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-columns.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-database.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-function.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-procedure.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-table.md delete mode 100644 
product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-trigger.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-view.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-databases.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-function-status.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-grants.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-index.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-master-status.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-plugins.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-privileges.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-procedure-status.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-processlist.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-slave-hosts.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-status.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-table-status.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-tables.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-triggers.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-variables.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-warnings.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-sql-syntax.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-update.md delete 
mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-use-db-name.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/basic-statements/dolphin-assignment-statements.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/basic-statements/dolphin-basic-statements.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/dolphin-stored-procedures.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-INDEX_STATISTIC.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-PG_TYPE_NONSTRICT_BASIC_VALUE.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-system-views.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/migrating-data/data-check.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/migrating-data/full-migration.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/migrating-data/incremental-migration.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/migrating-data/migrating-data-from-mysql-to-mogdb.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/migrating-data/quick-mysql-migration.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/migrating-data/reverse-migration.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/mysql-compatible-description.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/partition-management/partition-management.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/benefits-of-partition-pruning.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/dynamic-partition-pruning.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/how-to-identify-whether-partition-pruning-has-been-used.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/information-that-can-be-used-for-partition-pruning.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/partition-pruning.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/static-partition-pruning.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/recommendations-for-choosing-a-partitioning-strategy.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-hash-partitioning.md delete mode 100644 
product/zh/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-list-partitioning.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-range-partitioning.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-1-plpgsql-overview.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-10-other-statements.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-11-cursors.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-12-retry-management.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-13-debugging.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-14-package.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-2-data-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-3-data-type-conversion.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-4-arrays-and-records.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-5-declare-syntax.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-6-basic-statements.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-7-dynamic-statements.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-8-control-statements.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/1-9-transaction-management.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/advanced-packages/advanced-packages.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/advanced-packages/basic-interfaces/PKG_SERVICE.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/plpgsql/advanced-packages/basic-interfaces/basic-interfaces.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/scheduled-jobs/pkg-service.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/scheduled-jobs/scheduled-jobs.md delete mode 100644 product/zh/docs-mogdb/v5.2/developer-guide/user-defined-functions.md delete mode 100644 product/zh/docs-mogdb/v5.2/faqs/application-development-faqs.md delete mode 100644 product/zh/docs-mogdb/v5.2/faqs/deployment-and-maintenance-faqs.md delete mode 100644 product/zh/docs-mogdb/v5.2/faqs/faqs.md delete mode 100644 product/zh/docs-mogdb/v5.2/faqs/high-availability-faqs.md delete mode 100644 product/zh/docs-mogdb/v5.2/faqs/migration-faqs.md delete mode 100644 product/zh/docs-mogdb/v5.2/faqs/product-faqs.md delete mode 100644 product/zh/docs-mogdb/v5.2/faqs/upgrade-faqs.md delete mode 100644 product/zh/docs-mogdb/v5.2/glossary.md delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/cluster-management/cluster-management.md delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/cluster-management/cm-configuration-parameter/cm-cm_agent.md delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/cluster-management/cm-configuration-parameter/cm-cm_server.md delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/cluster-management/cm-configuration-parameter/cm-configuration-parameter.md delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/cluster-management/feature-introduction.md delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/cluster-management/introduction-to-cm_ctl-tool.md 
 delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/cluster-management/introduction-to-cm_persist.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/cluster-management/introduction-to-installation-and-uninstallation-tool.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/cluster-management/manual-configuration-of-vip.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/cluster-management/safety-design.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/high-available-dcf.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/high-available-guide/high-available-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/installation-guide/docker-installation/docker-installation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/installation-guide/installation-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/installation-guide/installation-preparation/environment-requirement.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/installation-guide/installation-preparation/installation-preparation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/installation-guide/installation-preparation/os-configuration.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/installation-guide/manual-installation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/installation-guide/ptk-based-installation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/installation-guide/recommended-parameter-settings.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/mogeaver/mogeaver-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/mogeaver/mogeaver-release-notes.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/mogeaver/mogeaver.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/TPCC-performance-tuning-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/performance-tuning.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/sql-tuning/experience-in-rewriting-sql-statements.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/sql-tuning/hint-based-tuning.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/sql-tuning/introduction-to-the-sql-execution-plan.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/sql-tuning/query-execution-process.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/sql-tuning/resetting-key-parameters-during-sql-tuning.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/sql-tuning/reviewing-and-modifying-a-table-definition.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/sql-tuning/sql-tuning.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/sql-tuning/tuning-process.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/sql-tuning/typical-sql-optimization-methods.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/sql-tuning/updating-statistics.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/configuring-llvm.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/configuring-smp.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/configuring-ustore.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/configuring-vector-engine.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/optimizing-os-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-load-management-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-load-management.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/creating-resource-pool.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/enabling-resource-load-management.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-management-preparations.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/resource-planning.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/resource-load-management/resource-management-preparations/setting-control-group.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/system-tuning/system-tuning.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/wdr/wdr-report.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/wdr/wdr-snapshot-schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/performance-tuning/wdr/wdr.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/container-based-installation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/installation-on-a-single-node.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/mogdb-access.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-cli-to-access-mogdb/gsql.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-cli-to-access-mogdb/pgcli.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-cli-to-access-mogdb/use-cli-to-access-mogdb.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-gui-tools-to-access-mogdb/dbeaver.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-gui-tools-to-access-mogdb/mogeaver-usage.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-gui-tools-to-access-mogdb/use-gui-tools-to-access-mogdb.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-middleware-to-access-mogdb/use-middleware-to-access-mogdb.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-middleware-to-access-mogdb/weblogic-configures-mogdb-data-source-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-middleware-to-access-mogdb/websphere-configures-mogdb-data-source-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/adonet.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/c-cpp.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/go.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/java.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/nodejs.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/python.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/rust.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-access/use-programming-language-to-access-mogdb/use-programming-language-to-access-mogdb.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogdb-playground.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/mogila.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/quick-start/quick-start.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00001-GAUSS-00100.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00101-GAUSS-00200.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00201-GAUSS-00300.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00301-GAUSS-00400.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00401-GAUSS-00500.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00501-GAUSS-00600.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00601-GAUSS-00700.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00701-GAUSS-00800.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00801-GAUSS-00900.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-00901-GAUSS-01000.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01001-GAUSS-01100.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01101-GAUSS-01200.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01201-GAUSS-01300.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01301-GAUSS-01400.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01401-GAUSS-01500.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01501-GAUSS-01600.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01601-GAUSS-01700.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01701-GAUSS-01800.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01801-GAUSS-01900.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-01901-GAUSS-02000.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02001-GAUSS-02100.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02101-GAUSS-02200.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02201-GAUSS-02300.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02301-GAUSS-02400.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02401-GAUSS-02500.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02501-GAUSS-02600.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02601-GAUSS-02700.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02701-GAUSS-02800.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02801-GAUSS-02900.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-02901-GAUSS-03000.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03001-GAUSS-03100.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03101-GAUSS-03200.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03201-GAUSS-03300.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03301-GAUSS-03400.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03401-GAUSS-03500.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03501-GAUSS-03600.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03601-GAUSS-03700.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03701-GAUSS-03800.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03801-GAUSS-03900.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-03901-GAUSS-04000.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04001-GAUSS-04100.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04101-GAUSS-04200.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04201-GAUSS-04300.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04301-GAUSS-04400.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04401-GAUSS-04500.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04501-GAUSS-04600.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04601-GAUSS-04700.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04701-GAUSS-04800.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04801-GAUSS-04900.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-04901-GAUSS-05000.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05001-GAUSS-05100.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05101-GAUSS-05200.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05201-GAUSS-05300.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05301-GAUSS-05400.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05401-GAUSS-05500.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05501-GAUSS-05600.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05601-GAUSS-05700.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05701-GAUSS-05800.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05801-GAUSS-05900.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-05901-GAUSS-06000.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06001-GAUSS-06100.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06101-GAUSS-06200.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06201-GAUSS-06300.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06301-GAUSS-06400.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06401-GAUSS-06500.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06501-GAUSS-06600.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06601-GAUSS-06700.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06701-GAUSS-06800.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06801-GAUSS-06900.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-06901-GAUSS-07000.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-07001-GAUSS-07100.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-07101-GAUSS-07200.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-07201-GAUSS-07300.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-07301-GAUSS-07400.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-07401-GAUSS-07500.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-50000-GAUSS-50999.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-51000-GAUSS-51999.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-52000-GAUSS-52999.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/GAUSS-53000-GAUSS-53699.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class00-class21.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class0A-class0Z.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class22-class24.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class25-class40.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class2B-class2F.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class3B-class3F.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class42-class44.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/class53-class58.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/classCG-classTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/classF0-classP0.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/classXX-classYY.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/description-of-sql-error-codes.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/sqlstate-values-of-mogdb-cm-error-code.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/description-of-sql-error-codes/sqlstate-values-of-mogdb-error-code.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/error-code-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-code-reference/third-party-library-error-codes.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-log-reference/error-log-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/error-log-reference/kernel-error-message.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/aggregate-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/ai-feature-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/array-functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/binary-string-functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/bit-string-functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/character-processing-functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/comparison-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/conditional-expressions-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/data-damage-detection-and-repair-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/date-and-time-processing-functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/dynamic-data-masking-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/encrypted-equality-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/event-trigger-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/fault-injection-system-function.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/geometric-functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/global-syscache-feature-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/global-temporary-table-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/hash-function.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/hll-functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/internal-functions/internal-functions-1.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/internal-functions/internal-functions-2.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/internal-functions/internal-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/json-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/ledger-database-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/logical-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/mathematical-functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/mode-matching-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/network-address-functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/obsolete-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/other-system-functions/other-system-functions-1.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/other-system-functions/other-system-functions-2.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/other-system-functions/other-system-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/prompt-message-function.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/range-functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/security-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/sequence-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/set-returning-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-1.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-2.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-3.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/access-privilege-inquiry-function.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/comment-information-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/other-function.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/schema-visibility-inquiry-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/session-information-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/system-catalog-information-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/system-information-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-information-functions/transaction-ids-and-snapshots.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/advisory-lock-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/backup-and-restoration-control-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/configuration-settings-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/database-object-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/logical-replication-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/other-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/row-store-compression-system-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/segment-page-storage-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/server-signal-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/snapshot-synchronization-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/system-management-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/undo-system-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/system-management-functions/universal-file-access-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/text-search-functions-and-operators.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/trigger-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/type-conversion-functions/type-conversion-functions-1.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/type-conversion-functions/type-conversion-functions-2.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/type-conversion-functions/type-conversion-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/window-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/functions-and-operators/xml-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/AI-features.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/DCF-parameters-settings.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/HyperLogLog.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/MogDB-transaction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/alarm-detection.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/appendix.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/auditing/audit-switch.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/auditing/auditing.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/auditing/operation-audit.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/auditing/user-and-permission-audit.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/automatic-vacuuming.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/backend-compression.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/backup-and-restoration-parameter.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/cm-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/connection-and-authentication/communication-library-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/connection-and-authentication/connection-and-authentication.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/connection-and-authentication/connection-settings.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/connection-pool-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/data-import-export.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/default-settings-of-client-connection/default-settings-of-client-connection.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/default-settings-of-client-connection/other-default-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/default-settings-of-client-connection/statement-behavior.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/delimiter.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/developer-options.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/error-reporting-and-logging/error-reporting-and-logging.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/error-reporting-and-logging/logging-content.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/error-reporting-and-logging/logging-time.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/error-reporting-and-logging/using-csv-log-output.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/fault-tolerance.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/file-location.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/flashback.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/global-syscache-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/global-temporary-table.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/guc-parameter-list.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/guc-parameter-usage.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/guc-user-defined-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/ha-replication/ha-replication.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/ha-replication/primary-server.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/ha-replication/sending-server.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/ha-replication/standby-server.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/load-management.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/lock-management.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/miscellaneous-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/multi-level-cache-management-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/parameters-related-to-efficient-data-compression-algorithms.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/query-planning/genetic-query-optimizer.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/query-planning/other-optimizer-options.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/query-planning/query-planning.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/query.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/reference-guide-guc-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/replication-parameters-of-two-database-instances.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/reserved-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/asynchronous-io-operations.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/background-writer.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/cost-based-vacuum-delay.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/disk-space.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/kernel-resource-usage.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/memory.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-consumption/resource-consumption.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/resource-pooling-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/rollback-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/scheduled-task.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/security-configuration.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/statistics-during-the-database-running/performance-statistics.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/statistics-during-the-database-running/query-and-index-statistics-collector.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/statistics-during-the-database-running/statistics-during-the-database-running.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/system-performance-snapshot.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/thread-pool.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/upgrade-parameters.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/version-and-platform-compatibility/compatibility-with-earlier-versions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/version-and-platform-compatibility/platform-and-client-compatibility.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/version-and-platform-compatibility/version-and-platform-compatibility.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/wait-events.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/write-ahead-log/archiving.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/write-ahead-log/checkpoints.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/write-ahead-log/log-replay.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/write-ahead-log/settings.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/write-ahead-log/write-ahead-log.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/guc-parameters/writer-statement-parameters-supported-by-standby-server.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/reference-guide.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI-schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.ARCHIVE_SNAPSHOT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.CREATE_SNAPSHOT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.CREATE_SNAPSHOT_INTERNAL.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.MANAGE_SNAPSHOT_INTERNAL.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.PREPARE_SNAPSHOT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.PREPARE_SNAPSHOT_INTERNAL.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.PUBLISH_SNAPSHOT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.PURGE_SNAPSHOT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.PURGE_SNAPSHOT_INTERNAL.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.SAMPLE_SNAPSHOT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DB4AI-schema/DB4AI.SNAPSHOT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/DBE_PERF.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/Cache-IO.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_SEQUENCES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_ALL_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_SYS_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_SYS_SEQUENCES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_SYS_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_USER_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_USER_SEQUENCES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STATIO_USER_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STAT_DB_CU.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/GLOBAL_STAT_SESSION_CU.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_ALL_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_ALL_SEQUENCES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_ALL_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_SYS_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_SYS_SEQUENCES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_SYS_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_USER_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_USER_SEQUENCES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/STATIO_USER_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_SEQUENCES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_ALL_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_SYS_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_SYS_SEQUENCES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_SYS_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_USER_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_USER_SEQUENCES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/cache-io/SUMMARY_STATIO_USER_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/configuration/CONFIG_SETTINGS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/configuration/GLOBAL_CONFIG_SETTINGS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/configuration/configuration.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/FILE_IOSTAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/FILE_REDO_IOSTAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/GLOBAL_FILE_IOSTAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/GLOBAL_FILE_REDO_IOSTAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/GLOBAL_REL_IOSTAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/LOCAL_REL_IOSTAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/SUMMARY_FILE_IOSTAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/SUMMARY_FILE_REDO_IOSTAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/SUMMARY_REL_IOSTAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/file/file.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/global-plancache/GLOBAL_PLANCACHE_CLEAN.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/global-plancache/GLOBAL_PLANCACHE_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/global-plancache/global-plancache.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/instance/GLOBAL_INSTANCE_TIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/instance/INSTANCE_TIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/instance/instance.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/lock/GLOBAL_LOCKS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/lock/LOCKS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/lock/lock.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/memory/GLOBAL_MEMORY_NODE_DETAIL.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/memory/GLOBAL_SHARED_MEMORY_DETAIL.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/memory/MEMORY_NODE_DETAIL.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/memory/memory-schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_ALL_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_ALL_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_BAD_BLOCK.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_DATABASE.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_DATABASE_CONFLICTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_SYS_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_SYS_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_USER_FUNCTIONS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_USER_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_USER_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_XACT_ALL_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_XACT_SYS_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_XACT_USER_FUNCTIONS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/GLOBAL_STAT_XACT_USER_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_ALL_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_ALL_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_BAD_BLOCK.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_DATABASE.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_DATABASE_CONFLICTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_SYS_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_SYS_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_USER_FUNCTIONS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_USER_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_USER_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_XACT_ALL_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_XACT_SYS_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_XACT_USER_FUNCTIONS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/STAT_XACT_USER_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_ALL_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_ALL_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_BAD_BLOCK.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_DATABASE.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_DATABASE_CONFLICTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_SYS_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_SYS_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_USER_FUNCTIONS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_USER_INDEXES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_USER_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_XACT_ALL_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_XACT_SYS_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_XACT_USER_FUNCTIONS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/SUMMARY_STAT_XACT_USER_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/object/object-schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/GLOBAL_OPERATOR_HISTORY.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/GLOBAL_OPERATOR_HISTORY_TABLE.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/GLOBAL_OPERATOR_RUNTIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/OPERATOR_HISTORY.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/OPERATOR_HISTORY_TABLE.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/OPERATOR_RUNTIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/operator/operator-schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/os/GLOBAL_OS_RUNTIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/os/GLOBAL_OS_THREADS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/os/OS_RUNTIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/os/OS_THREADS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/os/os-schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_SLOW_QUERY_HISTORY.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_SLOW_QUERY_INFO.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_STATEMENT_COMPLEX_HISTORY.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_STATEMENT_COMPLEX_HISTORY_TABLE.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_STATEMENT_COMPLEX_RUNTIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GLOBAL_STATEMENT_COUNT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GS_SLOW_QUERY_HISTORY.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/GS_SLOW_QUERY_INFO.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_HISTORY_TABLE.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_COMPLEX_RUNTIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_COUNT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_HISTORY_query.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_RESPONSETIME_PERCENTILE.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/STATEMENT_WLMSTAT_COMPLEX_RUNTIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/SUMMARY_STATEMENT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/SUMMARY_STATEMENT_COUNT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/query/query-schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/rto/RTO-RPO.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/rto/global_rto_status.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/rto/global_streaming_hadr_rto_and_rpo_stat.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/rto/gs_hadr_local_rto_and_rpo_stat.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_SESSION_MEMORY.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_SESSION_MEMORY_DETAIL.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_SESSION_STAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_SESSION_STAT_ACTIVITY.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_SESSION_TIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_THREADPOOL_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/GLOBAL_THREAD_WAIT_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/LOCAL_ACTIVE_SESSION.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/LOCAL_THREADPOOL_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_CPU_RUNTIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_MEMORY.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_MEMORY_DETAIL.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_MEMORY_RUNTIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_STAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_STAT_ACTIVITY.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/SESSION_TIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/STATEMENT_IOSTAT_COMPLEX_RUNTIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/THREAD_WAIT_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/session-thread/session-thread.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/GLOBAL_TRANSACTIONS_PREPARED_XACTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/GLOBAL_TRANSACTIONS_RUNNING_XACTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/SUMMARY_TRANSACTIONS_PREPARED_XACTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/SUMMARY_TRANSACTIONS_RUNNING_XACTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/TRANSACTIONS_PREPARED_XACTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/TRANSACTIONS_RUNNING_XACTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/transaction/transaction-schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/BGWRITER_STAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/CLASS_VITAL_INFO.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_BGWRITER_STAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_CANDIDATE_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_CKPT_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_DOUBLE_WRITE_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_GET_BGWRITER_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_PAGEWRITER_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_RECORD_RESET_TIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_RECOVERY_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_REDO_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_REPLICATION_SLOTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_REPLICATION_STAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/GLOBAL_SINGLE_FLUSH_DW_STATUS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/REPLICATION_SLOTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/REPLICATION_STAT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/SUMMARY_USER_LOGIN.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/USER_LOGIN.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/utility/utility.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/wait-events/GLOBAL_WAIT_EVENTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/wait-events/WAIT_EVENTS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/wait-events/dbe-perf-wait-events.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload-manager/WLM_USER_RESOURCE_CONFIG.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload-manager/WLM_USER_RESOURCE_RUNTIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload-manager/workload-manager.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/GLOBAL_USER_TRANSACTION.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/GLOBAL_WORKLOAD_TRANSACTION.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_SQL_COUNT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_SQL_ELAPSE_TIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/SUMMARY_WORKLOAD_TRANSACTION.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/USER_TRANSACTION.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/WORKLOAD_SQL_COUNT.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/WORKLOAD_SQL_ELAPSE_TIME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/WORKLOAD_TRANSACTION.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PERF/workload/workload-schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER-schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.abort.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.add_breakpoint.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.attach.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.backtrace.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.continue.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.delete_breakpoint.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.disable_breakpoint.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.enable_breakpoint.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.finish.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.info_breakpoints.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.info_code.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.info_locals.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.local_debug_server_info.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.next.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.print_var.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.set_var.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.step.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.turn_off.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEBUGGER-schema/DBE_PLDEBUGGER.turn_on.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEVELOPER/DBE_PLDEVELOPER.gs_errors.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEVELOPER/DBE_PLDEVELOPER.gs_source.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_PLDEVELOPER/DBE_PLDEVELOPER.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL-Schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.create_abort_sql_patch.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.create_hint_sql_patch.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.disable_sql_patch.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.drop_sql_patch.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.enable_sql_patch.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/DBE_SQL_UTIL-Schema/DBE_SQL_UTIL.show_sql_patch.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/information-schema/INFORMATION_SCHEMA_CATALOG_NAME.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/information-schema/_PG_FOREIGN_DATA_WRAPPERS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/information-schema/_PG_FOREIGN_SERVERS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/information-schema/_PG_FOREIGN_TABLES.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/information-schema/_PG_FOREIGN_TABLE_COLUMNS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/information-schema/_PG_USER_MAPPINGS.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/information-schema/information-schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/schema/schema.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/alias.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/extended-functions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/extended-syntax.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes-introduction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/gin-indexes/gin-indexes.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/gin-indexes/gin-tips-and-tricks.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/gin-indexes/implementation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/gin-indexes/scalability.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/appendix/sql-reference-appendix.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/constant-and-macro.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/dcl-syntax-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/ddl-syntax-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/dml-syntax-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/array-expressions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/condition-expressions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/expressions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/row-expressions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/simple-expressions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/expressions/subquery-expressions.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/additional-features/additional-features.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/additional-features/gathering-document-statistics.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/additional-features/manipulating-queries.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/additional-features/manipulating-tsvector.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/additional-features/rewriting-queries.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/configuration-examples.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/controlling-text-search/controlling-text-search.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/controlling-text-search/highlighting-results.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/controlling-text-search/parsing-documents.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/controlling-text-search/parsing-queries.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/controlling-text-search/ranking-search-results.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries-overview.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/dictionaries.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/ispell-dictionary.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/simple-dictionary.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/snowball-dictionary.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/stop-words.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/synonym-dictionary.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/dictionaries/thesaurus-dictionary.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/full-text-search.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/introduction/basic-text-matching.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/introduction/configurations.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/introduction/full-text-retrieval.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/introduction/full-text-search-introduction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/introduction/what-is-a-document.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/limitations.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/parser.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/tables-and-indexes/constraints-on-index-use.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/tables-and-indexes/creating-an-index.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/tables-and-indexes/searching-a-table.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/tables-and-indexes/tables-and-indexes.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-configuration.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-dictionary.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-a-parser.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/full-text-search/testing-and-debugging-text-search/testing-and-debugging-text-search.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/keywords/keywords-1.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/keywords/keywords-2.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/keywords/keywords.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/mogdb-sql.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/ordinary-table.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/partition-table.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-anonymous-block.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-contraints.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-cursor.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-index.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-llvm.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-lock.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference-trigger.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/sql-reference.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/sub-query.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/system-operation.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/transaction/sql-reference-transaction.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/transaction/transaction-auto-commit.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/transaction/transaction-control.md
 delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/transaction/transaction-management.md
 delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/type-base-value.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/functions.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/operators.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/type-conversion-overview.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/type-conversion.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/union-case-and-related-constructs.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-reference/type-conversion/value-storage.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ABORT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-AGGREGATE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-AUDIT-POLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-DATA-SOURCE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-DATABASE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-DEFAULT-PRIVILEGES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-DIRECTORY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-EVENT-TRIGGER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-EVENT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-EXTENSION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-FOREIGN-DATA-WRAPPER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-FOREIGN-TABLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-FUNCTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-GLOBAL-CONFIGURATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-GROUP.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-INDEX.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-LANGUAGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-LARGE-OBJECT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-MASKING-POLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-MATERIALIZED-VIEW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-OPERATOR.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-PACKAGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-PROCEDURE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-PUBLICATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-RESOURCE-LABEL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-RESOURCE-POOL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-ROLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-ROW-LEVEL-SECURITY-POLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-RULE.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SCHEMA.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SEQUENCE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SERVER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SESSION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SUBSCRIPTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SYNONYM.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SYSTEM-KILL-SESSION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-SYSTEM-SET.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TABLE-SUBPARTITION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TABLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TABLESPACE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TEXT-SEARCH-CONFIGURATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TEXT-SEARCH-DICTIONARY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TRIGGER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-TYPE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-USER-MAPPING.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-USER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ALTER-VIEW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ANALYZE-ANALYSE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/BEGIN.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CALL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CHECKPOINT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CLEAN-CONNECTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CLOSE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CLUSTER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/COMMENT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/COMMIT-END.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/COMMIT-PREPARED.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CONNECT-BY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/COPY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-AGGREGATE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-AUDIT-POLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-CAST.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-CLIENT-MASTER-KEY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-COLUMN-ENCRYPTION-KEY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-DATA-SOURCE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-DATABASE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-DIRECTORY.md 
delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-EVENT-TRIGGER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-EVENT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-EXTENSION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-FOREIGN-DATA-WRAPPER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-FOREIGN-TABLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-FUNCTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-GROUP.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-INCREMENTAL-MATERIALIZED-VIEW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-INDEX.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-LANGUAGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-MASKING-POLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-MATERIALIZED-VIEW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-MODEL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-OPERATOR.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-PACKAGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-PROCEDURE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-PUBLICATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-RESOURCE-LABEL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-RESOURCE-POOL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-ROLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-ROW-LEVEL-SECURITY-POLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-RULE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-SCHEMA.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-SEQUENCE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-SERVER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-SUBSCRIPTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-SYNONYM.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TABLE-AS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TABLE-PARTITION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TABLE-SUBPARTITION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TABLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TABLESPACE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TEXT-SEARCH-CONFIGURATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TEXT-SEARCH-DICTIONARY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TRIGGER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-TYPE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-USER-MAPPING.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-USER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-VIEW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CREATE-WEAK-PASSWORD-DICTIONARY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/CURSOR.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DEALLOCATE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DECLARE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DELETE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DELIMITER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-AGGREGATE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-AUDIT-POLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-CAST.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-CLIENT-MASTER-KEY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-COLUMN-ENCRYPTION-KEY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-DATA-SOURCE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-DATABASE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-DIRECTORY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-EVENT-TRIGGER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-EVENT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-EXTENSION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-FOREIGN-DATA-WRAPPER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-FOREIGN-TABLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-FUNCTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-GLOBAL-CONFIGURATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-GROUP.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-INDEX.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-LANGUAGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-MASKING-POLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-MATERIALIZED-VIEW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-MODEL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-OPERATOR.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-OWNED.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-PACKAGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-PROCEDURE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-PUBLICATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-RESOURCE-LABEL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-RESOURCE-POOL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-ROLE.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-ROW-LEVEL-SECURITY-POLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-RULE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-SCHEMA.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-SEQUENCE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-SERVER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-SUBSCRIPTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-SYNONYM.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TABLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TABLESPACE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TEXT-SEARCH-CONFIGURATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TEXT-SEARCH-DICTIONARY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TRIGGER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-TYPE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-USER-MAPPING.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-USER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-VIEW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/DROP-WEAK-PASSWORD-DICTIONARY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/EXECUTE-DIRECT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/EXECUTE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/EXPLAIN-PLAN.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/EXPLAIN.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/FETCH.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/GRANT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/INSERT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/LOCK.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/MERGE-INTO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/MOVE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/PREDICT-BY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/PREPARE-TRANSACTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/PREPARE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/PURGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/REASSIGN-OWNED.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/REFRESH-INCREMENTAL-MATERIALIZED-VIEW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/REFRESH-MATERIALIZED-VIEW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/REINDEX.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/RELEASE-SAVEPOINT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/RESET.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/REVOKE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ROLLBACK-PREPARED.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ROLLBACK-TO-SAVEPOINT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/ROLLBACK.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SAVEPOINT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SELECT-INTO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SELECT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SET-CONSTRAINTS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SET-ROLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SET-SESSION-AUTHORIZATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SET-TRANSACTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SET.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SHOW-EVENTS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SHOW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SHRINK.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SHUTDOWN.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/SNAPSHOT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/START-TRANSACTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/TIMECAPSULE-TABLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/TRUNCATE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/UPDATE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/VACUUM.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/VALUES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/sql-syntax/sql-syntax.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/HLL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/binary-data-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/bit-string-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/boolean-data-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/character-data-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/data-type-used-by-the-ledger-database.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/data-types-supported-by-column-store-tables.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/date-time-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/geometric.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/json-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/monetary.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/network-address.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/numeric-data-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/object-identifier-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/pseudo-types.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/range.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/set-type.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/supported-data-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/text-search-types.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/uuid-type.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/supported-data-types/xml-type.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/overview-of-system-catalogs-and-system-views.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs-and-system-views.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_ASP.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_AUDITING_POLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_AUDITING_POLICY_ACCESS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_AUDITING_POLICY_FILTERS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_AUDITING_POLICY_PRIVILEGES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_CLIENT_GLOBAL_KEYS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_CLIENT_GLOBAL_KEYS_ARGS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_COLUMN_KEYS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_COLUMN_KEYS_ARGS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_DB_PRIVILEGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_ENCRYPTED_COLUMNS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_ENCRYPTED_PROC.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_GLOBAL_CHAIN.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_GLOBAL_CONFIG.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MASKING_POLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MASKING_POLICY_ACTIONS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MASKING_POLICY_FILTERS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MATVIEW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MATVIEW_DEPENDENCY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_MODEL_WAREHOUSE.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_OPT_MODEL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_PACKAGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_POLICY_LABEL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_RECYCLEBIN.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_TXN_SNAPSHOT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_UID.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_EC_OPERATOR_INFO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_INSTANCE_HISTORY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_OPERATOR_INFO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_PLAN_ENCODING_TABLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_PLAN_OPERATOR_INFO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_SESSION_QUERY_INFO_ALL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/GS_WLM_USER_RESOURCE_HISTORY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PGXC_CLASS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PGXC_GROUP.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PGXC_NODE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PGXC_SLICE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AGGREGATE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AM.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AMOP.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AMPROC.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_APP_WORKLOADGROUP_MAPPING.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_ATTRDEF.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_ATTRIBUTE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AUTHID.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AUTH_HISTORY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_AUTH_MEMBERS.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_CAST.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_CLASS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_COLLATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_CONSTRAINT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_CONVERSION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DATABASE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DB_ROLE_SETTING.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DEFAULT_ACL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DEPEND.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DESCRIPTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_DIRECTORY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_ENUM.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_EVENT_TRIGGER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_EXTENSION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_EXTENSION_DATA_SOURCE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_FOREIGN_DATA_WRAPPER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_FOREIGN_SERVER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_FOREIGN_TABLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_HASHBUCKET.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_INDEX.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_INHERITS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_JOB.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_JOB_PROC.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_LANGUAGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_LARGEOBJECT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_LARGEOBJECT_METADATA.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_NAMESPACE.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_OBJECT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_OPCLASS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_OPERATOR.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_OPFAMILY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_PARTITION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_PLTEMPLATE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_PROC.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_PUBLICATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_PUBLICATION_REL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_RANGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_REPLICATION_ORIGIN.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_RESOURCE_POOL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_REWRITE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_RLSPOLICY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SECLABEL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SET.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SHDEPEND.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SHDESCRIPTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SHSECLABEL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_STATISTIC.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_STATISTIC_EXT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SUBSCRIPTION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SUBSCRIPTION_REL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_SYNONYM.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TABLESPACE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TRIGGER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_CONFIG.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_CONFIG_MAP.md 
delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_DICT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_PARSER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TS_TEMPLATE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_TYPE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_USER_MAPPING.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_USER_STATUS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PG_WORKLOAD_GROUP.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/PLAN_TABLE_DATA.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/STATEMENT_HISTORY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-catalogs/system-catalogs.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GET_GLOBAL_PREPARED_XACTS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_ASYNC_SUBMIT_SESSIONS_STATUS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_AUDITING.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_AUDITING_ACCESS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_AUDITING_PRIVILEGE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_CLUSTER_RESOURCE_INFO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_COMPRESSION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_DB_PRIVILEGES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_FILE_STAT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_GSC_MEMORY_DETAIL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_INSTANCE_TIME.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_LABELS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_LSC_MEMORY_DETAIL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_MASKING.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_MATVIEWS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_OS_RUN_INFO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_REDO_STAT.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_CPU_STATISTICS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_CONTEXT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_DETAIL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_MEMORY_STATISTICS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_STAT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SESSION_TIME.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SHARED_MEMORY_DETAIL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_SQL_COUNT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_STAT_SESSION_CU.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_THREAD_MEMORY_CONTEXT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_TOTAL_MEMORY_DETAIL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_CGROUP_INFO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_EC_OPERATOR_STATISTICS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_OPERATOR_HISTORY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_OPERATOR_STATISTICS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_PLAN_OPERATOR_HISTORY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_REBUILD_USER_RESOURCE_POOL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_RESOURCE_POOL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_HISTORY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_INFO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_INFO_ALL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_SESSION_STATISTICS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/GS_WLM_USER_INFO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/MPP_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PATCH_INFORMATION_TABLE.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PGXC_PREPARED_XACTS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_AVAILABLE_EXTENSIONS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_AVAILABLE_EXTENSION_VERSIONS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_COMM_DELAY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_COMM_RECV_STREAM.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_COMM_SEND_STREAM.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_COMM_STATUS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_CONTROL_GROUP_CONFIG.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_CURSORS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_EXT_STATS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GET_INVALID_BACKENDS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GET_SENDERS_CATCHUP_TIME.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GROUP.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GTT_ATTACHED_PIDS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GTT_RELSTATS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_GTT_STATS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_INDEXES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_LOCKS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_NODE_ENV.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_OS_THREADS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_PREPARED_STATEMENTS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_PREPARED_XACTS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_PUBLICATION_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_REPLICATION_ORIGIN_STATUS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_REPLICATION_SLOTS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_RLSPOLICIES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_ROLES.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_RULES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_RUNNING_XACTS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_SECLABELS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_SESSION_IOSTAT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_SESSION_WLMSTAT.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_SETTINGS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_SHADOW.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_ALL_INDEXES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_ALL_SEQUENCES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_ALL_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_SYS_INDEXES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_SYS_SEQUENCES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_SYS_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_USER_INDEXES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_USER_SEQUENCES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATIO_USER_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STATS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_ACTIVITY.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_ACTIVITY_NG.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_ALL_INDEXES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_ALL_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_BAD_BLOCK.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_BGWRITER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_DATABASE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_DATABASE_CONFLICTS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_REPLICATION.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_SUBSCRIPTION.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_SYS_INDEXES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_SYS_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_USER_FUNCTIONS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_USER_INDEXES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_USER_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_XACT_ALL_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_XACT_SYS_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_XACT_USER_FUNCTIONS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_XACT_USER_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TABLES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TDE_INFO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_THREAD_WAIT_STATUS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TIMEZONE_ABBREVS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TIMEZONE_NAMES.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TOTAL_MEMORY_DETAIL.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TOTAL_USER_RESOURCE_INFO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_TOTAL_USER_RESOURCE_INFO_OID.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_USER.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_USER_MAPPINGS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_VARIABLE_INFO.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_VIEWS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PG_WLM_STATISTICS.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/PLAN_TABLE.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/system-views/system-views.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/system-catalogs-and-system-views/viewing-system-catalogs.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/FAQ.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/client-tool.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/client-tool-gsql.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/command-reference.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/gsql-faq.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/gsql-introduction.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/gsql-release-notes.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/meta-command-reference.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/obtaining-help-information.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/client-tool/gsql/usage-guidelines.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/functions-of-mogdb-executable-scripts.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_cgroup.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_check.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_checkos.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_checkperf.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_collector.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_dump.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_dumpall.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_encrypt.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_guc.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_om.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_plan_simulator.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_restore.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_sdr.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_ssh.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/gs_watch.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/server-tools/server-tools.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/system-catalogs-and-views-supported-by-gs_collector.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tool-overview.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tool-reference.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/dsscmd.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/dssserver.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_backup.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_basebackup.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_ctl.md delete mode 100644 
product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_dropnode.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_expansion.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_initdb.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_install.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_postuninstall.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_preinstall.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_probackup.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_sshexkey.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_tar.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_uninstall.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gs_upgradectl.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/gstrace.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/kadmin-local.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/kdb5_util.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/kdestroy.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/kinit.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/klist.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/krb5kdc.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/mogdb.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pg_archivecleanup.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pg_config.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pg_controldata.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pg_recvlogical.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pg_resetxlog.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pscp.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/pssh.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/tools-used-in-the-internal-system.md delete mode 100644 product/zh/docs-mogdb/v5.2/reference-guide/tool-reference/tools-used-in-the-internal-system/transfer.py.md delete mode 100644 
product/zh/docs-mogdb/v5.2/security-guide/security-guide.md delete mode 100644 product/zh/docs-mogdb/v5.2/security-guide/security/1-client-access-authentication.md delete mode 100644 product/zh/docs-mogdb/v5.2/security-guide/security/2-managing-users-and-their-permissions.md delete mode 100644 product/zh/docs-mogdb/v5.2/security-guide/security/3-configuring-database-audit.md delete mode 100644 product/zh/docs-mogdb/v5.2/security-guide/security/4-setting-encrypted-equality-query.md delete mode 100644 product/zh/docs-mogdb/v5.2/security-guide/security/5-setting-a-ledger-database.md delete mode 100644 product/zh/docs-mogdb/v5.2/security-guide/security/6-transparent-data-encryption.md delete mode 100644 product/zh/docs-mogdb/v5.2/security-guide/security/database-security-management.md delete mode 100644 product/zh/docs-mogdb/v5.2/source-code-parsing.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_about.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_ai-features.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_characteristic_description.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_common-faults-and-identification.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_communication-matrix.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_datatypes-and-sql.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_dev.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_error.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_extension-referecne.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_faqs.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_glossary.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_high_available.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_install.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_manage.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_parameters-and-tools.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_performance.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_quickstart.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_secure.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_system-catalogs-and-functions.md delete mode 100644 product/zh/docs-mogdb/v5.2/toc_upgrade.md delete mode 100644 product/zh/docs-mogdb/v5.2/upgrade-guide/1-upgrade-overview.md delete mode 100644 product/zh/docs-mogdb/v5.2/upgrade-guide/2-read-before-upgrade.md delete mode 100644 product/zh/docs-mogdb/v5.2/upgrade-guide/3-in-place-upgrade.md delete mode 100644 product/zh/docs-mogdb/v5.2/upgrade-guide/4-rolling-upgrade.md delete mode 100644 product/zh/docs-mogdb/v5.2/upgrade-guide/upgrade-guide.md diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai-feature.md b/product/en/docs-mogdb/v5.2/AI-features/ai-feature.md deleted file mode 100644 index a59d962b..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai-feature.md +++ /dev/null @@ -1,20 +0,0 @@

---
title: AI Features
summary: AI Features
author: Guo Huan
date: 2021-05-19
---

# AI Features

The history of artificial intelligence (AI) dates back to the 1950s, even earlier than that of database development. However, for a long time AI technology was not applied on a large scale due to various practical constraints, and it even went through several notable troughs. With the further development of information technologies in recent years, the factors restricting AI development have gradually weakened, giving rise to the combined AI, big data, and cloud computing (ABC) technologies.
The combination of AI and databases has been a trending research topic in the industry in recent years. MogDB began exploring this domain early and has achieved initial results. An AI submodule, DBMind, is provided for the database; it is relatively independent of the other functions. This module can be divided into AI4DB and DB4AI.

- AI4DB uses AI technologies to optimize database execution performance and to achieve autonomous, O&M-free operation. It includes self-tuning, self-diagnosis, self-security, self-O&M, and self-healing.
- DB4AI streamlines the end-to-end (E2E) process from databases to AI applications, drives AI tasks through databases, and unifies the AI technology stack to deliver out-of-the-box usability, high performance, and cost savings. For example, SQL-like statements can be used to implement functions such as recommendation systems, image retrieval, and time series prediction. The high-parallelism and column-store advantages of databases can be fully utilized, avoiding the costs of data movement and fragmented storage as well as the security risks caused by information leakage.

The functions described in this section are stored in the **bin/dbmind** directory of the database installation directory (*$GAUSSHOME*). The sub-functions are stored in the **components** subdirectory of **bin/dbmind**. To invoke DBMind, you can run the **gs_dbmind** command. In addition, the built-in AI functions (such as DB4AI) of the database are presented in the form of SQL syntax and system functions.

- **[AI4DB Autonomous Database O&M](./ai4db/ai4db-autonomous-database-o&m.md)**
- **[DB4AI Database-driven AI](./db4ai/db4ai.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/abo-optimizer.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/abo-optimizer.md deleted file mode 100644 index 2758ee7e..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/abo-optimizer.md +++ /dev/null @@ -1,12 +0,0 @@

---
title: ABO Optimizer
summary: ABO Optimizer
author: zhang cuiping
date: 2023-04-07
---

# ABO Optimizer

- **[intelligent-cardinality-estimation](./intelligent-cardinality-estimation/ai4db-intelligent-cardinality-estimation.md)**

- **[adaptive-plan-selection](./adaptive-plan-selection/ai4db-adaptive-plan-selection.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-best-practices.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-best-practices.md deleted file mode 100644 index 6c761b83..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-best-practices.md +++ /dev/null @@ -1,31 +0,0 @@

---
title: Best Practice
summary: Best Practice
author: Guo Huan
date: 2022-10-24
---

# Best Practice

**Adaptive selection of multiple indexes is supported.
The following is an example:**

```sql
create table t1(c1 int, c2 int, c3 int, c4 varchar(32), c5 text);
create index t1_idx2 on t1(c1,c2,c3,c4);
create index t1_idx1 on t1(c1,c2,c3);

insert into t1( c1, c2, c3, c4, c5) SELECT (random()*(2*10^9))::integer , (random()*(2*10^9))::integer, (random()*(2*10^9))::integer, (random()*(2*10^9))::integer, repeat('abc', i%10) ::text from generate_series(1,1000000) i;
insert into t1( c1, c2, c3, c4, c5) SELECT (random()*1)::integer, (random()*1)::integer, (random()*1)::integer, (random()*(2*10^9))::integer, repeat('abc', i%10) ::text from generate_series(1,1000000) i;
```

**Performance comparison:**

Random parameters: c1 ~ random(1, 20); c2 ~ random(1, 20); c3 ~ random(1, 20); c4 ~ random(2, 10000)

The number of threads is 50, the number of clients is 50, and the execution duration is 60s.

| **Method** | **Statement** | **tps** |
| --------------- | ------------------------------------------------------------ | ------- |
| gplan | `prepare k as select * from t1 where c1=$1 and c2=$2 and c3=$3 and c4=$4;` | 35126 |
| cplan | `prepare k as select /*+ use_cplan */ * from t1 where c1=$1 and c2=$2 and c3=$3 and c4=$4;` | 75817 |
| gplan selection | `prepare k as select /*+ choose_adaptive_gplan */ * from t1 where c1=$1 and c2=$2 and c3=$3 and c4=$4;` | 175681 | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-overview.md deleted file mode 100644 index ff8885dd..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-overview.md +++ /dev/null @@ -1,10 +0,0 @@

---
title: Overview
summary: Overview
author: Guo Huan
date: 2022-10-24
---

# Overview

Adaptive plan selection applies to scenarios where a generic cached plan is used for execution. Cached plans are explored by range linear expansion, and a plan is selected by range coverage matching. Adaptive plan selection compensates for the performance problems of the traditional single cached plan, which cannot adapt to changing query condition parameters, and it avoids frequently invoking query optimization. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-prerequisites.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-prerequisites.md deleted file mode 100644 index 490df64e..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-prerequisites.md +++ /dev/null @@ -1,10 +0,0 @@

---
title: Prerequisites
summary: Prerequisites
author: Guo Huan
date: 2022-10-24
---

# Prerequisites

The database is running properly. The GUC parameter **enable_cachedplan_mgr** is set to **on**, indicating that the adaptive plan selection function is enabled.
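A quick way to confirm this prerequisite is to inspect the parameter from a SQL session. The following is a minimal sketch; the session-level SET is an assumption for testing only, and a persistent change would normally be made with gs_guc:

```sql
-- Verify that adaptive plan selection is enabled.
SELECT name, setting
FROM pg_settings
WHERE name = 'enable_cachedplan_mgr';

-- Turn it on for the current session only, if your build allows a
-- session-level change (test setup; persist the change with gs_guc).
SET enable_cachedplan_mgr = on;
```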
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-troubleshooting.md deleted file mode 100644 index dc29f353..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-troubleshooting.md +++ /dev/null @@ -1,10 +0,0 @@

---
title: Troubleshooting
summary: Troubleshooting
author: Guo Huan
date: 2022-10-24
---

# Troubleshooting

For complex slow queries, this feature may fail to select a correct plan because of restrictions on its applicable scope. In that case, you are advised to use CPLAN to generate the query plan. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-usage-guide.md deleted file mode 100644 index fc5fbce0..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/adaptive-plan-selection-usage-guide.md +++ /dev/null @@ -1,22 +0,0 @@

---
title: Usage Guide
summary: Usage Guide
author: Guo Huan
date: 2022-10-24
---

# Usage Guide

In a production environment, use hints to enable adaptive plan management for queries that suffer from cached-plan problems.

```sql
select /*+ choose_adaptive_gplan */ * from tab where c1 = xxx;
```

By default, the JDBC client converts the preceding SQL statements with hints to PBE mode and creates a query template. In addition to directly modifying SQL statements, hints can be added through SQL patches.

In the gsql environment, you can manually create a query template.
- -```sql -prepare test_stmt as select /*+ choose_adaptive_gplan */ * from tab where c1 = $1; -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/ai4db-adaptive-plan-selection.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/ai4db-adaptive-plan-selection.md deleted file mode 100644 index e472d250..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/adaptive-plan-selection/ai4db-adaptive-plan-selection.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: adaptive-plan-selection -summary: adaptive-plan-selection -author: zhang cuiping -date: 2023-04-07 ---- - -# Adaptive Plan Selection - -- **[Overview](adaptive-plan-selection-overview.md)** - -- **[Prerequisites](adaptive-plan-selection-prerequisites.md)** - -- **[Usage Guide](adaptive-plan-selection-usage-guide.md)** - -- **[Best Practice](adaptive-plan-selection-best-practices.md)** - -- **[Troubleshooting](adaptive-plan-selection-troubleshooting.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/ai4db-intelligent-cardinality-estimation.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/ai4db-intelligent-cardinality-estimation.md deleted file mode 100644 index ab43288d..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/ai4db-intelligent-cardinality-estimation.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: intelligent-cardinality-estimation -summary: intelligent-cardinality-estimation -author: zhang cuiping -date: 2023-04-07 ---- - -# Intelligent Cardinality Estimation - -- **[Overview](intelligent-cardinality-estimation-overview.md)** - -- **[Prerequisites](intelligent-cardinality-estimation-prerequisites.md)** - -- **[Usage Guide](intelligent-cardinality-estimation-usage-guide.md)** - -- **[Best Practice](intelligent-cardinality-estimation-best-practices.md)** - -- **[Troubleshooting](intelligent-cardinality-estimation-troubleshooting.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-best-practices.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-best-practices.md deleted file mode 100644 index d4d0f9b4..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-best-practices.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Best Practice -summary: Best Practice -author: Guo Huan -date: 2022-10-24 ---- - -# Best Practice - -The following data table is generated: - -```sql -benchmark=# \d part; -id | integer | -p_brand | character varying(256) | -p_type | character varying(256) | -p_container | character varying(256) | -p_mfgr | character varying(256) | -``` - -Insert 10 million lines of data. 
- -```sql -benchmark=# select count(1) from part1; -10000000 -``` - -Create four different multi-column indexes on the data table: - -```sql -benchmark=# select * from pg_indexes where tablename='part1'; -public | part1 | brand_type_container | | CREATE INDEX brand_type_container ON part1 USING btree (p_brand, p_type, p_container) TABLESPACE pg_default -public | part1 | brand_type_mfgr | | CREATE INDEX brand_type_mfgr ON part1 USING btree (p_brand, p_type, p_mfgr) TABLESPACE pg_default -public | part1 | brand_container_mfgr | | CREATE INDEX brand_container_mfgr ON part1 USING btree (p_brand, p_container, p_mfgr) TABLESPACE pg_default -public | part1 | type_container_mfgr | | CREATE INDEX type_container_mfgr ON part1 USING btree (p_type, p_container, p_mfgr) TABLESPACE pg_default -``` - -Generate a batch of queries that contain multiple columns of equality conditions for the data table as follows: - -```sql -explain analyze select * from part1 where p_container='LG CASE' AND p_brand='Brand#34' AND p_mfgr='Manufacturer#2' AND p_type='SMALL BRUSHED COPPER'; -``` - -Test the execution plan in the scenarios where multi-column statistics are not created and ABO statistics are created. - -```sql -benchmark=# explain analyze select * from part1 where p_container='LG CASE' AND p_brand='Brand#34' AND p_mfgr='Manufacturer#2' AND p_type='SMALL BRUSHED COPPER'; -Bitmap Heap Scan on part1 (cost=5.30..336.06 rows=17 width=56) (actual time=0.953..7.061 rows=103 loops=1) - Recheck Cond: (((p_brand)::text = 'Brand#34'::text) AND ((p_type)::text = 'SMALL BRUSHED COPPER'::text) AND ((p_container)::text = 'LG CASE'::text)) - Filter: ((p_mfgr)::text = 'Manufacturer#2'::text) - Rows Removed by Filter: 773 - Heap Blocks: exact=871 - -> Bitmap Index Scan on brand_type_container (cost=0.00..5.30 rows=84 width=0) (actual time=0.704..0.704 rows=876 loops=1) - Index Cond: (((p_brand)::text = 'Brand#34'::text) AND ((p_type)::text = 'SMALL BRUSHED COPPER'::text) AND ((p_container)::text = 'LG CASE'::text)) -Total runtime: 7.213 ms -benchmark=# explain analyze select * from part1 where p_container='LG CASE' AND p_brand='Brand#34' AND p_mfgr='Manufacturer#2' AND p_type='SMALL BRUSHED COPPER'; -Bitmap Heap Scan on part1 (cost=10.59..723.97 rows=210 width=56) (actual time=0.112..0.434 rows=103 loops=1) - Recheck Cond: (((p_type)::text = 'SMALL BRUSHED COPPER'::text) AND ((p_container)::text = 'LG CASE'::text) AND ((p_mfgr)::text = 'Manufacturer#2'::text)) - Filter: ((p_brand)::text = 'Brand#34'::text) - Rows Removed by Filter: 64 - Heap Blocks: exact=167 - -> Bitmap Index Scan on type_container_mfgr (cost=0.00..10.54 rows=183 width=0) (actual time=0.081..0.081 rows=167 loops=1) - Index Cond: (((p_type)::text = 'SMALL BRUSHED COPPER'::text) AND ((p_container)::text = 'LG CASE'::text) AND ((p_mfgr)::text = 'Manufacturer#2'::text)) -Total runtime: 0.533 ms -``` - -According to the preceding operations, in this scenario, the ABO cardinality estimation accelerates the query by more than 10 times. 
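The comparison above presumes that the ABO (Bayesian network) statistics already exist on the four predicate columns. A minimal sketch of creating them for **part1**, assuming the GUC settings from the Prerequisites section and the multi-column ANALYZE syntax from the Usage Guide; the sampling value of -2 is an arbitrary illustration:

```sql
-- Enable intelligent statistics (see Prerequisites).
SET enable_ai_stats = on;
SET multi_stats_type = 'BAYESNET';

-- A negative default_statistics_target selects sampling by percentage (see Usage Guide).
SET default_statistics_target = -2;

-- Build the multi-column Bayesian network model over the equality-predicate columns.
ANALYZE part1 ((p_brand, p_type, p_container, p_mfgr));
```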
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-overview.md deleted file mode 100644 index 08af26d6..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2022-10-24 ---- - -# Overview - -Intelligent cardinality estimation uses the Bayesian network model in the database to model the association and distribution of multi-column data samples, so as to provide more accurate cardinality estimation for multi-column equality queries. More accurate cardinality estimation can significantly improve the accuracy of the optimizer's selection of plans and operators, thereby improving the overall throughput of the database. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-prerequisites.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-prerequisites.md deleted file mode 100644 index 74af3085..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-prerequisites.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Prerequisites -summary: Prerequisites -author: Guo Huan -date: 2022-10-24 ---- - -# Prerequisites - -The database is running properly. The GUC parameter **enable_ai_stats** is set to **on**, and **multi_stats_type** is set to **'BAYESNET'** or **'ALL'**. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-troubleshooting.md deleted file mode 100644 index 4c6c7dd7..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-troubleshooting.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2022-10-24 ---- - -# Troubleshooting - -If a model cannot be created due to an exception, the ABO optimizer creates only traditional statistics. Rectify the fault based on the alarm information. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-usage-guide.md deleted file mode 100644 index 314585b3..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/intelligent-cardinality-estimation-usage-guide.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2022-10-24 ---- - -# Usage Guide - -1. Set the GUC parameter default_statistics_target to an integer ranging from [-100, -1], which indicates the sampling rate. -2. 
Use ANALYZE(([column_name,])) to collect data statistics and create models.
3. Enter a query. If a statistical model has been created on the equality-query columns involved in the query, the model is automatically used to estimate the selectivity.
4. When the intelligent statistics model is no longer needed, you can use ALTER TABLE [table_name] DELETE STATISTICS (([column_name,])) to delete the statistics and the model.

For details about other methods, see sections [ALTER TABLE](../../../../reference-guide/sql-syntax/ALTER-TABLE.md) and [ANALYZE | ANALYSE](../../../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md). diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-1-x-tuner-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-1-x-tuner-overview.md deleted file mode 100644 index 17d57b3f..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-1-x-tuner-overview.md +++ /dev/null @@ -1,10 +0,0 @@

---
title: Overview
summary: Overview
author: Guo Huan
date: 2021-05-19
---

# Overview

X-Tuner is a parameter tuning tool integrated into the database. It uses AI technologies such as deep reinforcement learning and global search algorithms to obtain optimal database parameter settings without manual intervention. The tool does not have to be deployed together with the database environment; it can be deployed and run independently, without requiring a database installation environment. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-2-preparations.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-2-preparations.md deleted file mode 100644 index dfd50971..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-2-preparations.md +++ /dev/null @@ -1,227 +0,0 @@

---
title: Preparations
summary: Preparations
author: Guo Huan
date: 2021-10-21
---

# Preparations
## Prerequisites and Precautions

- The database is running properly, the client can connect to it properly, and data can be imported to the database, so that the tuning program can run the benchmark to measure the tuning effect.
- To use this tool, you need to specify the user who logs in to the database. That user must have sufficient permissions to obtain the required database status information (a sketch of granting this permission appears after this list).
- If you log in to the database host as a Linux user, add **$GAUSSHOME/bin** to the **PATH** environment variable so that you can directly run database O&M tools, such as gsql, gs_guc, and gs_ctl.
- Python 3.6 or later is recommended. Ensure that the required dependencies are installed in the operating environment so that the tuning program can start properly. You can install a Python 3.6+ environment independently without setting it as a global environment variable. You are not advised to install the tool as the root user; if you install it as the root user and run it as another user, ensure that you have the read permission on the configuration file.
- This tool can run in three modes. In **tune** and **train** modes, you need to configure the benchmark running environment and import data; the tool iteratively runs the benchmark to check whether performance improves after the parameters are modified.
- In **recommend** mode, you are advised to run the command while the database is executing the workload, so that more accurate real-time workload information can be obtained.
- By default, this tool provides benchmark running script samples for TPC-C, TPC-H, TPC-DS, and sysbench. If you use these benchmarks to perform pressure tests on the database system, you can modify or configure the preceding configuration files. To adapt the tool to your own service scenarios, compile a script that drives your customized benchmark based on the **template.py** file in the **benchmark** directory.
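For the permission prerequisite above, the Examples section later in this chapter suggests granting the administrator permission to the login user. A minimal sketch, assuming a hypothetical tuning account named **tuner_user**:

```sql
-- Grant administrator permission so the tool can read status views such as
-- pg_stat_database and pg_stat_bgwriter (see the recommend-mode example).
ALTER USER tuner_user SYSADMIN;
```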
- -## Principles - -The tuning program is a tool independent of the database kernel. The usernames and passwords for the database and instances are required to control the benchmark performance test of the database. Before starting the tuning program, ensure that the interaction in the test environment is normal, the benchmark test script can be run properly, and the database can be connected properly. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If the parameters to be tuned include the parameters that take effect only after the database is restarted, the database will be restarted multiple times during the tuning. Exercise caution when using **train** and **tune** modes if the database is running jobs. - -X-Tuner can run in any of the following modes: - -- **recommend**: Log in to the database using the specified user name, obtain the feature information about the running workload, and generate a parameter recommendation report based on the feature information. Report improper parameter settings and potential risks in the current database. Output the currently running workload behavior and characteristics. Output the recommended parameter settings. In this mode, the database does not need to be restarted. In other modes, the database may need to be restarted repeatedly. -- **train**: Modify parameters and execute the benchmark based on the benchmark information provided by users. The reinforcement learning model is trained through repeated iteration so that you can load the model in **tune** mode for optimization. -- **tune**: Use an optimization algorithm to tune database parameters. Currently, two types of algorithms are supported: deep reinforcement learning and global search algorithm (global optimization algorithm). The deep reinforcement learning mode requires **train** mode to generate the optimized model after training. However, the global search algorithm does not need to be trained in advance and can be directly used for search and optimization. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** If the deep reinforcement learning algorithm is used in **tune** mode, a trained model must be available, and the parameters for training the model must be the same as those in the parameter list (including max and min) for tuning. - -**Figure 1** X-Tuner structure - -![x-tuner-structure](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/preparations-1.png) - -Figure 1 X-Tuner architecture shows the overall architecture of the X-Tuner. The X-Tuner system can be divided into the following parts: - -- DB: The DB_Agent module is used to abstract database instances. It can be used to obtain the internal database status information and current database parameters and set database parameters. The SSH connection used for logging in to the database environment is included on the database side. -- Algorithm: algorithm package used for optimization, including global search algorithms (such as Bayesian optimization and particle swarm optimization) and deep reinforcement learning (such as DDPG). -- X-Tuner main logic module: encapsulated by the environment module. Each step is an optimization process. The entire optimization process is iterated through multiple steps. -- benchmark: a user-specified benchmark performance test script, which is used to run benchmark jobs. The benchmark result reflects the performance of the database system. 
> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Write the benchmark script so that a larger score indicates better performance. For example, for a benchmark that measures the overall execution duration of SQL statements, such as TPC-H, the inverse of the overall execution duration can be used as the benchmark score.
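Because restart-sensitive parameters cause the database to restart repeatedly during tuning (see the NOTE earlier in this section), it helps to know in advance which candidate knobs require a restart. A sketch using the standard **pg_settings** view:

```sql
-- Parameters with context 'postmaster' take effect only after a restart.
-- Reviewing them helps predict whether train/tune mode will restart the database.
SELECT name, setting, context
FROM pg_settings
WHERE context = 'postmaster'
ORDER BY name;
```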
- -## Installing and Running X-Tuner - -You can run the X-Tuner in two ways. One is to run the X-Tuner directly through the source code. The other is to install the X-Tuner on the system through the Python setuptools, and then run the **gs_xtuner** command to call the X-Tuner. The following describes two methods of running the X-Tuner. - -Method 1: Run the source code directly. - -1. Switch to the **xtuner** source code directory. For the openGauss community code, the path is **openGauss-server/src/gausskernel/dbmind/tools/xtuner**. For an installed database system, the source code path is *$GAUSSHOME***/bin/dbmind/xtuner**. - -2. You can view the **requirements.txt** file in the current directory. Use the pip package management tool to install the dependency based on the **requirements.txt** file. - - ```bash - pip install -r requirements.txt - ``` - -3. After the installation is successful, add the environment variable PYTHONPATH, and then run **main.py**. For example, to obtain the help information, run the following command: - - ```bash - cd tuner # Switch to the directory where the main.py entry file is located. - export PYTHONPATH='..' # Add the upper-level directory to the path for searching for packages. - python main.py --help # Obtain help information. The methods of using other functions are similar. - ``` - -Method 2: Install the X-Tuner in the system. - -1. You can use the **setup.py** file to install the X-Tuner to the system and then run the **gs_xtuner** command. You need to switch to the root directory of **xtuner**. For details about the directory location, see the preceding description. - -2. Run the following command to install the tool in the Python environment using Python setuptools: - - ```bash - python setup.py install - ``` - - If the **bin** directory of Python is added to the *PATH* environment variable, the **gs_xtuner** command can be directly called anywhere. - -3. For example, to obtain the help information, run the following command: - - ```bash - gs_xtuner --help - ``` - -
## Description of the X-Tuner Configuration File

Before running the X-Tuner, you need to load the configuration file. The default path of the configuration file is tuner/xtuner.conf. You can run the **gs_xtuner --help** command to view the absolute path of the configuration file that is loaded by default.

```
...
-x TUNER_CONFIG_FILE, --tuner-config-file TUNER_CONFIG_FILE
This is the path of the core configuration file of the
X-Tuner. You can specify the path of the new
configuration file. The default path is /path/to/xtuner/xtuner.conf.
You can modify the configuration file to control the
tuning process.
...
```

You can modify the configuration items in the configuration file as required to instruct the X-Tuner to perform different actions. For details about the configuration items in the configuration file, see Table 2 in [Command Reference](1-5-command-reference.md). If you need to change the loading path of the configuration file, you can specify the path through the **-x** command line option.
- -## Benchmark Selection and Configuration - -The benchmark drive script is stored in the benchmark subdirectory of the X-Tuner. X-Tuner provides common benchmark driver scripts, such as TPC-C and TPC-H. The X-Tuner invokes the **get_benchmark_instance()** command in the benchmark/__init__.py file to load different benchmark driver scripts and obtain benchmark driver instances. The format of the benchmark driver script is described as follows: - -- Name of the driver script: name of the benchmark. The name is used to uniquely identify the driver script. You can specify the benchmark driver script to be loaded by setting the **benchmark_script** configuration item in the configuration file of the X-Tuner. -- The driver script contains the *path* variable, *cmd* variable, and the **run** function. - -The following describes the three elements of the driver script: - -1. *path*: path for saving the benchmark script. You can modify the path in the driver script or specify the path by setting the **benchmark_path** configuration item in the configuration file. - -2. *cmd*: command for executing the benchmark script. You can modify the command in the driver script or specify the command by setting the **benchmark_cmd** configuration item in the configuration file. Placeholders can be used in the text of cmd to obtain necessary information for running cmd commands. For details, see the TPC-H driver script example. These placeholders include: - - - {host}: IP address of the database host machine - - {port}: listening port number of the database instance - - {user}: user name for logging in to the database - - {password}: password of the user who logs in to the database system - - {db}: name of the database that is being optimized - -3. **run** function: The signature of this function is as follows: - - ``` - def run(remote_server, local_host) -> float: - ``` - - The returned data type is float, indicating the evaluation score after the benchmark is executed. A larger value indicates better performance. For example, the TPC-C test result tpmC can be used as the returned value, the inverse number of the total execution time of all SQL statements in TPC-H can also be used as the return value. A larger return value indicates better performance. - - The *remote_server* variable is the shell command interface transferred by the X-Tuner program to the remote host (database host machine) used by the script. The *local_host* variable is the shell command interface of the local host (host where the X-Tuner script is executed) transferred by the X-Tuner program. Methods provided by the preceding shell command interface include: - - ``` - exec_command_sync(command, timeout) - Function: This method is used to run the shell command on the host. - Parameter list: - command: The data type can be str, and the element can be a list or tuple of the str type. This parameter is optional. - timeout: The timeout interval for command execution in seconds. This parameter is optional. - Return value: - Returns 2-tuple (stdout and stderr). stdout indicates the standard output stream result, and stderr indicates the standard error stream result. The data type is str. - ``` - - ``` - exit_status - Function: This attribute indicates the exit status code after the latest shell command is executed. - Note: Generally, if the exit status code is 0, the execution is normal. If the exit status code is not 0, an error occurs. - ``` - -Benchmark driver script example: - -1. 
TPC-C driver script

   ```python
   from tuner.exceptions import ExecutionError

   # WARN: You need to download the benchmark-sql test tool to the system,
   # replace the PostgreSQL JDBC driver with the openGauss driver,
   # and configure the benchmark-sql configuration file.
   # The program starts the test by running the following command:
   path = '/path/to/benchmarksql/run'  # Path for storing the TPC-C test script benchmark-sql
   cmd = "./runBenchmark.sh props.gs"  # Customize a benchmark-sql test configuration file named props.gs.

   def run(remote_server, local_host):
       # Switch to the TPC-C script directory, clear historical error logs, and run the test command.
       # You are advised to wait for several seconds because the benchmark-sql test script generates the final test report through a shell script, so the whole process may be delayed.
       # To ensure that the final tpmC value report can be obtained, wait for 3 seconds.
       stdout, stderr = remote_server.exec_command_sync(['cd %s' % path, 'rm -rf benchmarksql-error.log', cmd, 'sleep 3'])
       # If there is data in the standard error stream, an exception is raised and the system exits abnormally.
       if len(stderr) > 0:
           raise ExecutionError(stderr)

       # Find the final tpmC result.
       tpmC = None
       split_string = stdout.split()  # Split the standard output stream result.
       for i, st in enumerate(split_string):
           # In benchmark-sql 5.0, the tpmC value appears two fields after the keyword (NewOrders). In normal cases, the tpmC value is returned after the keyword is found.
           if "(NewOrders)" in st:
               tpmC = split_string[i + 2]
               break
       stdout, stderr = remote_server.exec_command_sync(
           "cat %s/benchmarksql-error.log" % path)
       nb_err = stdout.count("ERROR:")  # Check whether errors occurred during the benchmark run and record the number of errors.
       return float(tpmC) - 10 * nb_err  # The number of errors is a penalty term with a penalty coefficient of 10; the larger the coefficient, the heavier the penalty for errors.
   ```

2. TPC-H driver script

   ```python
   import time

   from tuner.exceptions import ExecutionError

   # WARN: You need to import data into the database, and the SQL statements in the following path will be executed.
   # The program automatically collects the total execution duration of these SQL statements.
   path = '/path/to/tpch/queries'  # Directory for storing SQL scripts used for the TPC-H test
   cmd = "gsql -U {user} -W {password} -d {db} -p {port} -f {file}"  # The command for running the TPC-H test script. Generally, gsql -f script_file is used.

   def run(remote_server, local_host):
       # Traverse all test case file names in the current directory.
       find_file_cmd = "find . -type f -name '*.sql'"
       stdout, stderr = remote_server.exec_command_sync(['cd %s' % path, find_file_cmd])
       if len(stderr) > 0:
           raise ExecutionError(stderr)
       files = stdout.strip().split('\n')
       time_start = time.time()
       for file in files:
           # Replace {file} with the file variable and run the command.
           perform_cmd = cmd.format(file=file)
           stdout, stderr = remote_server.exec_command_sync(['cd %s' % path, perform_cmd])
           if len(stderr) > 0:
               print(stderr)
       # The cost is the total execution duration of all test cases.
       cost = time.time() - time_start
       # Use the inverse number to adapt to the definition of the run function: the larger the returned result, the better the performance.
- return - cost - ``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-3-examples.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-3-examples.md deleted file mode 100644 index eb207db5..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-3-examples.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -title: Examples -summary: Examples -author: Guo Huan -date: 2021-05-19 ---- - -# Examples - -X-Tuner supports three modes: recommend mode for obtaining parameter diagnosis reports, train mode for training reinforcement learning models, and tune mode for using optimization algorithms. The preceding three modes are distinguished by command line parameters, and the details are specified in the configuration file. - -## Configuring the Database Connection Information - -Configuration items for connecting to a database in the three modes are the same. You can enter the detailed connection information in the command line or in the JSON configuration file. Both methods are described as follows: - -1. Entering the connection information in the command line - - Input the following options: **-db-name -db-user -port -host -host-user**. The **-host-ssh-port** is optional. The following is an example: - - ``` - gs_xtuner recommend --db-name postgres --db-user omm --port 5678 --host 192.168.1.100 --host-user omm - ``` - -2. Entering the connection information in the JSON configuration file - - Assume that the file name is **connection.json**. The following is an example of the JSON configuration file: - - ``` - { - "db_name": "postgres", # Database name - "db_user": "dba", # Username for logging in to the database - "host": "127.0.0.1", # IP address of the database host - "host_user": "dba", # Username for logging in to the database host - "port": 5432, # Listening port number of the database - "ssh_port": 22 # SSH listening port number of the database host - } - ``` - - Input **-f connection.json**. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** To prevent password leakage, the configuration file and command line parameters do not contain password information by default. After you enter the preceding connection information, the program prompts you to enter the database password and the OS login password in interactive mode. - -## Example of Using recommend Mode - -The configuration item **scenario** takes effect for recommend mode. If the value is **auto**, the workload type is automatically detected. - -Run the following command to obtain the diagnosis result: - -``` - -gs_xtuner recommend -f connection.json - -``` - -The diagnosis report is generated as follows: - -**Figure 1** Report generated in recommend mode - -![report-generated-in-recommend-mode](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/examples-1.png) - -In the preceding report, the database parameter configuration in the environment is recommended, and a risk warning is provided. The report also generates the current workload features. The following features are for reference: - -- **temp_file_size**: number of generated temporary files. If the value is greater than 0, the system uses temporary files. If too many temporary files are used, the performance is poor. If possible, increase the value of **work_mem**. 
- **cache_hit_rate**: cache hit ratio of **shared_buffer**, indicating the cache efficiency of the current workload.
- **read_write_ratio**: read/write ratio of database jobs.
- **search_modify_ratio**: ratio of data queries to data modifications in a database job.
- **ap_index**: AP index of the current workload. The value ranges from 0 to 10; a larger value indicates a stronger preference for data analysis and retrieval.
- **workload_type**: workload type, which can be AP, TP, or HTAP based on database statistics.
- **checkpoint_avg_sync_time**: average time taken to flush data to disk at each checkpoint, in milliseconds.
- **load_average**: average load of each CPU core over 1 minute, 5 minutes, and 15 minutes. Generally, a value around 1 means the current hardware matches the workload; around 3 means the workload is heavy; greater than 5 means the workload is too heavy, and you are advised to reduce the load or upgrade the hardware.

> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Some system catalogs keep accumulating statistics, which may affect load feature identification. Therefore, you are advised to clear the statistics of these system catalogs, run the workload for a period of time, and then use recommend mode for diagnosis to obtain more accurate results. To clear the statistics, run the following commands:
>
> ```sql
> select pg_stat_reset_shared('bgwriter');
> select pg_stat_reset();
> ```
>
>- In recommend mode, information in the **pg\_stat\_database** and **pg\_stat\_bgwriter** system catalogs is read, so the database login user must have sufficient permissions. (You are advised to use an account with the administrator permission, which can be granted to *username* by running **alter user username sysadmin**.)

## Example of Using train Mode

This mode is used to train the deep reinforcement learning model. The configuration items related to this mode are as follows:

- **rl_algorithm**: algorithm used to train the reinforcement learning model. Currently, this parameter can be set to **ddpg**.

- **rl_model_path**: path for storing the reinforcement learning model generated after training.

- **rl_steps**: maximum number of training steps in the training process.

- **max_episode_steps**: maximum number of steps in each episode.

- **scenario**: specifies the workload type. If the value is **auto**, the system automatically determines the workload type. The recommended parameter tuning list varies with the workload type.

- **tuning_list**: specifies the parameters to be tuned. If this parameter is not specified, the list of parameters to be tuned is automatically recommended based on the workload type. If it is specified, **tuning_list** indicates the path of the tuning list file. The following is an example of the content of a tuning list configuration file.
   ```
   {
       "work_mem": {
         "default": 65536,
         "min": 65536,
         "max": 655360,
         "type": "int",
         "restart": false
       },
       "shared_buffers": {
         "default": 32000,
         "min": 16000,
         "max": 64000,
         "type": "int",
         "restart": true
       },
       "random_page_cost": {
         "default": 4.0,
         "min": 1.0,
         "max": 4.0,
         "type": "float",
         "restart": false
       },
       "enable_nestloop": {
         "default": true,
         "type": "bool",
         "restart": false
       }
   }
   ```

After the preceding configuration items are configured, run the following command to start the training:

```
gs_xtuner train -f connection.json
```

After the training is complete, a model file is generated in the directory specified by the **rl_model_path** configuration item.

## Example of Using tune Mode

The tune mode supports multiple algorithms: DDPG, which is based on reinforcement learning (RL), and the Bayesian optimization and particle swarm optimization (PSO) algorithms, which are both global optimization (GOP) algorithms.

The configuration items related to tune mode are as follows:

- **tune_strategy**: specifies the algorithm to be used for optimization. The value can be **rl** (use the reinforcement learning model), **gop** (use a global optimization algorithm), or **auto** (automatic selection). If this parameter is set to **rl**, the RL-related configuration items take effect. In addition to the configuration items that take effect in train mode, the **test_episode** configuration item also takes effect; it indicates the maximum number of episodes in the tuning process and directly affects the execution time of tuning. Generally, a larger value means longer time consumption.
- **gop_algorithm**: specifies a global optimization algorithm. The value can be **bayes** or **pso**.
- **max_iterations**: specifies the maximum number of iterations. A larger value indicates a longer search time and a better search effect.
- **particle_nums**: specifies the number of particles. This parameter is valid only for the PSO algorithm.
- For details about **scenario** and **tuning_list**, see the description of train mode.

After the preceding items are configured, run the following command to start tuning:

```
gs_xtuner tune -f connection.json
```

> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** Before using tune and train modes, you need to import the data required by the benchmark, check whether the benchmark can run properly, and back up the current database parameters.
To query the current database parameters, run the following command: select name, setting from pg_settings; diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-4-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-4-obtaining-help-information.md deleted file mode 100644 index 12f32ef2..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-4-obtaining-help-information.md +++ /dev/null @@ -1,51 +0,0 @@

---
title: Obtaining Help Information
summary: Obtaining Help Information
author: Guo Huan
date: 2021-05-19
---

# Obtaining Help Information

Before starting the tuning program, run the following command to obtain help information:

```bash
python main.py --help
```

The command output is as follows:

```bash
usage: main.py [-h] [-m {train,tune}] [-f CONFIG_FILE] [--db-name DB_NAME]
[--db-user DB_USER] [--port PORT] [--host HOST]
[--host-user HOST_USER] [--host-ssh-port HOST_SSH_PORT]
[--scenario {ap,tp,htap}] [--benchmark BENCHMARK]
[--model-path MODEL_PATH] [-v]

X-Tuner: a self-tuning toolkit for MogDB.

optional arguments:
-h, --help show this help message and exit
-m {train,tune}, --mode {train,tune}
train a reinforcement learning model or tune by your
trained model.
-f CONFIG_FILE, --config-file CONFIG_FILE
you can pass a config file path or you should manually
set database information.
--db-name DB_NAME database name.
--db-user DB_USER database user name.
--port PORT database connection port.
--host HOST where did your database install on?
--host-user HOST_USER
user name of the host where your database installed
on.
--host-ssh-port HOST_SSH_PORT
host ssh port.
--scenario {ap,tp,htap}
--benchmark BENCHMARK
--model-path MODEL_PATH
the place where you want to save model weights to or
load model weights from.
-v, --version
show version.
```

diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-5-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-5-command-reference.md deleted file mode 100644 index b91dcea4..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-5-command-reference.md +++ /dev/null @@ -1,50 +0,0 @@

---
title: Command Reference
summary: Command Reference
author: Guo Huan
date: 2021-05-19
---

# Command Reference

**Table 1** Command-line Parameter

| Parameter | Description | Value Range |
| :--------------------- | :----------------------------------------------------------- | :--------------------- |
| mode | Specifies the running mode of the tuning program. | train, tune, recommend |
| -tuner-config-file, -x | Path of the core parameter configuration file of X-Tuner. The default path is **xtuner.conf** under the installation directory. | - |
| -db-config-file, -f | Path of the connection information configuration file used by the optimization program to log in to the database host. If the database connection information is configured in this file, the following database connection information can be omitted. | - |
| -db-name | Specifies the name of a database to be tuned.
| - | -| -db-user | Specifies the user account used to log in to the tuned database. | - | -| -port | Specifies the database listening port. | - | -| -host | Specifies the host IP address of the database instance. | - | -| -host-user | Specifies the username for logging in to the host where the database instance is located. The database O&M tools, such as **gsql** and **gs_ctl**, can be found in the environment variables of the username. | - | -| -host-ssh-port | Specifies the SSH port number of the host where the database instance is located. This parameter is optional. The default value is **22**. | - | -| -help, -h | Returns the help information. | - | -| -version, -v | Returns the current tool version. | - | - -**Table 2** Parameters in the configuration file - -| Parameter | Description | Value Range | -| :-------------------- | :----------------- | :------------------- | -| logfile | Path for storing generated logs. | - | -| output_tuning_result | (Optional) Specifies the path for saving the tuning result. | - | -| verbose | Whether to print details. | on, off | -| recorder_file | Path for storing logs that record intermediate tuning information. | - | -| tune_strategy | Specifies a strategy used in tune mode. | rl, gop, auto | -| drop_cache | Whether to perform drop cache in each iteration. Drop cache can make the benchmark score more stable. If this parameter is enabled, add the login system user to the **/etc/sudoers** list and grant the NOPASSWD permission to the user. (You are advised to enable the NOPASSWD permission temporarily and disable it after the tuning is complete.) | on, off | -| used_mem_penalty_term | Penalty coefficient of the total memory used by the database. This parameter is used to prevent performance deterioration caused by unlimited memory usage. The greater the value is, the greater the penalty is. | Recommended value: 0 ~ 1 | -| rl_algorithm | Specifies the RL algorithm. | ddpg | -| rl_model_path | Path for saving or reading the RL model, including the save directory name and file name prefix. In train mode, this path is used to save the model. In tune mode, this path is used to read the model file. | - | -| rl_steps | Number of training steps of the deep reinforcement learning algorithm | - | -| max_episode_steps | Maximum number of training steps in each episode | - | -| test_episode | Number of episodes when the RL algorithm is used for optimization | - | -| gop_algorithm | Specifies a global optimization algorithm. | bayes, pso, auto | -| max_iterations | Maximum number of iterations of the global search algorithm. (The value is not fixed. Multiple iterations may be performed based on the actual requirements.) | - | -| particle_nums | Number of particles when the PSO algorithm is used | - | -| benchmark_script | Benchmark driver script. This parameter specifies the file with the same name in the benchmark path to be loaded. Typical benchmarks, such as TPC-C and TPC-H, are supported by default. | tpcc, tpch, tpcds, sysbench … | -| benchmark_path | Path for saving the benchmark script. If this parameter is not configured, the configuration in the benchmark drive script is used. | - | -| benchmark_cmd | Command for starting the benchmark script. If this parameter is not configured, the configuration in the benchmark drive script is used. | - | -| benchmark_period | This parameter is valid only for **period benchmark**. It indicates the test period of the entire benchmark. The unit is second. | - | -| scenario | Type of the workload specified by the user. 
| tp, ap, htap | -| tuning_list | List of parameters to be tuned. For details, see the **share/knobs.json.template** file. | - | diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-6-Troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-6-Troubleshooting.md deleted file mode 100644 index df96e0c7..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/1-x-tuner-parameter-optimization-and-diagnosis/1-6-Troubleshooting.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2021-05-19 ---- - -# Troubleshooting - -- Failure of connection to the database instance: Check whether the database instance is faulty or the security permissions of configuration items in the **pg_hba.conf** file are incorrectly configured. -- Restart failure: Check the health status of the database instance and ensure that the database instance is running properly. -- Dependency installation failure: Upgrade the pip package management tool by running the **python -m pip install -upgrade pip** command. -- Poor performance of TPC-C jobs: In high-concurrency scenarios such as TPC-C, a large amount of data is modified during pressure tests. Each test is not idempotent, for example, the data volume in the TPC-C database increases, invalid tuples are not cleared using VACUUM FULL, checkpoints are not triggered in the database, and drop cache is not performed. Therefore, it is recommended that the benchmark data that is written with a large amount of data, such as TPC-C, be imported again at intervals (depending on the number of concurrent tasks and execution duration). A simple method is to back up the $PGDATA directory. -- When the TPC-C job is running, the TPC-C driver script reports the error "TypeError: float() argument must be a string or a number, not 'NoneType'" (**none** cannot be converted to the float type). This is because the TPC-C pressure test result is not obtained. There are many causes for this problem, manually check whether TPC-C can be successfully executed and whether the returned result can be obtained. If the preceding problem does not occur, you are advised to set the delay time of the **sleep** command in the command list in the TPC-C driver script to a larger value. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-1-single-query-index-recommendation.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-1-single-query-index-recommendation.md deleted file mode 100644 index ea47de36..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-1-single-query-index-recommendation.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Single-query Index Recommendation -summary: Single-query Index Recommendation -author: Guo Huan -date: 2021-05-19 ---- - -# Single-query Index Recommendation - -The single-query index recommendation function allows users to directly perform operations in the database. This function generates recommended indexes for a single query statement entered by users based on the semantic information of the query statement and the statistics of the database. 
This function involves the following interfaces: - -**Table 1** Single-query index recommendation interfaces - -| Function Name | Parameter | Description | -| :-------------- | :------------------- | :----------------------------------------------------------- | -| gs_index_advise | SQL statement string | Generates a recommendation index for a single query statement. | - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - This function supports only a single SELECT statement and does not support other types of SQL statements. -> - Partitioned tables, column-store tables, segment-paged tables, common views, materialized views, global temporary tables, and encrypted databases are not supported. - -## Application Scenarios - -Use the preceding function to obtain the recommendation index generated for the query. The recommendation result consists of the table name and column name of the index. - -For example: - -```sql -mogdb=> select "table", "column" from gs_index_advise('SELECT c_discount from bmsql_customer where c_w_id = 10'); - table | column -----------------+---------- - bmsql_customer | (c_w_id) -(1 row) -``` - -The preceding information indicates that an index should be created on the **c_w_id** column of the **bmsql_customer** table. You can run the following SQL statement to create an index: - -```sql -CREATE INDEX idx on bmsql_customer(c_w_id); -``` - -Some SQL statements may also be recommended to create a join index, for example: - -```sql -mogdb=# select "table", "column" from gs_index_advise('select name, age, sex from t1 where age >= 18 and age < 35 and sex = ''f'';'); - table | column --------+------------ - t1 | (age, sex) -(1 row) -``` - -The preceding statement indicates that a join index **(age, sex)** needs to be created in the **t1** table. You can run the following command to create a join index: - -```sql -CREATE INDEX idx1 on t1(age, sex); -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Parameters of the system function **gs_index_advise()** are of the text type. If the parameters contain special characters such as single quotation marks ('), you can use single quotation marks (') to escape the special characters. For details, see the preceding example. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-2-virtual-index.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-2-virtual-index.md deleted file mode 100644 index 4d6cd416..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-2-virtual-index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Virtual Index -summary: Virtual Index -author: Guo Huan -date: 2021-05-19 ---- - -# Virtual Index - -The virtual index function allows users to directly perform operations in the database. This function simulates the creation of a real index to avoid the time and space overhead required for creating a real index. Based on the virtual index, users can evaluate the impact of the index on the specified query statement by using the optimizer. 
- -This function involves the following interfaces: - -**Table 1** Virtual index function interfaces - -| Function Name | Parameter | Description | -| :------------------- | :------------------------------------------------------ | :----------------------------------------------------------- | -| hypopg_create_index | Character string of the statement for creating an index | Creates a virtual index. | -| hypopg_display_index | None | Displays information about all created virtual indexes. | -| hypopg_drop_index | OID of the index | Deletes a specified virtual index. | -| hypopg_reset_index | None | Clears all virtual indexes. | -| hypopg_estimate_size | OID of the index | Estimates the space required for creating a specified index. | - -This function involves the following GUC parameters: - -**Table 2** GUC parameters of the virtual index function - -| Parameter | Description | Default Value | -| :---------------- | :-------------------------------------------- | :------------ | -| enable_hypo_index | Whether to enable the virtual index function. | off | - -## Procedure - -1. Use the **hypopg_create_index** function to create a virtual index. For example: - - ```sql - mogdb=> select * from hypopg_create_index('create index on bmsql_customer(c_w_id)'); - indexrelid | indexname - ------------+------------------------------------- - 329726 | <329726>btree_bmsql_customer_c_w_id - (1 row) - ``` - -2. Enable the GUC parameter **enable_hypo_index**. This parameter controls whether the database optimizer considers the created virtual index when executing the EXPLAIN statement. By executing EXPLAIN on a specific query statement, you can evaluate whether the index can improve the execution efficiency of the query statement based on the execution plan provided by the optimizer. For example: - - ```sql - mogdb=> set enable_hypo_index = on; - SET - ``` - - Before enabling the GUC parameter, run **EXPLAIN** and the query statement. - - ```sql - mogdb=> explain SELECT c_discount from bmsql_customer where c_w_id = 10; - QUERY PLAN - ---------------------------------------------------------------------- - Seq Scan on bmsql_customer (cost=0.00..52963.06 rows=31224 width=4) - Filter: (c_w_id = 10) - (2 rows) - ``` - - After enabling the GUC parameter, run **EXPLAIN** and the query statement. - - ```sql - mogdb=> explain SELECT c_discount from bmsql_customer where c_w_id = 10; - QUERY PLAN - ------------------------------------------------------------------------------------------------------------------ - [Bypass] - Index Scan using <329726>btree_bmsql_customer_c_w_id on bmsql_customer (cost=0.00..39678.69 rows=31224 width=4) - Index Cond: (c_w_id = 10) - (3 rows) - ``` - - By comparing the two execution plans, you can find that the index may reduce the execution cost of the specified query statement. Then, you can consider creating a real index. - -3. (Optional) Use the **hypopg_display_index** function to display all created virtual indexes. For example: - - ```sql - mogdb=> select * from hypopg_display_index(); - indexname | indexrelid | table | column - --------------------------------------------+------------+----------------+------------------ - <329726>btree_bmsql_customer_c_w_id | 329726 | bmsql_customer | (c_w_id) - <329729>btree_bmsql_customer_c_d_id_c_w_id | 329729 | bmsql_customer | (c_d_id, c_w_id) - (2 rows) - ``` - -4. (Optional) Use the **hypopg_estimate_size** function to estimate the space (in bytes) required for creating a virtual index. 
-   For example:
-
-   ```sql
-   mogdb=> select * from hypopg_estimate_size(329730);
-    hypopg_estimate_size
-   ----------------------
-               15687680
-   (1 row)
-   ```
-
-5. Delete the virtual index.
-
-   Use the **hypopg_drop_index** function to delete the virtual index of a specified OID. For example:
-
-   ```sql
-   mogdb=> select * from hypopg_drop_index(329726);
-    hypopg_drop_index
-   -------------------
-    t
-   (1 row)
-   ```
-
-   Use the **hypopg_reset_index** function to clear all created virtual indexes at a time. For example:
-
-   ```sql
-   mogdb=> select * from hypopg_reset_index();
-    hypopg_reset_index
-   --------------------
-
-   (1 row)
-   ```
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - Running **EXPLAIN ANALYZE** does not involve the virtual index function.
-> - The created virtual index is at the database instance level and can be shared by sessions. After a session is closed, the virtual index still exists. However, the virtual index will be cleared after the database is restarted.
-> - This function does not support common views, materialized views, and column-store tables.
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-3-workload-level-index-recommendation.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-3-workload-level-index-recommendation.md
deleted file mode 100644
index 409d1c16..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/2-index-advisor-index-recommendation/2-3-workload-level-index-recommendation.md
+++ /dev/null
@@ -1,105 +0,0 @@
----
-title: Workload-level Index Recommendation
-summary: Workload-level Index Recommendation
-author: Guo Huan
-date: 2021-05-19
----
-
-# Workload-level Index Recommendation
-
-For workload-level indexes, you can run scripts outside the database to use this function. This function uses a workload consisting of multiple DML statements as the input and generates a batch of indexes that can optimize the overall workload execution performance. In addition, it provides the function of extracting service data SQL statements from logs.
-
-## Prerequisites
-
-- The database is normal, and the client can be connected properly.
-
-- The **gsql** tool has been installed by the current user, and the tool path has been added to the **PATH** environment variable.
-
-- The Python 3.6+ environment is available.
-
-- To use the service data extraction function, you need to set the GUC parameters of the node whose data is to be collected as follows:
-
-  - log_min_duration_statement = 0
-
-  - log_statement = 'all'
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** After service data extraction is complete, you are advised to restore the preceding GUC parameters. Otherwise, log files may be expanded.
-
-## Procedure for Using the Service Data Extraction Script
-
-1. Set the GUC parameters according to instructions in the prerequisites.
-
-2. Run the Python script **extract_log.py**:
-
-   ```
-   python extract_log.py [l LOG_DIRECTORY] [f OUTPUT_FILE] [-d DATABASE] [-U USERNAME] [--start_time] [--sql_amount] [--statement] [--json]
-   ```
-
-   The input parameters are as follows:
-
-   - **LOG_DIRECTORY**: directory for storing **pg_log**.
-   - **OUTPUT_FILE**: path for storing the output SQL statements, that is, the path for storing the extracted service data.
-   - **DATABASE** (optional): database name.
If this parameter is not specified, all databases are selected by default. - - **USERNAME** (optional): username. If this parameter is not specified, all users are selected by default. - - **start_time** (optional): start time for log collection. If this parameter is not specified, all files are collected by default. - - **sql_amount** (optional): maximum number of SQL statements to be collected. If this parameter is not specified, all SQL statements are collected by default. - - **statement** (optional): Collects the SQL statements starting with **statement** in **pg_log log**. If this parameter is not specified, the SQL statements are not collected by default. - - **json**: Specifies that the collected log files are stored in JSON format after SQL normalization. If the default format is not specified, each SQL statement occupies a line. - - An example is provided as follows. - - ``` - python extract_log.py $GAUSSLOG/pg_log/dn_6001 sql_log.txt -d postgres -U omm --start_time '2021-07-06 00:00:00' --statement - ``` - -3. Change the GUC parameter values set in step 1 to the values before the setting. - -## Procedure for Using the Index Recommendation Script - -1. Prepare a file that contains multiple DML statements as the input workload. Each statement in the file occupies a line. You can obtain historical service statements from the offline logs of the database. - -2. Run the Python script **index_advisor_workload.py**: - - ``` - python index_advisor_workload.py [p PORT] [d DATABASE] [f FILE] [--h HOST] [-U USERNAME] [-W PASSWORD][--schema SCHEMA][--max_index_num MAX_INDEX_NUM][--max_index_storage MAX_INDEX_STORAGE] [--multi_iter_mode] [--multi_node] [--json] [--driver] [--show_detail] - ``` - - The input parameters are as follows: - - - **PORT**: port number of the connected database. - - **DATABASE**: name of the connected database. - - **FILE**: file path that contains the workload statement. - - **HOST** (optional): ID of the host that connects to the database. - - **USERNAME** (optional): username for connecting to the database. - - **PASSWORD** (optional): password for connecting to the database. - - **SCHEMA**: schema name. - - **MAX_INDEX_NUM** (optional): maximum number of recommended indexes. - - **MAX_INDEX_STORAGE** (optional): maximum size of the index set space. - - **multi_node** (optional): specifies whether the current instance is a distributed database instance. - - **multi_iter_mode** (optional): algorithm mode. You can switch the algorithm mode by setting this parameter. - - **json** (optional): Specifies the file path format of the workload statement as JSON after SQL normalization. By default, each SQL statement occupies one line. - - **driver** (optional): Specifies whether to use the Python driver to connect to the database. By default, **gsql** is used for the connection. - - **show_detail** (optional): Specifies whether to display the detailed optimization information about the current recommended index set. - - Example: - - ``` - python index_advisor_workload.py 6001 postgres tpcc_log.txt --schema public --max_index_num 10 --multi_iter_mode - ``` - - The recommendation result is a batch of indexes, which are displayed on the screen in the format of multiple create index statements. The following is an example of the result. 
- - ```sql - create index ind0 on public.bmsql_stock(s_i_id,s_w_id); - create index ind1 on public.bmsql_customer(c_w_id,c_id,c_d_id); - create index ind2 on public.bmsql_order_line(ol_w_id,ol_o_id,ol_d_id); - create index ind3 on public.bmsql_item(i_id); - create index ind4 on public.bmsql_oorder(o_w_id,o_id,o_d_id); - create index ind5 on public.bmsql_new_order(no_w_id,no_d_id,no_o_id); - create index ind6 on public.bmsql_customer(c_w_id,c_d_id,c_last,c_first); - create index ind7 on public.bmsql_new_order(no_w_id); - create index ind8 on public.bmsql_oorder(o_w_id,o_c_id,o_d_id); - create index ind9 on public.bmsql_district(d_w_id); - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The value of the **multi_node** parameter must be specified based on the current database architecture. Otherwise, the recommendation result is incomplete, or even no recommendation result is generated. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-1-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-1-overview.md deleted file mode 100644 index 0a7c9571..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-1-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2022-05-06 ---- - -# Overview - -Slow SQL statements have always been a pain point in data O&M. How to effectively diagnose the root causes of slow SQL statements is a big challenge. Based on the characteristics of MogDB and the slow SQL diagnosis experience of DBAs on the live network, this tool supports more than 25 root causes of slow SQL statements, outputs multiple root causes based on the possibility, and provides specific solutions. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-2-environment-deployment.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-2-environment-deployment.md deleted file mode 100644 index c9c04efc..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-2-environment-deployment.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Environment Deployment -summary: Environment Deployment -author: Guo Huan -date: 2022-05-06 ---- - -# Environment Deployment - -- The database is working properly. -- The metric collection system is running properly. 
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-3-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-3-usage-guide.md deleted file mode 100644 index 7eff71a5..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-3-usage-guide.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2022-05-06 ---- - -# Usage Guide - -Assume that the **confpath** configuration file directory has been initialized. - -- Run the following command to start only the slow SQL diagnosis function and output the top 3 root causes (for details, see the description of the **service** subcommand): - - ``` - gs_dbmind service start -c confpath --only-run slow_query_diagnosis - ``` - -- Run the following command to diagnose slow SQL statements in interactive mode: - - ``` - gs_dbmind component slow_query_diagnosis show -c confpath --query SQL --start-time timestamps0 --end-time timestamps1 - ``` - -- Run the following command to manually clear historical prediction results: - - ``` - gs_dbmind component slow_query_diagnosis clean -c confpath --retention-days DAYS - ``` - -- Run the following command to stop the services that have been started: - - ``` - gs_dbmind service stop -c confpath - ``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-4-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-4-obtaining-help-information.md deleted file mode 100644 index fb76fac7..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-4-obtaining-help-information.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2022-05-06 ---- - -# Obtaining Help Information - -You can run the **–help** command to obtain the help information. 
For example: - -``` -gs_dbmind component slow_query_diagnosis --help -``` - -``` -usage: [-h] -c DIRECTORY [--query SLOW_QUERY] - [--start-time TIMESTAMP_IN_MICROSECONDS] - [--end-time TIMESTAMP_IN_MICROSECONDS] [--retention-days DAYS] - {show,clean} - -Slow Query Diagnosis: Analyse the root cause of slow query - -positional arguments: - {show,clean} choose a functionality to perform - -optional arguments: - -h, --help show this help message and exit - -c DIRECTORY, --conf DIRECTORY - set the directory of configuration files - --query SLOW_QUERY set a slow query you want to retrieve - --start-time TIMESTAMP_IN_MICROSECONDS - set the start time of a slow SQL diagnosis result to - be retrieved - --end-time TIMESTAMP_IN_MICROSECONDS - set the end time of a slow SQL diagnosis result to be - retrieved - --retention-days DAYS - clear historical diagnosis results and set the maximum - number of days to retain data -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-5-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-5-command-reference.md deleted file mode 100644 index a6b1931c..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-5-command-reference.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2022-05-06 ---- - -# Command Reference - -**Table 1** gs_dbmind component slow_query_diagnosis parameters - -| Parameter | Description | Value Range | -| :--------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| -h, --help | Help information | - | -| action | Action parameter | - **show**: displays results.
- **clean**: clears results.
- **diagnosis**: interactive diagnosis. | -| -c,--conf | Configuration directory | - | -| --query | Slow SQL text | * | -| --start-time | Timestamp of the start time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | -| --end-time | Timestamp of the end time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | -| --retention-days | Number of days retaining results | Non-negative real number | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-6-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-6-troubleshooting.md deleted file mode 100644 index 8f80a70f..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/3-slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/3-6-troubleshooting.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2022-05-06 ---- - -# Troubleshooting - -- If you run the interactive diagnosis command for a slow SQL statement that has not been executed, no diagnosis result is provided. -- If the exporter metric collection function is not enabled, the slow SQL diagnosis function is not available. -- After the parameters in the configuration file are reset, you need to restart the service process for the settings to take effect. -- When the interactive diagnosis function of slow SQL statements is used, the tool obtains necessary data based on the RPC and data collection services. Therefore, if the RPC and data collection services are not started, the diagnosis cannot be performed. -- When the diagnosis function is used for interactive diagnosis, the tool checks the entered SQL and database. If the entered SQL and database are invalid, the diagnosis cannot be performed. -- During slow SQL diagnosis, SMALL\_SHARED\_BUFFER needs to collect column information of related tables. Therefore, ensure that the opengauss\_exporter connection user has the permission on the schema to which the table belongs. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-1-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-1-overview.md deleted file mode 100644 index 33df1fa4..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-1-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2022-05-06 ---- - -# Overview - -The trend prediction module predicts the future time series change trend based on historical time series data. The framework of this module has been decoupled to flexibly change prediction algorithms. This module can automatically select algorithms for different feature time series. The LR regression algorithm for linear feature time series prediction and the ARIMA algorithm for non-linear feature prediction are supported. At present, this module can cover the accurate prediction of linear time series, non-linear time series and periodic time series. 
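For quick reference, the following is a minimal sketch of retrieving a prediction once metrics have been collected; the metric name os_cpu_usage and the time window are illustrative placeholders, and the command shape follows the usage guide and command reference below.

```
# A minimal sketch, assuming an initialized confpath and a collected metric.
# The metric name (os_cpu_usage) and the time window below are illustrative only.
gs_dbmind component forecast show -c confpath --metric-name os_cpu_usage --start-time "2022-05-06 00:00:00" --end-time "2022-05-06 12:00:00"
```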
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-2-environment-deployment.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-2-environment-deployment.md
deleted file mode 100644
index e370be78..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-2-environment-deployment.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Environment Deployment
-summary: Environment Deployment
-author: Guo Huan
-date: 2022-05-06
----
-
-# Environment Deployment
-
-The metric collection system is running properly.
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-3-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-3-usage-guide.md
deleted file mode 100644
index d5283c1e..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-3-usage-guide.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-title: Usage Guide
-summary: Usage Guide
-author: Guo Huan
-date: 2022-05-06
----
-
-# Usage Guide
-
-Assume that the **confpath** configuration file directory has been initialized.
-
-- Run the following command to start only the trend prediction function. For more usage, see the description of the **service** subcommand.
-
-  ```
-  gs_dbmind service start -c confpath --only-run forecast
-  ```
-
-- Run the following command to view the prediction result of a metric in a specified time range:
-
-  ```
-  gs_dbmind component forecast show -c confpath --metric-name METRIC --start-time timestamps0 --end-time timestamps1
-  ```
-
-- Run the following command to manually clear historical prediction results:
-
-  ```
-  gs_dbmind component forecast clean -c confpath --retention-days DAYS
-  ```
-
-- Run the following command to stop the services that have been started:
-
-  ```
-  gs_dbmind service stop -c confpath
-  ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-4-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-4-obtaining-help-information.md
deleted file mode 100644
index cfb64369..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-4-obtaining-help-information.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: Obtaining Help Information
-summary: Obtaining Help Information
-author: Guo Huan
-date: 2022-05-06
----
-
-# Obtaining Help Information
-
-You can run the **--help** command to obtain the help information.
For example: - -``` -gs_dbmind component forecast --help -``` - -``` -usage: [-h] -c DIRECTORY [--metric-name METRIC_NAME] [--host HOST] [--start-time TIMESTAMP_IN_MICROSECONDS] [--end-time TIMESTAMP_IN_MICROSECONDS] [--retention-days DAYS] - {show,clean} - -Workload Forecasting: Forecast monitoring metrics - -positional arguments: - {show,clean} choose a functionality to perform - -optional arguments: - -h, --help show this help message and exit - -c DIRECTORY, --conf DIRECTORY - set the directory of configuration files - --metric-name METRIC_NAME - set a metric name you want to retrieve - --host HOST set a host you want to retrieve - --start-time TIMESTAMP_IN_MICROSECONDS - set a start time of for retrieving - --end-time TIMESTAMP_IN_MICROSECONDS - set a end time of for retrieving - --retention-days DAYS - clear historical diagnosis results and set the maximum number of days to retain data -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-5-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-5-command-reference.md deleted file mode 100644 index 4d7741bd..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-5-command-reference.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2022-05-06 ---- - -# Command Reference - -**Table 1** gs_dbmind component forecast parameters - -| Parameter | Description | Value Range | -| :-------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| -h, –help | Help information | - | -| action | Action parameter | - **show**: displays results.
- **clean**: clears results. |
-| -c, --conf | Configuration directory | - |
-| --metric-name | Specifies the metric name to be displayed, which is used for filtering. | - |
-| --host | Specifies the service IP address and port number, which is used for filtering. | - |
-| --start-time | Timestamp of the start time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format |
-| --end-time | Timestamp of the end time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format |
-| --retention-days | Number of days retaining results | Non-negative real number |
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-6-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-6-troubleshooting.md
deleted file mode 100644
index 46cf947b..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/4-forcast-trend-prediction/4-6-troubleshooting.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Troubleshooting
-summary: Troubleshooting
-author: Guo Huan
-date: 2022-05-06
----
-
-# Troubleshooting
-
-- Considering the actual service and the model prediction effect, you are advised to set the trend prediction duration to a value greater than 3600 seconds. (If the metric collection period is 15 seconds, the number of data records collected is 240.) Otherwise, the prediction effect will deteriorate, and the service will be abnormal when the data volume is extremely small. The default value is 3600 seconds.
-- After the parameters in the configuration file are reset, you need to restart the service process for the settings to take effect.
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-1-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-1-overview.md
deleted file mode 100644
index 4a79133b..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-1-overview.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: SQLdiag Slow SQL Discovery
-summary: SQLdiag Slow SQL Discovery
-author: Guo Huan
-date: 2021-05-19
----
-
-# SQLdiag Slow SQL Discovery
-
-SQLdiag is a framework for predicting the execution duration of SQL statements in MogDB. The existing prediction technologies are mainly based on model prediction of execution plans. These prediction solutions are applicable only to jobs whose execution plans can be obtained in OLAP scenarios, and are not useful for quick queries in OLTP or HTAP scenarios. Different from the preceding solutions, SQLdiag focuses on the historical SQL statements of databases. Because the execution duration of database SQL statements does not vary greatly over a short time, SQLdiag can detect statement sets similar to the entered statements in the historical data, and predict the SQL statement execution duration based on the SQL vectorization technology and the time series prediction algorithm. This framework has the following benefits:
-
-1. It does not require SQL execution plans, so it has no impact on database performance.
-2. The framework is widely applicable, unlike many narrowly targeted algorithms in the industry that may be applicable only to OLTP or OLAP.
-3. The framework is robust and easy to understand. Users can design their own prediction models by simply modifying the framework.
-
-SQLdiag is an SQL statement execution time prediction tool. It predicts the execution time of SQL statements through a template-based method or deep learning, based on statement logic similarity and historical execution records, without obtaining the SQL statement execution plans. Abnormal SQL statements can also be detected with this tool.
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-2-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-2-usage-guide.md
deleted file mode 100644
index 74336594..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-2-usage-guide.md
+++ /dev/null
@@ -1,96 +0,0 @@
----
-title: Usage Guide
-summary: Usage Guide
-author: Guo Huan
-date: 2021-10-21
----
-
-# Usage Guide
-
-## Prerequisites
-
-- You have obtained training data.
-- If you use the provided tool to collect training data, you need to enable the WDR function. The involved parameters are **track_stmt_stat_level** and **log_min_duration_statement**. For details, see the following sections.
-- To ensure the prediction accuracy, the historical statement logs provided by users should be as comprehensive and representative as possible.
-
-## Collecting SQL Statements
-
-This tool requires users to prepare data in advance. Each sample is separated by a newline character. The training data format is as follows:
-
-```
-SQL,EXECUTION_TIME
-```
-
-The prediction data format is as follows:
-
-```
-SQL
-```
-
-**SQL** indicates the text of an SQL statement, and **EXECUTION_TIME** indicates the execution time of the SQL statement. For details about the sample data, see **train.csv** and **predict.csv** in **sample_data**.
-
-You can collect training data in the required format. The tool also provides the **load_sql_from_wdr.py** script for automatic collection. The script obtains SQL information based on the WDR report. The involved parameters are **log_min_duration_statement** and **track_stmt_stat_level**:
-
-- **log_min_duration_statement** indicates the slow SQL threshold. If the value is **0**, full collection is performed. The unit is millisecond.
-- **track_stmt_stat_level** indicates the information capture level. You are advised to set it to **'L0,L0'**.
-
-After these parameters are set, a certain amount of system resources may be occupied, but the usage is generally low. In continuous high-concurrency scenarios, this may cause a performance loss of less than 5%. If the database concurrency is low, the performance loss can be ignored. The following script is stored in the sqldiag root directory (**$GAUSSHOME/bin/components/sqldiag**).
-
-```
-Use a script to obtain the training set:
-load_sql_from_wdr.py [-h] --port PORT --start_time START_TIME
-                     --finish_time FINISH_TIME [--save_path SAVE_PATH]
-Example:
-  python load_sql_from_wdr.py --start_time "2021-04-25 00:00:00" --finish_time "2021-04-26 14:00:00" --port 5432 --save_path ./data.csv
-```
-
-## Procedure
-
-1. Provide historical logs for model training.
-
-2. Perform training and prediction.
- - ``` - Template-based training and prediction: - gs_dbmind component sqldiag [train, predict] -f FILE --model template --model-path template_model_path - DNN-based training and prediction: - gs_dbmind component sqldiag [train, predict] -f FILE --model dnn --model-path dnn_model_path - ``` - -## Examples - -Use the provided test data to perform template-based training: - -``` -gs_dbmind component sqldiag train -f ./sample_data/train.csv --model template --model-path ./template -``` - -Use the provided test data for template-based prediction: - -``` -gs_dbmind component sqldiag predict -f ./sample_data/predict.csv --model template --model-path ./template --predicted-file ./result/t_result -``` - -Use the provided test data to update the template-based model: - -``` -gs_dbmind component sqldiag finetune -f ./sample_data/train.csv --model template --model-path ./template -``` - -Use the provided test data to perform DNN-based training: - -``` -gs_dbmind component sqldiag train -f ./sample_data/train.csv --model dnn --model-path ./dnn_model -``` - -Use the provided test data for DNN-based prediction: - -``` -gs_dbmind component sqldiag predict -f ./sample_data/predict.csv --model dnn --model-path ./dnn_model --predicted-file -``` - -Use the provided test data to update the DNN-based model: - -``` -gs_dbmind component sqldiag finetune -f ./sample_data/train.csv --model dnn --model-path ./dnn_model -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-3-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-3-obtaining-help-information.md deleted file mode 100644 index b863b043..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-3-obtaining-help-information.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2021-05-19 ---- - -# Obtaining Help Information - -Before using the SQLdiag tool, run the following command to obtain help information: - -``` -gs_dbmind component sqldiag --help -``` - -The command output is as follows: - -``` -usage: [-h] [-f CSV_FILE] [--predicted-file PREDICTED_FILE] - [--model {template,dnn}] --model-path MODEL_PATH - [--config-file CONFIG_FILE] - {train,predict,finetune} - -SQLdiag integrated by MogDB. - -positional arguments: - {train,predict,finetune} - The training mode is to perform feature extraction and - model training based on historical SQL statements. The - prediction mode is to predict the execution time of a - new SQL statement through the trained model. - -optional arguments: - -h, --help show this help message and exit - -f CSV_FILE, --csv-file CSV_FILE - The data set for training or prediction. The file - format is CSV. If it is two columns, the format is - (SQL statement, duration time). If it is three - columns, the format is (timestamp of SQL statement - execution time, SQL statement, duration time). - --predicted-file PREDICTED_FILE - The file path to save the predicted result. - --model {template,dnn} - Choose the model model to use. - --model-path MODEL_PATH - The storage path of the model file, used to read or - save the model file. 
-  --config-file CONFIG_FILE
-```
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-4-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-4-command-reference.md
deleted file mode 100644
index 533a05e9..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-4-command-reference.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: Command Reference
-summary: Command Reference
-author: Guo Huan
-date: 2021-05-19
----
-
-# Command Reference
-
-**Table 1** Command-line options
-
-| Parameter | Description | Value Range |
-| :-------------- | :----------------------------------- | :------------ |
-| -f | Training or prediction file location | N/A |
-| --predicted-file | Prediction result location | N/A |
-| --model | Model selection | template, dnn |
-| --model-path | Location of the training model | N/A |
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-5-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-5-troubleshooting.md
deleted file mode 100644
index 442f8d1d..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/5-sqldiag-slow-sql-discovery/5-5-troubleshooting.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Troubleshooting
-summary: Troubleshooting
-author: Guo Huan
-date: 2021-05-19
----
-
-# Troubleshooting
-
-- Failure in the training scenario: Check whether the file path of historical logs is correct and whether the file format meets the requirements.
-- Failure in the prediction scenario: Check whether the model path is correct. Ensure that the format of the file to be predicted is correct.
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-1-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-1-overview.md
deleted file mode 100644
index ff2cdb48..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-1-overview.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Overview
-summary: Overview
-author: Guo Huan
-date: 2022-10-24
----
-
-# Overview
-
-SQL Rewriter is an SQL rewriting tool. It converts query statements into more efficient or standard forms based on preset rules to improve query efficiency.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - This function does not apply to statements that contain subqueries.
-> - This function supports only SELECT statements and DELETE statements that delete an entire table.
-> - This function contains 11 rewriting rules. Statements that do not comply with the rewriting rules are not processed.
-> - This function displays original query statements and rewritten statements on the screen. You are not advised to rewrite SQL statements that contain sensitive information.
-> - The rule for converting UNION to UNION ALL avoids deduplication and improves the query performance. The obtained result may be redundant.
-> - If a statement contains ORDER BY + specified column name or GROUP BY + specified column name, the SelfJoin rule is not applicable.
-> - The tool does not ensure equivalent conversion of query statements. The purpose is to improve the efficiency of query statements. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-2-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-2-usage-guide.md deleted file mode 100644 index 3f6fe167..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-2-usage-guide.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2022-10-24 ---- - -# Usage Guide - -## Prerequisites - -The database and connection are normal. - -## Example - -Use the **tpcc** database as an example: - -```shell -gs_dbmind component sql_rewriter 5030 tpcc queries.sql --db-host 127.0.0.1 --db-user myname --schema public -``` - -**queries.sql** is the SQL statement to be modified. The content is as follows: - -```sql -select cfg_name from bmsql_config group by cfg_name having cfg_name='1'; -delete from bmsql_config; -delete from bmsql_config where cfg_name='1'; -``` - -The result is multiple rewritten query statements, which are displayed on the screen (the statements that cannot be rewritten are displayed as null), as shown in the following. - -```shell -+--------------------------------------------------------------------------+------------------------------+ -| Raw SQL | Rewritten SQL | -+--------------------------------------------------------------------------+------------------------------+ -| select cfg_name from bmsql_config group by cfg_name having cfg_name='1'; | SELECT cfg_name | -| | FROM bmsql_config | -| | WHERE cfg_name = '1'; | -| delete from bmsql_config; | TRUNCATE TABLE bmsql_config; | -| delete from bmsql_config where cfg_name='1'; | | -+--------------------------------------------------------------------------+------------------------------+ -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-3-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-3-obtaining-help-information.md deleted file mode 100644 index a5668891..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-3-obtaining-help-information.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2022-10-24 ---- - -# Obtaining Help Information - -Before using the SQL Rewriter tool, run the following command to obtain help information: - -```shell -gs_dbmind component sql_rewriter --help -``` - -The following information is displayed: - -```shell -usage: [-h] [--db-host DB_HOST] [--db-user DB_USER] [--schema SCHEMA] - db_port database file - -SQL Rewriter - -positional arguments: - db_port Port for database - database Name for database - file File containing SQL statements which need to rewrite - -optional arguments: - -h, --help show this help message and exit - --db-host DB_HOST Host for database - --db-user DB_USER Username for database log-in - --schema SCHEMA Schema name for the current business data -``` - -Passwords are entered through pipes or in interactive mode. 
For password-free users, any input can pass the verification.
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-4-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-4-command-reference.md
deleted file mode 100644
index 899e4191..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-4-command-reference.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: Command Reference
-summary: Command Reference
-author: Guo Huan
-date: 2022-10-24
----
-
-# Command Reference
-
-**Table 1** Command line parameters
-
-| Parameter | Definition |
-| :-------- | :------------------------------------------------------- |
-| db_port | Database port number |
-| database | Database name |
-| file | Path of the file that contains multiple query statements |
-| db-host | (Optional) Database host address |
-| db-user | (Optional) Database user name |
-| schema | (Optional) Schema name. The default is the **public** schema. |
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-5-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-5-troubleshooting.md
deleted file mode 100644
index d78caa09..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/6-sql-rewriter-sql-statement-rewriting/6-5-troubleshooting.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Troubleshooting
-summary: Troubleshooting
-author: Guo Huan
-date: 2022-10-24
----
-
-# Troubleshooting
-
-- If an SQL statement cannot be rewritten, check whether the SQL statement complies with the rewriting rules and whether the SQL syntax is correct.
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-1-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-1-overview.md
deleted file mode 100644
index d2fb9557..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-1-overview.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Overview
-summary: Overview
-author: Guo Huan
-date: 2022-10-24
----
-
-# Overview
-
-The anomaly detection module applies statistical methods to time series data to detect possible anomalies in the data. The framework of this module is decoupled so that different anomaly detection algorithms can be replaced flexibly. In addition, this module can automatically select algorithms based on different features of time series data. It supports anomaly value detection, threshold detection, box plot detection, gradient detection, growth rate detection, fluctuation rate detection, and status conversion detection.
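To make the detector list above concrete, here is a minimal sketch of checking one metric for a single anomaly type; the metric name os_cpu_usage, the host, and the millisecond timestamps are illustrative placeholders, while the command shape and the **spike** detector name follow the usage guide and help information below.

```shell
# A minimal sketch, assuming an initialized confpath and a running collection system.
# os_cpu_usage, the host, and the timestamps (in ms) are illustrative only; the
# window should span at least 30 seconds, per the note in the usage guide.
gs_dbmind component anomaly_detection --conf confpath --action overview --metric os_cpu_usage --start-time 1666576800000 --end-time 1666577400000 --host 127.0.0.1 --anomaly spike
```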
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-2-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-2-usage-guide.md deleted file mode 100644 index dc22e2c8..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-2-usage-guide.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2022-10-24 ---- - -# Usage Guide - -Assume that the metric collection system is running properly and the configuration file directory **confpath** has been initialized. You can run the following command to implement this feature: - -Enable only the anomaly detection function: - -```shell -gs_dbmind service start --conf confpath --only-run anomaly_detection -``` - -View a metric on all nodes from timestamps1 to timestamps2: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 -``` - -View a metric on a specific node from timestamps1 to timestamps2: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 --host ip_address --anomaly anomaly_type -``` - -View a metric on all nodes from timestamps1 to timestamps2 in a specific anomaly detection mode: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 --anomaly anomaly_type -``` - -View a metric on a specific node from timestamps1 to timestamps2 in a specific anomaly detection mode: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 --host ip_address --anomaly anomaly_type -``` - -Visualize a metric on all nodes from timestamps1 to timestamps2 in a specific anomaly detection mode: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action plot --metric metric_name --start-time timestamps1 --end-time timestamps2 --host ip_address --anomaly anomaly_type -``` - -Stop the started service: - -```shell -gs_dbmind service stop --conf confpath -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** When setting anomaly detection parameters, ensure that start-time is at least 30 seconds earlier than end-time. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-3-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-3-obtaining-help-information.md deleted file mode 100644 index fe993502..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-3-obtaining-help-information.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2022-10-24 ---- - -# Obtaining Help Information - -You can run the **--help** command to obtain the help information. 
For example: - -```shell -gs_dbmind component anomaly_detection --help -``` - -The following information is displayed: - -```shell -usage: anomaly_detection.py [-h] --action {overview,plot} -c CONF -m METRIC -s - START_TIME -e END_TIME [-H HOST] [-a ANOMALY] - -Workload Anomaly detection: Anomaly detection of monitored metric. - -optional arguments: - -h, --help show this help message and exit - --action {overview,plot} - choose a functionality to perform - -c CONF, --conf CONF set the directory of configuration files - -m METRIC, --metric METRIC - set the metric name you want to retrieve - -s START_TIME, --start-time START_TIME - set the start time of for retrieving in ms - -e END_TIME, --end-time END_TIME - set the end time of for retrieving in ms - -H HOST, --host HOST set a host of the metric, ip only or ip and port. - -a ANOMALY, --anomaly ANOMALY - set a anomaly detector of the metric(increase_rate, - level_shift, spike, threshold) -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-4-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-4-command-reference.md deleted file mode 100644 index 5d966981..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-4-command-reference.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2022-10-24 ---- - -# Command Reference - -**Table 1** Command line parameters - -| Parameter | Description | Value Range | -| :---------------- | :----------------------------------------------------------- | :------------------------------------------- | -| -h, --help | Help command | - | -| --action | Action parameter | - **overview**
- **plot**: visualization | -| -c, --conf | Configuration file directory | - | -| -m, --metric-name | Metric name to be displayed | - | -| -H, --host | Data source IP address which is used to filter data | IP address or IP address + port number | -| -a, --anomaly | Anomaly detection mode, which is used for filtering | - | -| -s, --start-time | Timestamp of the start time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | -| -e, --end-time | Timestamp of the end time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-5-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-5-troubleshooting.md deleted file mode 100644 index 5683b12c..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/7-anomaly-detection/7-5-troubleshooting.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2022-10-24 ---- - -# Troubleshooting - -- Overview scenario failure: Check whether the configuration file path is correct and whether the configuration file information is complete. Check whether the metric name is correct, whether the host IP address is correct, whether the anomaly detection type is correct, and whether the metric data exists in the start time and end time. -- Visualization scenario failure: Check whether the configuration file path is correct and whether the configuration file information is complete. Check whether the metric name is correct, whether the host IP address is correct, whether the anomaly detection type is correct, and whether the metric data exists in the start time and end time. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/ai-sub-functions-of-the-dbmind.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/ai-sub-functions-of-the-dbmind.md deleted file mode 100644 index a787f482..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/ai-sub-functions-of-the-dbmind.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: AI Sub-Functions of the DBMind -summary: AI Sub-Functions of the DBMind -author: zhang cuiping -date: 2023-04-07 ---- - -# AI Sub-Functions of the DBMind - -You can run the **component** subcommand of **gs_dbmind** to enable the corresponding AI sub-functions. The following sections describe the AI functions in detail.
- -- **[X-Tuner: Parameter Tuning and Diagnosis](./x-tuner-parameter-optimization-and-diagnosis/x-tuner-parameter-optimization-and-diagnosis.md)** -- **[Index-advisor: Index Recommendation](./index-advisor-index-recommendation/index-advisor-index-recommendation.md)** -- **[Slow Query Diagnosis: Root Cause Analysis for Slow SQL Statements](./slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements.md)** -- **[Forecast: Trend Prediction](./forcast-trend-prediction/forcast-trend-prediction.md)** -- **[SQLdiag: Slow SQL Discovery](./sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery.md)** -- **[SQL Rewriter: SQL Statement Rewriting](./sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting.md)** -- **[Anomaly Detection](./anomaly-detection/anomaly-detection.md)** -- **[Anomaly-analysis: Multi-Metric Correlation Analysis](./anomaly-analysis/anomaly-analysis.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-command-reference.md deleted file mode 100644 index f9a438d8..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-command-reference.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: zhang cuiping -date: 2023-04-07 ---- - -# Command Reference - -**Table 1** Command line parameters - -| Parameter | Description | Value Range | -| :--------------- | :----------------------------------------------------------- | :--------------------------------------- | -| -h, --help | Help command | - | -| -c, --conf | Configuration file directory | - | -| -m, --metric | Metric name to be displayed | - | -| -H, --host | Data source IP address which is used to filter data | IP address or IP address + port number | -| -s, --start-time | Timestamp of the start time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | -| -e, --end-time | Timestamp of the end time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | -| --csv-dump-path | Path of the exported CSV file. | - | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-obtaining-help-information.md deleted file mode 100644 index 8a0b4ae1..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-obtaining-help-information.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: zhang cuiping -date: 2023-04-07 ---- - -# Obtaining Help Information - -You can run the **--help** command to obtain the help information. For example: - -``` -gs_dbmind component anomaly_analysis --help -``` - -The following information is displayed: - -``` -usage: anomaly_analysis.py [-h] -c CONF -m METRIC -s START_TIME -e END_TIME -H - HOST [--csv-dump-path CSV_DUMP_PATH] - -Workload Anomaly analysis: Anomaly analysis of monitored metric.
- -optional arguments: - -h, --help show this help message and exit - -c CONF, --conf CONF set the directory of configuration files - -m METRIC, --metric METRIC - set the metric name you want to retrieve - -s START_TIME, --start-time START_TIME - set the start time of for retrieving in ms, supporting - UNIX-timestamp with microsecond or datetime format - -e END_TIME, --end-time END_TIME - set the end time of for retrieving in ms, supporting - UNIX-timestamp with microsecond or datetime format - -H HOST, --host HOST set a host of the metric, ip only or ip and port. - --csv-dump-path CSV_DUMP_PATH - dump the result csv file to the dump path if it is - specified. -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-overview.md deleted file mode 100644 index 2d38ae13..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: zhang cuiping -date: 2023-04-07 ---- - -# Overview - -The Anomaly analysis multi-metric correlation module analyzes the Pearson correlation coefficient of time series data to find the metrics that are most closely related to known exceptions. The framework of this module is decoupled. The supported time series databases include Prometheus and InfluxDB. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-troubleshooting.md deleted file mode 100644 index 6bbef3ce..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-troubleshooting.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: zhang cuiping -date: 2023-04-07 ---- - -# Troubleshooting - -Analysis scenario failure: Check whether the configuration file path is correct and whether the configuration file information is complete. Check whether the metric name is correct, whether the host address is correct, and whether the metric data exists in the start time and end time. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-usage-guide.md deleted file mode 100644 index 3e9b455a..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis-usage-guide.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: zhang cuiping -date: 2023-04-07 ---- - -# Usage Guide - -Assume that the metric collection system is running properly and the configuration file directory **confpath** has been initialized. You can run the following command to implement this feature: - -For a metric, analyze the correlation between other metrics and the data of the metric from timestamps1 to timestamps2 on a specific node. 
- -``` -gs_dbmind component anomaly_analysis --conf confpath --metric metric_name --start-time timestamps1 --end-time timestamps2 --host ip_address -``` - -For a metric, analyze the correlation between other metrics and the metric data from timestamps1 to timestamps2 on a specific node, and save the analysis result as a CSV file. - -``` -gs_dbmind component anomaly_analysis --conf confpath --metric metric_name --start-time timestamps1 --end-time timestamps2 --host ip_address --csv-dump-path csv_path -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** When setting anomaly_analysis parameters, ensure that start-time is at least 30 seconds earlier than end-time. -> \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis.md deleted file mode 100644 index e226f720..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-analysis/anomaly-analysis.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anomaly-analysis -summary: Anomaly-analysis -author: zhang cuiping -date: 2023-04-07 ---- - -# Anomaly-analysis: Multi-Metric Correlation Analysis - -- **[Overview](anomaly-analysis-overview.md)** -- **[Usage Guide](anomaly-analysis-usage-guide.md)** -- **[Obtaining Help Information](anomaly-analysis-obtaining-help-information.md)** -- **[Command Reference](anomaly-analysis-command-reference.md)** -- **[Troubleshooting](anomaly-analysis-troubleshooting.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-command-reference.md deleted file mode 100644 index 1f42722e..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-command-reference.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2022-10-24 ---- - -# Command Reference - -**Table 1** Command line parameters - -| Parameter | Description | Value Range | -| :---------------- | :----------------------------------------------------------- | :--------------------------------------- | -| -h, --help | Help command | - | -| --action | Action parameter | - **overview** - **plot**: visualization | -| -c, --conf | Configuration file directory | - | -| -m, --metric-name | Metric name to be displayed | - | -| -H, --host | Data source IP address which is used to filter data | IP address or IP address + port number | -| -a, --anomaly | Anomaly detection mode, which is used for filtering | - | -| -s, --start-time | Timestamp of the start time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | -| -e, --end-time | Timestamp of the end time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S.
| Positive integer or date and time format | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-obtaining-help-information.md deleted file mode 100644 index bd14d90b..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-obtaining-help-information.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2022-10-24 ---- - -# Obtaining Help Information - -You can run the **--help** command to obtain the help information. For example: - -```shell -gs_dbmind component anomaly_detection --help -``` - -The following information is displayed: - -```shell -usage: anomaly_detection.py [-h] --action {overview,plot} -c CONF -m METRIC -s - START_TIME -e END_TIME [-H HOST] [-a ANOMALY] - -Workload Anomaly detection: Anomaly detection of monitored metric. - -optional arguments: - -h, --help show this help message and exit - --action {overview,plot} - choose a functionality to perform - -c CONF, --conf CONF set the directory of configuration files - -m METRIC, --metric METRIC - set the metric name you want to retrieve - -s START_TIME, --start-time START_TIME - set the start time of for retrieving in ms - -e END_TIME, --end-time END_TIME - set the end time of for retrieving in ms - -H HOST, --host HOST set a host of the metric, ip only or ip and port. - -a ANOMALY, --anomaly ANOMALY - set a anomaly detector of the metric(increase_rate, - level_shift, spike, threshold) -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-overview.md deleted file mode 100644 index fe7eaf71..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2022-10-24 ---- - -# Overview - -The anomaly detection module analyzes time series data with statistical methods to detect possible exceptions in the data. The framework of this module is decoupled so that different anomaly detection algorithms can be swapped in flexibly. In addition, this module can automatically select algorithms based on the features of the time series data. It supports outlier detection, threshold detection, box plot detection, gradient detection, growth rate detection, fluctuation rate detection, and status conversion detection.
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-troubleshooting.md deleted file mode 100644 index 05e297d6..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-troubleshooting.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2022-10-24 ---- - -# Troubleshooting - -- Overview scenario failure: Check whether the configuration file path is correct and whether the configuration file information is complete. Check whether the metric name is correct, whether the host IP address is correct, whether the anomaly detection type is correct, and whether the metric data exists in the start time and end time. -- Visualization scenario failure: Check whether the configuration file path is correct and whether the configuration file information is complete. Check whether the metric name is correct, whether the host IP address is correct, whether the anomaly detection type is correct, and whether the metric data exists in the start time and end time. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-usage-guide.md deleted file mode 100644 index dc22e2c8..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection-usage-guide.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2022-10-24 ---- - -# Usage Guide - -Assume that the metric collection system is running properly and the configuration file directory **confpath** has been initialized. 
You can run the following command to implement this feature: - -Enable only the anomaly detection function: - -```shell -gs_dbmind service start --conf confpath --only-run anomaly_detection -``` - -View a metric on all nodes from timestamps1 to timestamps2: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 -``` - -View a metric on a specific node from timestamps1 to timestamps2: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 --host ip_address -``` - -View a metric on all nodes from timestamps1 to timestamps2 in a specific anomaly detection mode: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 --anomaly anomaly_type -``` - -View a metric on a specific node from timestamps1 to timestamps2 in a specific anomaly detection mode: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action overview --metric metric_name --start-time timestamps1 --end-time timestamps2 --host ip_address --anomaly anomaly_type -``` - -Visualize a metric on all nodes from timestamps1 to timestamps2 in a specific anomaly detection mode: - -```shell -gs_dbmind component anomaly_detection --conf confpath --action plot --metric metric_name --start-time timestamps1 --end-time timestamps2 --anomaly anomaly_type -``` - -Stop the started service: - -```shell -gs_dbmind service stop --conf confpath -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** When setting anomaly detection parameters, ensure that start-time is at least 30 seconds earlier than end-time.
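Because **--start-time** and **--end-time** take UNIX timestamps in milliseconds, the sketch below shows one way to derive a valid window; it assumes GNU **date** (for the `%3N` millisecond format), and the one-hour window is only an example that comfortably satisfies the 30-second rule in the note above:

```shell
# Hypothetical helper: build a one-hour window ending now, in milliseconds
end_ts=$(date +%s%3N)               # current UNIX time in ms (GNU date)
start_ts=$((end_ts - 3600 * 1000))  # one hour earlier
gs_dbmind component anomaly_detection --conf confpath --action overview \
    --metric metric_name --start-time "$start_ts" --end-time "$end_ts"
```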
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection.md deleted file mode 100644 index 1974c16f..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/anomaly-detection/anomaly-detection.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anomaly detection -summary: Anomaly detection -author: zhang cuiping -date: 2023-04-07 ---- - -# Anomaly Detection - -- **[Overview](anomaly-detection-overview.md)** -- **[Usage Guide](anomaly-detection-usage-guide.md)** -- **[Obtaining Help Information](anomaly-detection-obtaining-help-information.md)** -- **[Command Reference](anomaly-detection-command-reference.md)** -- **[Troubleshooting](anomaly-detection-troubleshooting.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-command-reference.md deleted file mode 100644 index da351b20..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-command-reference.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2022-05-06 ---- - -# Command Reference - -**Table 1** gs_dbmind component forecast parameters - -| Parameter | Description | Value Range | -| :--------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| -h, --help | Help information | - | -| action | Action parameter | - **show**: displays results. - **clean**: clears results. | -| -c, --conf | Configuration directory | - | -| --metric-name | Specifies the metric name to be displayed, which is used for filtering. | - | -| --host | Specifies the service IP address and port number, which is used for filtering. | - | -| --start-time | Timestamp of the start time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | -| --end-time | Timestamp of the end time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | -| --retention-days | Number of days retaining results | Non-negative real number | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-environment-deployment.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-environment-deployment.md deleted file mode 100644 index d3f1eae4..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-environment-deployment.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Environment Deployment -summary: Environment Deployment -author: Guo Huan -date: 2022-05-06 ---- - -# Environment Deployment - -The metric collection system is running properly.
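One way to sanity-check this precondition is to query the time series database that stores the collected metrics; the sketch below is an assumption-laden example for a Prometheus-based setup (host and port are placeholders):

```shell
# Hypothetical check, assuming Prometheus backs the metric collection system:
# every scrape target should report up == 1 before forecasts are attempted
curl -s 'http://prometheus-host:9090/api/v1/query?query=up'
```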
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-obtaining-help-information.md deleted file mode 100644 index bb0efd0c..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-obtaining-help-information.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2022-05-06 ---- - -# Obtaining Help Information - -You can run the **--help** command to obtain the help information. For example: - -``` -gs_dbmind component forecast --help -``` - -``` -usage: forecast.py [-h] -c DIRECTORY [--metric-name METRIC_NAME] [--host HOST] - [--labels LABELS] [--start-time TIMESTAMP_IN_MICROSECONDS] - [--end-time TIMESTAMP_IN_MICROSECONDS] - [--retention-days DAYS] [--upper UPPER] [--lower LOWER] - [--warning-hours WARNING-HOURS] - [--csv-dump-path CSV_DUMP_PATH] - {show,clean,early-warning} - -Workload Forecasting: Forecast monitoring metrics - -positional arguments: - {show,clean,early-warning} - Choose a functionality to perform - -optional arguments: - -h, --help show this help message and exit - -c DIRECTORY, --conf DIRECTORY - Set the directory of configuration files - --metric-name METRIC_NAME - Set a metric name you want to retrieve - --host HOST Set a host you want to retrieve. IP only or IP with - port. - --labels LABELS A list of label (format is label=name) separated by - comma(,). Using in warning. - --start-time TIMESTAMP_IN_MICROSECONDS - Set a start time for retrieving, supporting UNIX- - timestamp with microsecond or datetime format - --end-time TIMESTAMP_IN_MICROSECONDS - Set an end time for retrieving, supporting UNIX- - timestamp with microsecond or datetime format - --retention-days DAYS - Clear historical diagnosis results and set the maximum - number of days to retain data - --upper UPPER The upper value of early-warning. Using in warning. - --lower LOWER The lower value of early-warning. Using in warning. - --warning-hours WARNING-HOURS - warning length, unit is hour. - --csv-dump-path CSV_DUMP_PATH - Dump the result CSV file to the path if it is - specified. Use in warning. -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-overview.md deleted file mode 100644 index b4a487d0..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2022-05-06 ---- - -# Overview - -The trend prediction module predicts the future time series change trend based on historical time series data. The framework of this module has been decoupled to flexibly change prediction algorithms. This module can automatically select algorithms for time series with different features. The LR regression algorithm for linear feature time series prediction and the ARIMA algorithm for non-linear feature prediction are supported.
At present, this module can accurately predict linear, non-linear, and periodic time series. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-troubleshooting.md deleted file mode 100644 index 9af967f6..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-troubleshooting.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2022-05-06 ---- - -# Troubleshooting - -- Considering the actual service and model prediction effect, you are advised to set the trend prediction duration to a value greater than 3600 seconds. (If the metric collection period is 15 seconds, the number of data records collected is 240.) Otherwise, the prediction effect will deteriorate, and the service will be abnormal when the data volume is extremely small. The default value is 3600 seconds. -- After the parameters in the configuration file are reset, you need to restart the service process for the settings to take effect. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-usage-guide.md deleted file mode 100644 index c561ba0a..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction-usage-guide.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2022-05-06 ---- - -# Usage Guide - -Assume that the **confpath** configuration file directory has been initialized. - -- Run the following command to start only the trend prediction function. For more usage, see the description of the **service** subcommand.
- - ``` - gs_dbmind service start -c confpath --only-run forecast - ``` - -- Run the following command to view the trend prediction results of a metric: - - ``` - gs_dbmind component forecast show -c confpath --metric-name metric_name --start-time timestamps0 --end-time timestamps1 - ``` - -- Run the following command to manually clear historical prediction results: - - ``` - gs_dbmind component forecast clean -c confpath --retention-days DAYS - ``` - -- Run the following command to stop the services that have been started: - - ``` - gs_dbmind service stop -c confpath - ``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction.md deleted file mode 100644 index 4eba7487..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/forcast-trend-prediction/forcast-trend-prediction.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Forecast -summary: Forecast -author: zhang cuiping -date: 2023-04-07 ---- - -# Forecast - -- **[Overview](forcast-trend-prediction-overview.md)** -- **[Environment Deployment](forcast-trend-prediction-environment-deployment.md)** -- **[Usage Guide](forcast-trend-prediction-usage-guide.md)** -- **[Obtaining Help Information](forcast-trend-prediction-obtaining-help-information.md)** -- **[Command Reference](forcast-trend-prediction-command-reference.md)** -- **[Troubleshooting](forcast-trend-prediction-troubleshooting.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/index-advisor-index-recommendation.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/index-advisor-index-recommendation.md deleted file mode 100644 index 62ae5c9e..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/index-advisor-index-recommendation.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Index-advisor -summary: Index-advisor -author: zhang cuiping -date: 2023-04-07 ---- - -# Index-advisor: Index Recommendation - -This section describes the index recommendation functions, including single-query index recommendation, virtual index recommendation, and workload-level index recommendation.
- -- **[Single-query Index Recommendation](single-query-index-recommendation.md)** -- **[Virtual Index](virtual-index.md)** -- **[Workload-level Index Recommendation](workload-level-index-recommendation.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/single-query-index-recommendation.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/single-query-index-recommendation.md deleted file mode 100644 index 4a3526f9..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/single-query-index-recommendation.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Single-query Index Recommendation -summary: Single-query Index Recommendation -author: Guo Huan -date: 2021-05-19 ---- - -# Single-query Index Recommendation - -The single-query index recommendation function allows users to directly perform operations in the database. This function generates recommended indexes for a single query statement entered by users based on the semantic information of the query statement and the statistics of the database. This function involves the following interfaces: - -**Table 1** Single-query index recommendation APIs - -| Function Name | Parameter | Description | -| :-------------- | :------------------- | :----------------------------------------------------------- | -| gs_index_advise | SQL statement string | Generates a recommendation index for a single query statement. | - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - This function supports only a single SELECT statement and does not support other types of SQL statements. -> - Column-store tables, segment-paged tables, common views, materialized views, global temporary tables, and encrypted databases are not supported. - -
- -## Application Scenarios - -Use the preceding function to obtain the recommendation index generated for the query. The recommendation result consists of the table name and column name of the index. - -For example: - -```sql -mogdb=> select "table", "column" from gs_index_advise('SELECT c_discount from bmsql_customer where c_w_id = 10'); - table | column -----------------+---------- - bmsql_customer | (c_w_id) -(1 row) -``` - -The preceding information indicates that an index should be created on the **c_w_id** column of the **bmsql_customer** table. You can run the following SQL statement to create an index: - -```sql -CREATE INDEX idx on bmsql_customer(c_w_id); -``` - -For some SQL statements, a composite index may be recommended, for example: - -```sql -MogDB=# select "table", "column" from gs_index_advise('select name, age, sex from t1 where age >= 18 and age < 35 and sex = ''f'';'); - table | column --------+------------ - t1 | (age, sex) -(1 row) -``` - -The preceding result indicates that a composite index **(age, sex)** needs to be created on the **t1** table. You can run the following command to create the composite index: - -```sql -CREATE INDEX idx1 on t1(age, sex); -``` - -Specific index types can be recommended for partitioned tables. For example: - -```sql -MogDB=# select "table", "column", "indextype" from gs_index_advise('select name, age, sex from range_table where age = 20;'); - table | column | indextype --------------+--------+----------- - range_table | age | global -(1 row) -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Parameters of the system function **gs_index_advise()** are of the text type. If the parameters contain special characters such as single quotation marks ('), you can use single quotation marks (') to escape the special characters. For details, see the preceding example. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/virtual-index.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/virtual-index.md deleted file mode 100644 index e248174e..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/virtual-index.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: Virtual Index -summary: Virtual Index -author: Guo Huan -date: 2021-05-19 ---- - -# Virtual Index - -The virtual index function allows users to directly perform operations in the database. This function simulates the creation of a real index to avoid the time and space overhead required for creating a real index. Based on the virtual index, users can evaluate the impact of the index on the specified query statement by using the optimizer. - -This function involves the following APIs: - -**Table 1** Virtual index function APIs - -| Function Name | Parameter | Description | -| :------------------- | :------------------------------------------------------ | :----------------------------------------------------------- | -| hypopg_create_index | Character string of the statement for creating an index | Creates a virtual index. | -| hypopg_display_index | None | Displays information about all created virtual indexes. | -| hypopg_drop_index | OID of the index | Deletes a specified virtual index. | -| hypopg_reset_index | None | Clears all virtual indexes. | -| hypopg_estimate_size | OID of the index | Estimates the space required for creating a specified index.
| - -This function involves the following GUC parameters: - -**Table 2** GUC parameters of the virtual index function - -| Parameter | Description | Default Value | -| :---------------- | :-------------------------------------------- | :------------ | -| enable_hypo_index | Whether to enable the virtual index function. | off | - -
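Step 2 of the procedure below enables the switch at the session level with **SET**. If the optimizer should consider virtual indexes in every new session, one hedged alternative is to persist the parameter with **gs_guc**; the data directory below is a placeholder, and the session-level **SET** shown in the procedure is usually sufficient for evaluation:

```shell
# Hypothetical: persist enable_hypo_index across sessions with gs_guc
# (replace /mogdb/data/dn1 with the actual data directory)
gs_guc reload -D /mogdb/data/dn1 -c "enable_hypo_index=on"
```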
- -## Procedure - -1. Use the **hypopg_create_index** function to create a virtual index. For example: - - ```sql - mogdb=> select * from hypopg_create_index('create index on bmsql_customer(c_w_id)'); - indexrelid | indexname - ------------+------------------------------------- - 329726 | <329726>btree_bmsql_customer_c_w_id - (1 row) - ``` - -2. Enable the GUC parameter **enable_hypo_index**. This parameter controls whether the database optimizer considers the created virtual index when executing the EXPLAIN statement. By executing EXPLAIN on a specific query statement, you can evaluate whether the index can improve the execution efficiency of the query statement based on the execution plan provided by the optimizer. For example: - - ```sql - mogdb=> set enable_hypo_index = on; - SET - ``` - - Before enabling the GUC parameter, run **EXPLAIN** and the query statement. - - ```sql - mogdb=> explain SELECT c_discount from bmsql_customer where c_w_id = 10; - QUERY PLAN - ---------------------------------------------------------------------- - Seq Scan on bmsql_customer (cost=0.00..52963.06 rows=31224 width=4) - Filter: (c_w_id = 10) - (2 rows) - ``` - - After enabling the GUC parameter, run **EXPLAIN** and the query statement. - - ```sql - mogdb=> explain SELECT c_discount from bmsql_customer where c_w_id = 10; - QUERY PLAN - ------------------------------------------------------------------------------------------------------------------ - [Bypass] - Index Scan using <329726>btree_bmsql_customer_c_w_id on bmsql_customer (cost=0.00..39678.69 rows=31224 width=4) - Index Cond: (c_w_id = 10) - (3 rows) - ``` - - By comparing the two execution plans, you can find that the index may reduce the execution cost of the specified query statement. Then, you can consider creating a real index. - -3. (Optional) Use the **hypopg_display_index** function to display all created virtual indexes. For example: - - ```sql - mogdb=> select * from hypopg_display_index(); - indexname | indexrelid | table | column - --------------------------------------------+------------+----------------+------------------ - <329726>btree_bmsql_customer_c_w_id | 329726 | bmsql_customer | (c_w_id) - <329729>btree_bmsql_customer_c_d_id_c_w_id | 329729 | bmsql_customer | (c_d_id, c_w_id) - (2 rows) - ``` - -4. (Optional) Use the **hypopg_estimate_size** function to estimate the space (in bytes) required for creating a virtual index. For example: - - ```sql - mogdb=> select * from hypopg_estimate_size(329730); - hypopg_estimate_size - ---------------------- - 15687680 - (1 row) - ``` - -5. Delete the virtual index. - - Use the **hypopg_drop_index** function to delete the virtual index of a specified OID. For example: - - ```sql - mogdb=> select * from hypopg_drop_index(329726); - hypopg_drop_index - ------------------- - t - (1 row) - ``` - - Use the **hypopg_reset_index** function to clear all created virtual indexes at a time. For example: - - ```sql - mogdb=> select * from hypopg_reset_index(); - hypopg_reset_index - -------------------- - - (1 row) - ``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - Running **EXPLAIN ANALYZE** does not involve the virtual index function. -> - The created virtual index is at the database instance level and can be shared by sessions. After a session is closed, the virtual index still exists. However, the virtual index will be cleared after the database is restarted. 
-> - This function does not support common views, materialized views, and column-store tables. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/workload-level-index-recommendation.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/workload-level-index-recommendation.md deleted file mode 100644 index 7d204ae7..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/workload-level-index-recommendation.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: Workload-level Index Recommendation -summary: Workload-level Index Recommendation -author: Guo Huan -date: 2021-05-19 ---- - -# Workload-level Index Recommendation - -For workload-level indexes, you can run scripts outside the database to use this function. This function uses the workload of multiple DML statements as the input to generate a batch of indexes that can optimize the overall workload execution performance. In addition, it provides the function of extracting service data SQL statements from logs. - -## Prerequisites - -- The database is normal, and the client can be connected properly. -- The **gsql** tool has been installed by the current user, and the tool path has been added to the _PATH_ environment variable. - -## Service Data Extraction - -### SQL in Logs - -1. Set the GUC parameters. - log_min_duration_statement = 0 - log_statement = 'all' - -2. Run the following command to extract SQL statements based on logs: - - ``` - gs_dbmind component extract_log [l LOG_DIRECTORY] [f OUTPUT_FILE] [p LOG_LINE_PREFIX] [-d DATABASE] [-U USERNAME] [--start_time] [--sql_amount] [--statement] [--json] [--max_reserved_period] [--max_template_num] - ``` - - The input parameters are as follows: - - - **LOG_DIRECTORY**: directory for storing **pg_log**. - - **OUTPUT_FILE**: path for storing the output SQL statements, that is, path for storing the extracted service data. - - **LOG_LINE_PREFIX**: specifies the prefix format of each log. - - **DATABASE** (optional): database name. If this parameter is not specified, all databases are selected by default. - - **USERNAME** (optional): username. If this parameter is not specified, all users are selected by default. - - **start_time** (optional): start time for log collection. If this parameter is not specified, all files are collected by default. - - **sql_amount** (optional): maximum number of SQL statements to be collected. If this parameter is not specified, all SQL statements are collected by default. - - **statement** (optional): Collects the SQL statements starting with **statement** in the **pg_log** log. If this parameter is not specified, the SQL statements are not collected by default. - - **json** (optional): specifies that the collected log files are stored in JSON format after SQL normalization. If no format is specified, each SQL statement occupies a line. - - **max_reserved_period** (optional): specifies the maximum number of days of reserving the template in incremental log collection in JSON mode. If this parameter is not specified, the template is reserved by default. The unit is day. - - **max_template_num** (optional): Specifies the maximum number of templates that can be reserved in JSON mode. If this parameter is not specified, all templates are reserved by default. - - An example is provided as follows.
- - ``` - gs_dbmind component extract_log $GAUSSLOG/pg_log/dn_6001 sql_log.txt '%m %c %d %p %a %x %n %e' -d postgres -U omm --start_time '2021-07-06 00:00:00' --statement - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If the **-d/-U** parameter is specified, the prefix of each log record must contain **%d** and **%u**. If transactions need to be extracted, **%p** must be specified. For details, see the **log_line_prefix** parameter. It is recommended that the value of **max_template_num** be less than or equal to **5000** to avoid long execution time of workload indexes. - > - -3. Change the GUC parameter values set in [1](#1) to the values before the setting. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > After service data extraction is completed, restore the GUC parameters to the default configurations. Otherwise, it may lead to log file bloating. - -### SQL in System Tables - -To enable this function, run the following command: - -``` -echo PASSWORD | gs_dbmind component fetch_statement [DB_PORT] [DATABASE] [OUTPUT] [--db-host DB_HOST] [-U DB_USER] [--schema SCHEMA] -[--statement-type {asp,slow,history,activity}] [--start-time STAET_TIME] [--end-time END_TIME] [--verify] [--driver] -``` - -The input parameters are as follows: - -- **DB_PORT**: port number of the connected database. -- **DATABASE**: name of the connected database. -- **OUTPUT**: output file including SQLs. -- **DB_HOST** (optional): ID of the host that connects to the database. -- **DB_USER** (optional): username for connecting to the database. The user needs to have the sysadmin or monitor admin permission. -- **SCHEMA**: schema name. It is used only when **statement-type** is set to **history**. The default value is **public**. -- **statement-type**: SQL statement type, including **asp**, **slow**, **history**, and **activity**. - - **asp**: SQL extracted from gs_asp needs to make **enable_asp** enabled. - - **slow**: current active slow SQLs are extracted. - - **history**: historical slow SQLs are extracted. - - **activity**: current active SQLs are extracted. -- **START_TIME**: start time for log collection. It is used only when **statement-type** is set to **asp** and the parameter is mandatory. -- **END_TIME**: end time for log collection. It is used only when **statement-type** is set to **asp** and the parameter is mandatory. -- **verify**: whether to verify the SQL validity. -- **driver**: whether to use a Python driver to connect a database. The default value is **gsql**. - -## Procedure for Using the Index Recommendation Script - -1. Prepare a file that contains multiple DML statements as the input workload. Each statement in the file occupies a line. You can obtain historical service statements from the offline logs of the database. - -2. To enable this function, run the following command: - - ``` - echo PASSWORD | gs_dbmind component index_advisor [p DB_PORT] [d DATABASE] [f FILE] [--h DB_HOST] [-U DB_USER] [--schema SCHEMA] - [--max_index_num MAX_INDEX_NUM][--max_index_storage MAX_INDEX_STORAGE] [--multi_iter_mode] [--max-n-distinct MAX_N_DISTINCT] - [--min-improved-rate MIN_IMPROVED_RATE] [--max-candidate-columns MAX_CANDIDATE_COLUMNS] [--max-index-columns MAX_INDEX_COLUMNS] - [--min-reltuples MIN_RELTUPLES] [--multi_node] [--json] [--driver] [--show_detail] [--show-benifits] - ``` - - The input parameters are as follows: - - - **DB_PORT**: port number of the connected database. 
- - **DATABASE**: name of the connected database. - - **FILE**: file path that contains the workload statement. - - **DB_HOST** (optional): ID of the host that connects to the database. - - **DB_USERNAME** (optional): username for connecting to the database. - - **SCHEMA**: schema name. - - **MAX_INDEX_NUM** (optional): maximum number of recommended indexes. - - **MAX_INDEX_STORAGE** (optional): maximum size of the index set space. - - **MAX_N_DISTINCT**: reciprocal value of the number for the distinct value. The default value is **0.01**. - - **MIN_IMPROVED_RATE**: minimum improved rate. The default value is **0.1**. - - **MAX_CANDIDATE_COLUMNS** (optional): maximum number of candidate index columns. - - **MAX_INDEX_COLUMNS**: maximum number of index columns. The default value is **4**. - - **MIN_RELTUPLES**: minimum number of records. The default value is **10000**. - - **multi_node** (optional): specifies whether the current instance is a distributed database instance. - - **multi_iter_mode** (optional): algorithm mode. You can switch the algorithm mode by setting this parameter. - - **json** (optional): specifies the file path format of the workload statement as JSON after SQL normalization. By default, each SQL statement occupies one line. - - **driver** (optional): specifies whether to use the Python driver to connect to the database. By default, **gsql** is used for the connection. - - **show_detail** (optional): specifies whether to display the detailed optimization information about the current recommended index set. - - **show-benefits** (optional): whether to show index benefits. - - The recommendation result is a batch of indexes, which are displayed on the screen in the format of multiple create index statements. The following is an example of the result. 
- - ``` - create index ind0 on public.bmsql_stock(s_i_id,s_w_id); - create index ind1 on public.bmsql_customer(c_w_id,c_id,c_d_id); - create index ind2 on public.bmsql_order_line(ol_w_id,ol_o_id,ol_d_id); - create index ind3 on public.bmsql_item(i_id); - create index ind4 on public.bmsql_oorder(o_w_id,o_id,o_d_id); - create index ind5 on public.bmsql_new_order(no_w_id,no_d_id,no_o_id); - create index ind6 on public.bmsql_customer(c_w_id,c_d_id,c_last,c_first); - create index ind7 on public.bmsql_new_order(no_w_id); - create index ind8 on public.bmsql_oorder(o_w_id,o_c_id,o_d_id); - create index ind9 on public.bmsql_district(d_w_id); - ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-command-reference.md deleted file mode 100644 index a6b1931c..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-command-reference.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2022-05-06 ---- - -# Command Reference - -**Table 1** gs_dbmind component slow_query_diagnosis parameters - -| Parameter | Description | Value Range | -| :--------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| -h, --help | Help information | - | -| action | Action parameter | - **show**: displays results.
- **clean**: clears results.
- **diagnosis**: interactive diagnosis. | -| -c, --conf | Configuration directory | - | -| --query | Slow SQL text | * | -| --start-time | Timestamp of the start time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | -| --end-time | Timestamp of the end time, in milliseconds. Alternatively, the date and time format is %Y-%m-%d %H:%M:%S. | Positive integer or date and time format | -| --retention-days | Number of days retaining results | Non-negative real number | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-environment-deployment.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-environment-deployment.md deleted file mode 100644 index c9c04efc..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-environment-deployment.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Environment Deployment -summary: Environment Deployment -author: Guo Huan -date: 2022-05-06 ---- - -# Environment Deployment - -- The database is working properly. -- The metric collection system is running properly. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-obtaining-help-information.md deleted file mode 100644 index fb76fac7..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-obtaining-help-information.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2022-05-06 ---- - -# Obtaining Help Information - -You can run the **--help** command to obtain the help information.
For example: - -``` -gs_dbmind component slow_query_diagnosis --help -``` - -``` -usage: [-h] -c DIRECTORY [--query SLOW_QUERY] - [--start-time TIMESTAMP_IN_MICROSECONDS] - [--end-time TIMESTAMP_IN_MICROSECONDS] [--retention-days DAYS] - {show,clean} - -Slow Query Diagnosis: Analyse the root cause of slow query - -positional arguments: - {show,clean} choose a functionality to perform - -optional arguments: - -h, --help show this help message and exit - -c DIRECTORY, --conf DIRECTORY - set the directory of configuration files - --query SLOW_QUERY set a slow query you want to retrieve - --start-time TIMESTAMP_IN_MICROSECONDS - set the start time of a slow SQL diagnosis result to - be retrieved - --end-time TIMESTAMP_IN_MICROSECONDS - set the end time of a slow SQL diagnosis result to be - retrieved - --retention-days DAYS - clear historical diagnosis results and set the maximum - number of days to retain data -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-overview.md deleted file mode 100644 index 0a7c9571..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2022-05-06 ---- - -# Overview - -Slow SQL statements have always been a pain point in data O&M. How to effectively diagnose the root causes of slow SQL statements is a big challenge. Based on the characteristics of MogDB and the slow SQL diagnosis experience of DBAs on the live network, this tool supports more than 25 root causes of slow SQL statements, outputs multiple root causes based on the possibility, and provides specific solutions. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-troubleshooting.md deleted file mode 100644 index 8f80a70f..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-troubleshooting.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2022-05-06 ---- - -# Troubleshooting - -- If you run the interactive diagnosis command for a slow SQL statement that has not been executed, no diagnosis result is provided. -- If the exporter metric collection function is not enabled, the slow SQL diagnosis function is not available. -- After the parameters in the configuration file are reset, you need to restart the service process for the settings to take effect. -- When the interactive diagnosis function of slow SQL statements is used, the tool obtains necessary data based on the RPC and data collection services. Therefore, if the RPC and data collection services are not started, the diagnosis cannot be performed. 
-- When the diagnosis function is used for interactive diagnosis, the tool checks the entered SQL statement and database. If the entered SQL statement or database is invalid, the diagnosis cannot be performed. -- During slow SQL diagnosis, the SMALL\_SHARED\_BUFFER root cause check needs to collect column information of the related tables. Therefore, ensure that the opengauss\_exporter connection user has the permission on the schema to which the table belongs. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-usage-guide.md deleted file mode 100644 index 7eff71a5..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements-usage-guide.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2022-05-06 ---- - -# Usage Guide - -Assume that the **confpath** configuration file directory has been initialized. - -- Run the following command to start only the slow SQL diagnosis function and output the top 3 root causes (for details, see the description of the **service** subcommand): - - ``` - gs_dbmind service start -c confpath --only-run slow_query_diagnosis - ``` - -- Run the following command to diagnose slow SQL statements in interactive mode: - - ``` - gs_dbmind component slow_query_diagnosis show -c confpath --query SQL --start-time timestamps0 --end-time timestamps1 - ``` - -- Run the following command to manually clear historical diagnosis results: - - ``` - gs_dbmind component slow_query_diagnosis clean -c confpath --retention-days DAYS - ``` - -- Run the following command to stop the services that have been started: - - ``` - gs_dbmind service stop -c confpath - ``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements.md deleted file mode 100644 index cc4370a8..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Slow Query Diagnosis -summary: Slow Query Diagnosis -author: zhang cuiping -date: 2023-04-07 ---- - -# Slow Query Diagnosis: Root Cause Analysis for Slow SQL Statements - -- **[Overview](slow-sql-statements-overview.md)** -- **[Environment Deployment](slow-sql-statements-environment-deployment.md)** -- **[Usage Guide](slow-sql-statements-usage-guide.md)** -- **[Obtaining Help Information](slow-sql-statements-obtaining-help-information.md)** -- **[Command Reference](slow-sql-statements-command-reference.md)** -- **[Troubleshooting](slow-sql-statements-troubleshooting.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-command-reference.md
b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-command-reference.md deleted file mode 100644 index 899e4191..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-command-reference.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2022-10-24 ---- - -# Command Reference - -**Table 1** Command line parameters - -| Parameter | Definition | -| :-------- | :------------------------------------------------------- | -| db_port | Database port number | -| database | Database name | -| file | Path of the file that contains multiple query statements | -| db-host | (Optional) Database host address | -| db-user | (Optional) Database user name | -| schema | (Optional) Schema name. The default is the public schema | diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-obtaining-help-information.md deleted file mode 100644 index a5668891..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-obtaining-help-information.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2022-10-24 ---- - -# Obtaining Help Information - -Before using the SQL Rewriter tool, run the following command to obtain help information: - -```shell -gs_dbmind component sql_rewriter --help -``` - -The following information is displayed: - -```shell -usage: [-h] [--db-host DB_HOST] [--db-user DB_USER] [--schema SCHEMA] - db_port database file - -SQL Rewriter - -positional arguments: - db_port Port for database - database Name for database - file File containing SQL statements which need to rewrite - -optional arguments: - -h, --help show this help message and exit - --db-host DB_HOST Host for database - --db-user DB_USER Username for database log-in - --schema SCHEMA Schema name for the current business data -``` - -Passwords are entered through a pipe or in interactive mode. For password-free users, any input passes the verification. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-overview.md deleted file mode 100644 index ff2cdb48..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-overview.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2022-10-24 ---- - -# Overview - -SQL Rewriter is an SQL rewriting tool. It converts query statements into more efficient or standard forms based on preset rules to improve query efficiency.
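To make the rule-based rewriting concrete, the following is a minimal, hypothetical sketch of how a single rule could be applied. It is not the sql_rewriter implementation; the rule shown (converting UNION to UNION ALL, one of the rules covered in the note below) and the demonstration query are simplified assumptions for illustration only.

```python
import re

# Toy illustration only -- not the sql_rewriter implementation.
# The rule rewrites UNION to UNION ALL so that the implicit deduplication
# step is skipped; the rewritten query may therefore return duplicate rows.
def rewrite_union_to_union_all(sql: str) -> str:
    # The negative lookahead leaves an existing "UNION ALL" untouched.
    return re.sub(r'\bUNION\b(?!\s+ALL\b)', 'UNION ALL', sql, flags=re.IGNORECASE)

# Hypothetical input, shown only to demonstrate the rule.
print(rewrite_union_to_union_all(
    "SELECT cfg_name FROM bmsql_config UNION SELECT cfg_name FROM bmsql_config"))
# -> SELECT cfg_name FROM bmsql_config UNION ALL SELECT cfg_name FROM bmsql_config
```

The real tool parses the statement and applies 11 such rules, which is why the restrictions in the following note matter.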
- -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - This function does not apply to statements that contain subqueries. -> - This function supports only SELECT statements and DELETE statements that delete an entire table. -> - This function contains 11 rewriting rules. Statements that do not comply with the rewriting rules are not processed. -> - This function displays original query statements and rewritten statements on the screen. You are not advised to rewrite SQL statements that contain sensitive information. -> - The rule for converting UNION to UNION ALL avoids deduplication and improves query performance, but the result may contain duplicate rows. -> - If a statement contains ORDER BY or GROUP BY on a specified column, the SelfJoin rule is not applicable. -> - The tool does not guarantee equivalent conversion of query statements; its purpose is to improve query efficiency. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-troubleshooting.md deleted file mode 100644 index d78caa09..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-troubleshooting.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2022-10-24 ---- - -# Troubleshooting - -- If an SQL statement cannot be rewritten, check whether the statement complies with the rewriting rules and whether its syntax is correct. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-usage-guide.md deleted file mode 100644 index 3f6fe167..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting-usage-guide.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2022-10-24 ---- - -# Usage Guide - -## Prerequisites - -The database and connection are normal. - -## Example - -Use the **tpcc** database as an example: - -```shell -gs_dbmind component sql_rewriter 5030 tpcc queries.sql --db-host 127.0.0.1 --db-user myname --schema public -``` - -**queries.sql** contains the SQL statements to be rewritten. The content is as follows: - -```sql -select cfg_name from bmsql_config group by cfg_name having cfg_name='1'; -delete from bmsql_config; -delete from bmsql_config where cfg_name='1'; -``` - -The result is multiple rewritten query statements, which are displayed on the screen (statements that cannot be rewritten are displayed as null), as shown in the following.
- -```shell -+--------------------------------------------------------------------------+------------------------------+ -| Raw SQL | Rewritten SQL | -+--------------------------------------------------------------------------+------------------------------+ -| select cfg_name from bmsql_config group by cfg_name having cfg_name='1'; | SELECT cfg_name | -| | FROM bmsql_config | -| | WHERE cfg_name = '1'; | -| delete from bmsql_config; | TRUNCATE TABLE bmsql_config; | -| delete from bmsql_config where cfg_name='1'; | | -+--------------------------------------------------------------------------+------------------------------+ -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting.md deleted file mode 100644 index 7fa87934..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sql-rewriter-sql-statement-rewriting/sql-rewriter-sql-statement-rewriting.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: SQL Rewriter -summary: SQL Rewriter -author: zhang cuiping -date: 2023-04-07 ---- - -# SQL Rewriter: SQL Statement Rewriting - -- **[Overview](sql-rewriter-sql-statement-rewriting-overview.md)** -- **[Usage Guide](sql-rewriter-sql-statement-rewriting-usage-guide.md)** -- **[Obtaining Help Information](sql-rewriter-sql-statement-rewriting-obtaining-help-information.md)** -- **[Command Reference](sql-rewriter-sql-statement-rewriting-command-reference.md)** -- **[Troubleshooting](sql-rewriter-sql-statement-rewriting-troubleshooting.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-command-reference.md deleted file mode 100644 index 533a05e9..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-command-reference.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2021-05-19 ---- - -# Command Reference - -**Table 1** Command-line options - -| Parameter | Description | Value Range | -| :-------------- | :----------------------------------- | :------------ | -| -f | Training or prediction file location | N/A | -| --predicted-file | Prediction result location | N/A | -| --model | Model selection | template, dnn | -| --model-path | Location of the training model | N/A | diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-obtaining-help-information.md deleted file mode 100644 index b863b043..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-obtaining-help-information.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2021-05-19 ---- - -# Obtaining Help
Information - -Before using the SQLdiag tool, run the following command to obtain help information: - -``` -gs_dbmind component sqldiag --help -``` - -The command output is as follows: - -``` -usage: [-h] [-f CSV_FILE] [--predicted-file PREDICTED_FILE] - [--model {template,dnn}] --model-path MODEL_PATH - [--config-file CONFIG_FILE] - {train,predict,finetune} - -SQLdiag integrated by MogDB. - -positional arguments: - {train,predict,finetune} - The training mode is to perform feature extraction and - model training based on historical SQL statements. The - prediction mode is to predict the execution time of a - new SQL statement through the trained model. - -optional arguments: - -h, --help show this help message and exit - -f CSV_FILE, --csv-file CSV_FILE - The data set for training or prediction. The file - format is CSV. If it is two columns, the format is - (SQL statement, duration time). If it is three - columns, the format is (timestamp of SQL statement - execution time, SQL statement, duration time). - --predicted-file PREDICTED_FILE - The file path to save the predicted result. - --model {template,dnn} - Choose the model model to use. - --model-path MODEL_PATH - The storage path of the model file, used to read or - save the model file. - --config-file CONFIG_FILE -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-overview.md deleted file mode 100644 index 4a79133b..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-overview.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: SQLdiag Slow SQL Discovery -summary: SQLdiag Slow SQL Discovery -author: Guo Huan -date: 2021-05-19 ---- - -# SQLdiag Slow SQL Discovery - -SQLdiag is a framework for predicting the execution duration of SQL statements in MogDB. The existing prediction technologies are mainly based on model prediction of execution plans. These prediction solutions are applicable only to jobs whose execution plans can be obtained in OLAP scenarios, and are not useful for quick queries in OLTP or HTAP scenarios. Different from the preceding solutions, SQLdiag focuses on the historical SQL statements of databases. Because the execution duration of a database's SQL statements does not vary greatly over a short period, SQLdiag can detect statement sets similar to the entered statements in the historical data, and predict the SQL statement execution duration based on the SQL vectorization technology and the time series prediction algorithm. This framework has the following benefits: - -1. SQL execution plans are not required, so there is no impact on database performance. -2. The framework is widely applicable, unlike many narrowly targeted algorithms in the industry that may apply only to OLTP or OLAP. -3. The framework is robust and easy to understand. Users can design their own prediction models by simply modifying the framework. - -SQLdiag is an SQL statement execution time prediction tool. Using a template or deep learning, it predicts the execution time of SQL statements based on statement logic similarity and historical execution records, without obtaining the SQL statement execution plan. Abnormal SQL statements can also be detected with this tool.
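As a rough illustration of the template-based idea described above (this is not SQLdiag's code; the masking rules and all names are simplified assumptions), a toy predictor can reduce each statement to a template by masking literals and then estimate a new statement's duration from the mean duration of historical statements that share the template:

```python
import re
from collections import defaultdict

# Toy sketch of template-based duration prediction, not SQLdiag's implementation.
def to_template(sql: str) -> str:
    sql = sql.lower().strip().rstrip(';')
    sql = re.sub(r"'[^']*'", '?', sql)           # mask string literals
    sql = re.sub(r'\b\d+(\.\d+)?\b', '?', sql)   # mask numeric literals
    return re.sub(r'\s+', ' ', sql)              # normalize whitespace

# Hypothetical historical records: (SQL statement, duration in seconds).
history = [
    ("SELECT * FROM bmsql_item WHERE i_id = 42;", 1.8),
    ("SELECT * FROM bmsql_item WHERE i_id = 7;", 2.1),
]

durations = defaultdict(list)
for sql, seconds in history:
    durations[to_template(sql)].append(seconds)

def predict(sql: str) -> float:
    # Average the durations of historical statements with the same template.
    matched = durations.get(to_template(sql))
    return sum(matched) / len(matched) if matched else float('nan')

print(predict("SELECT * FROM bmsql_item WHERE i_id = 100;"))  # -> 1.95
```

A real predictor additionally needs a fallback (for example, nearest-template matching) for statements whose template has never been seen before.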
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-troubleshooting.md deleted file mode 100644 index 442f8d1d..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-troubleshooting.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2021-05-19 ---- - -# Troubleshooting - -- Failure in the training scenario: Check whether the file path of historical logs is correct and whether the file format meets the requirements. -- Failure in the prediction scenario: Check whether the model path is correct. Ensure that the format of the load file to be predicted is correct. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-usage-guide.md deleted file mode 100644 index 74336594..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery-usage-guide.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2021-10-21 ---- - -# Usage Guide - -## Prerequisites - -- You have obtained training data. -- If you use the provided tool to collect training data, you need to enable the WDR function. The involved parameters are **track_stmt_stat_level** and **log_min_duration_statement**. For details, see the following sections. -- To ensure the prediction accuracy, the historical statement logs provided by users should be as comprehensive and representative as possible. - -## Collecting SQL Statements - -This tool requires users to prepare data in advance. Each sample is separated by a newline character. The training data format is as follows: - -``` -SQL,EXECUTION_TIME -``` - -The prediction data format is as follows: - -``` -SQL -``` - -**SQL** indicates the text of an SQL statement, and **EXECUTION_TIME** indicates the execution time of the SQL statement. For details about the sample data, see **train.csv** and **predict.csv** in **sample_data**. - -You can collect training data in the required format. The tool also provides the **load_sql_from_wdr** script for automatic collection. The script obtains SQL information based on the WDR report. The involved parameters are **log_min_duration_statement** and **track_stmt_stat_level**: - -- **log_min_duration_statement** indicates the slow SQL threshold. If the value is **0**, full collection is performed. The unit is millisecond. -- **track_stmt_stat_level** indicates the information capture level. You are advised to set it to **'L0,L0'**. - -After these parameters are set, a certain amount of system resources may be occupied, but the usage is generally low. In continuous high-concurrency scenarios, this may cause a performance loss of less than 5%. If the database concurrency is low, the performance loss can be ignored. The following script is stored in the sqldiag root directory (*$GAUSSHOME***/bin/components/sqldiag**).
- -``` -Use a script to obtain the training set: -load_sql_from_wdr.py [-h] --port PORT --start_time START_TIME - --finish_time FINISH_TIME [--save_path SAVE_PATH] -Example: - python load_sql_from_wdr.py --start_time "2021-04-25 00:00:00" --finish_time "2021-04-26 14:00:00" --port 5432 --save_path ./data.csv -``` - -## Procedure - -1. Provide historical logs for model training. - -2. Perform training and prediction. - - ``` - Template-based training and prediction: - gs_dbmind component sqldiag [train, predict] -f FILE --model template --model-path template_model_path - DNN-based training and prediction: - gs_dbmind component sqldiag [train, predict] -f FILE --model dnn --model-path dnn_model_path - ``` - -## Examples - -Use the provided test data to perform template-based training: - -``` -gs_dbmind component sqldiag train -f ./sample_data/train.csv --model template --model-path ./template -``` - -Use the provided test data for template-based prediction: - -``` -gs_dbmind component sqldiag predict -f ./sample_data/predict.csv --model template --model-path ./template --predicted-file ./result/t_result -``` - -Use the provided test data to update the template-based model: - -``` -gs_dbmind component sqldiag finetune -f ./sample_data/train.csv --model template --model-path ./template -``` - -Use the provided test data to perform DNN-based training: - -``` -gs_dbmind component sqldiag train -f ./sample_data/train.csv --model dnn --model-path ./dnn_model -``` - -Use the provided test data for DNN-based prediction: - -``` -gs_dbmind component sqldiag predict -f ./sample_data/predict.csv --model dnn --model-path ./dnn_model --predicted-file -``` - -Use the provided test data to update the DNN-based model: - -``` -gs_dbmind component sqldiag finetune -f ./sample_data/train.csv --model dnn --model-path ./dnn_model -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery.md deleted file mode 100644 index 5178d707..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: SQLdiag -summary: SQLdiag -author: zhang cuiping -date: 2023-04-07 ---- - -# SQLdiag: Slow SQL Discovery - -SQLdiag is a framework for predicting the execution duration of SQL statements in openGauss. The existing prediction technologies are mainly based on model prediction of execution plans. These prediction solutions are applicable only to jobs whose execution plans can be obtained in OLAP scenarios, and are not useful for quick queries in OLTP or HTAP scenarios. Different from the preceding solutions, SQLdiag focuses on the historical SQL statements of databases. Because the execution duration of a database's SQL statements does not vary greatly over a short period, SQLdiag can detect statement sets similar to the entered statements in the historical data, and predict the SQL statement execution duration based on the SQL vectorization technology and the time series prediction algorithm. This framework has the following benefits: - -1. SQL execution plans are not required, so there is no impact on database performance. -2. The framework is widely applicable, unlike many narrowly targeted algorithms in the industry that may apply only to OLTP or OLAP. -3.
The framework is robust and easy to understand. Users can design their own prediction models by simply modifying the framework. - -The typical application scenario of this tool is to check a batch of SQL statements before they go online, so as to identify risks in advance. - -- **[Overview](sqldiag-slow-sql-discovery-overview.md)** -- **[Usage Guide](sqldiag-slow-sql-discovery-usage-guide.md)** -- **[Obtaining Help Information](sqldiag-slow-sql-discovery-obtaining-help-information.md)** -- **[Command Reference](sqldiag-slow-sql-discovery-command-reference.md)** -- **[Troubleshooting](sqldiag-slow-sql-discovery-troubleshooting.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-command-reference.md deleted file mode 100644 index b91dcea4..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-command-reference.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2021-05-19 ---- - -# Command Reference - -**Table 1** Command-line parameters - -| Parameter | Description | Value Range | -| :--------------------- | :----------------------------------------------------------- | :--------------------- | -| mode | Specifies the running mode of the tuning program. | train, tune, recommend | -| --tuner-config-file, -x | Path of the core parameter configuration file of X-Tuner. The default path is **xtuner.conf** under the installation directory. | - | -| --db-config-file, -f | Path of the connection information configuration file used by the optimization program to log in to the database host. If the database connection information is configured in this file, the following database connection information can be omitted. | - | -| --db-name | Specifies the name of the database to be tuned. | - | -| --db-user | Specifies the user account used to log in to the tuned database. | - | -| --port | Specifies the database listening port. | - | -| --host | Specifies the host IP address of the database instance. | - | -| --host-user | Specifies the username for logging in to the host where the database instance is located. The database O&M tools, such as **gsql** and **gs_ctl**, can be found in the environment variables of this user. | - | -| --host-ssh-port | Specifies the SSH port number of the host where the database instance is located. This parameter is optional. The default value is **22**. | - | -| --help, -h | Returns the help information. | - | -| --version, -v | Returns the current tool version. | - | - -**Table 2** Parameters in the configuration file - -| Parameter | Description | Value Range | -| :-------------------- | :----------------- | :------------------- | -| logfile | Path for storing generated logs. | - | -| output_tuning_result | (Optional) Specifies the path for saving the tuning result. | - | -| verbose | Whether to print details. | on, off | -| recorder_file | Path for storing logs that record intermediate tuning information. | - | -| tune_strategy | Specifies the strategy used in tune mode. | rl, gop, auto | -| drop_cache | Whether to perform drop cache in each iteration. Drop cache can make the benchmark score more stable.
If this parameter is enabled, add the login system user to the **/etc/sudoers** list and grant the NOPASSWD permission to the user. (You are advised to enable the NOPASSWD permission temporarily and disable it after the tuning is complete.) | on, off | -| used_mem_penalty_term | Penalty coefficient of the total memory used by the database. This parameter is used to prevent performance deterioration caused by unlimited memory usage. The greater the value is, the greater the penalty is. | Recommended value: 0 ~ 1 | -| rl_algorithm | Specifies the RL algorithm. | ddpg | -| rl_model_path | Path for saving or reading the RL model, including the save directory name and file name prefix. In train mode, this path is used to save the model. In tune mode, this path is used to read the model file. | - | -| rl_steps | Number of training steps of the deep reinforcement learning algorithm | - | -| max_episode_steps | Maximum number of training steps in each episode | - | -| test_episode | Number of episodes when the RL algorithm is used for optimization | - | -| gop_algorithm | Specifies a global optimization algorithm. | bayes, pso, auto | -| max_iterations | Maximum number of iterations of the global search algorithm. (The value is not fixed. Multiple iterations may be performed based on the actual requirements.) | - | -| particle_nums | Number of particles when the PSO algorithm is used | - | -| benchmark_script | Benchmark driver script. This parameter specifies the file with the same name in the benchmark path to be loaded. Typical benchmarks, such as TPC-C and TPC-H, are supported by default. | tpcc, tpch, tpcds, sysbench … | -| benchmark_path | Path for saving the benchmark script. If this parameter is not configured, the configuration in the benchmark drive script is used. | - | -| benchmark_cmd | Command for starting the benchmark script. If this parameter is not configured, the configuration in the benchmark drive script is used. | - | -| benchmark_period | This parameter is valid only for **period benchmark**. It indicates the test period of the entire benchmark. The unit is second. | - | -| scenario | Type of the workload specified by the user. | tp, ap, htap | -| tuning_list | List of parameters to be tuned. For details, see the **share/knobs.json.template** file. | - | diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-examples.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-examples.md deleted file mode 100644 index eb207db5..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-examples.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -title: Examples -summary: Examples -author: Guo Huan -date: 2021-05-19 ---- - -# Examples - -X-Tuner supports three modes: recommend mode for obtaining parameter diagnosis reports, train mode for training reinforcement learning models, and tune mode for using optimization algorithms. The preceding three modes are distinguished by command line parameters, and the details are specified in the configuration file. - -## Configuring the Database Connection Information - -Configuration items for connecting to a database in the three modes are the same. You can enter the detailed connection information in the command line or in the JSON configuration file. Both methods are described as follows: - -1. 
Entering the connection information in the command line - - Input the following options: **--db-name --db-user --port --host --host-user**. The **--host-ssh-port** option is optional. The following is an example: - - ``` - gs_xtuner recommend --db-name postgres --db-user omm --port 5678 --host 192.168.1.100 --host-user omm - ``` - -2. Entering the connection information in the JSON configuration file - - Assume that the file name is **connection.json**. The following is an example of the JSON configuration file: - - ``` - { - "db_name": "postgres", # Database name - "db_user": "dba", # Username for logging in to the database - "host": "127.0.0.1", # IP address of the database host - "host_user": "dba", # Username for logging in to the database host - "port": 5432, # Listening port number of the database - "ssh_port": 22 # SSH listening port number of the database host - } - ``` - - Input **-f connection.json**. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** To prevent password leakage, the configuration file and command line parameters do not contain password information by default. After you enter the preceding connection information, the program prompts you to enter the database password and the OS login password in interactive mode. - -## Example of Using recommend Mode - -The configuration item **scenario** takes effect for recommend mode. If the value is **auto**, the workload type is automatically detected. - -Run the following command to obtain the diagnosis result: - -``` - -gs_xtuner recommend -f connection.json - -``` - -The diagnosis report is generated as follows: - -**Figure 1** Report generated in recommend mode - -![report-generated-in-recommend-mode](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/examples-1.png) - -In the preceding report, the database parameter configuration in the environment is recommended, and a risk warning is provided. The report also generates the current workload features. The following features are for reference: - -- **temp_file_size**: number of generated temporary files. If the value is greater than 0, the system uses temporary files. If too many temporary files are used, the performance is poor. If possible, increase the value of **work_mem**. -- **cache_hit_rate**: cache hit ratio of **shared_buffer**, indicating the cache efficiency of the current workload. -- **read_write_ratio**: read/write ratio of database jobs. -- **search_modify_ratio**: ratio of data query to data modification of a database job. -- **ap_index**: AP index of the current workload. The value ranges from 0 to 10. A larger value indicates a higher preference for data analysis and retrieval. -- **workload_type**: workload type, which can be AP, TP, or HTAP based on database statistics. -- **checkpoint_avg_sync_time**: average duration for refreshing data to the disk each time when the database is at the checkpoint, in milliseconds. -- **load_average**: average load of each CPU core in 1 minute, 5 minutes, and 15 minutes. Generally, if the value is about 1, the current hardware matches the workload. If the value is about 3, the current workload is heavy. If the value is greater than 5, the current workload is too heavy. In this case, you are advised to reduce the load or upgrade the hardware. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Some system catalogs keep recording statistics, which may affect load feature identification.
Therefore, you are advised to clear the statistics of some system catalogs, run the workload for a period of time, and then use recommend mode for diagnosis to obtain more accurate results. To clear the statistics, run the following command: -> -> ```sql -> select pg_stat_reset_shared('bgwriter'); -> select pg_stat_reset(); -> ``` -> ->- In recommend mode, information in the **pg\_stat\_database** and **pg\_stat\_bgwriter** system catalogs in the database is read. Therefore, the database login user must have sufficient permissions. (You are advised to use an account with the administrator permission, which can be granted to *username* by running **alter user username sysadmin**.) - -## Example of Using train Mode - -This mode is used to train the deep reinforcement learning model. The configuration items related to this mode are as follows: - -- **rl_algorithm**: algorithm used to train the reinforcement learning model. Currently, this parameter can be set to **ddpg**. - -- **rl_model_path**: path for storing the reinforcement learning model generated after training. - -- **rl_steps**: maximum number of training steps in the training process. - -- **max_episode_steps**: maximum number of steps in each episode. - -- **scenario**: specifies the workload type. If the value is **auto**, the system automatically determines the workload type. The recommended parameter tuning list varies according to the mode. - -- **tuning_list**: specifies the parameters to be tuned. If this parameter is not specified, the list of parameters to be tuned is automatically recommended based on the workload type. If this parameter is specified, **tuning_list** indicates the path of the tuning list file. The following is an example of the content of a tuning list configuration file. - - ``` - { - "work_mem": { - "default": 65536, - "min": 65536, - "max": 655360, - "type": "int", - "restart": false - }, - "shared_buffers": { - "default": 32000, - "min": 16000, - "max": 64000, - "type": "int", - "restart": true - }, - "random_page_cost": { - "default": 4.0, - "min": 1.0, - "max": 4.0, - "type": "float", - "restart": false - }, - "enable_nestloop": { - "default": true, - "type": "bool", - "restart": false - } - } - ``` - -After the preceding configuration items are configured, run the following command to start the training: - -``` - -gs_xtuner train -f connection.json - -``` - -After the training is complete, a model file is generated in the directory specified by the **rl_model_path** configuration item. - -## Example of Using tune Mode - -The tune mode supports multiple algorithms: the DDPG algorithm, which is based on reinforcement learning (RL), and the Bayesian optimization and particle swarm optimization (PSO) algorithms, which are both global optimization algorithms (GOP). - -The configuration items related to tune mode are as follows: - -- **tune_strategy**: specifies the algorithm to be used for optimization. The value can be **rl** (using the reinforcement learning model), **gop** (using the global optimization algorithm), or **auto** (automatic selection). If this parameter is set to **rl**, RL-related configuration items take effect. In addition to the preceding configuration items that take effect in train mode, the **test_episode** configuration item also takes effect. This configuration item indicates the maximum number of episodes in the tuning process. This parameter directly affects the execution time of the tuning process. Generally, a larger value indicates longer time consumption.
-- **gop_algorithm**: specifies a global optimization algorithm. The value can be **bayes** or **pso**. -- **max_iterations**: specifies the maximum number of iterations. A larger value indicates a longer search time and a better search effect. -- **particle_nums**: specifies the number of particles. This parameter is valid only for the PSO algorithm. -- For details about **scenario** and **tuning_list**, see the description of train mode. - -After the preceding items are configured, run the following command to start tuning: - -``` - -gs_xtuner tune -f connection.json - -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** Before using tune and train modes, you need to import the data required by the benchmark, check whether the benchmark can run properly, and back up the current database parameters. To query the current database parameters, run the following command: select name, setting from pg_settings; diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-obtaining-help-information.md deleted file mode 100644 index 9536cea7..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-obtaining-help-information.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2021-05-19 ---- - -# Obtaining Help Information - -Before starting the tuning program, run the following command to obtain help information: - -```bash -gs_dbmind component xtuner --help -``` - -The command output is as follows: - -```bash -usage: [-h] [--database DATABASE] [--db-user DB_USER] [--db-port DB_PORT] [--db-host DB_HOST] [--host-user HOST_USER] [--host-ssh-port HOST_SSH_PORT] [-f DB_CONFIG_FILE] [-x TUNER_CONFIG_FILE] [-v] - {train,tune,recommend} - -X-Tuner: a self-tuning tool integrated by openGauss. - -positional arguments: - {train,tune,recommend} - Train a reinforcement learning model or tune database by model. And also can recommend best_knobs according to your workload. - -optional arguments: - -h, --help show this help message and exit - -f DB_CONFIG_FILE, --db-config-file DB_CONFIG_FILE - You can pass a path of configuration file otherwise you should enter database information by command arguments manually. Please see the template file share/server.json.template. - -x TUNER_CONFIG_FILE, --tuner-config-file TUNER_CONFIG_FILE - This is the path of the core configuration file of the X-Tuner. You can specify the path of the new configuration file. The default path is /path/to/config/file. You can modify the configuration file to control the tuning process. - -v, --version show program's version number and exit - -Database Connection Information: - --database DATABASE, --db-name DATABASE - The name of database where your workload running on. - --db-user DB_USER Use this user to login your database. Note that the user must have sufficient permissions. - --db-port DB_PORT, --port DB_PORT - Use this port to connect with the database. - --db-host DB_HOST, --host DB_HOST - The IP address of your database installation host. - --host-user HOST_USER - The login user of your database installation host.
- --host-ssh-port HOST_SSH_PORT - The SSH port of your database installation host. -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-overview.md deleted file mode 100644 index 17d57b3f..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2021-05-19 ---- - -# Overview - -X-Tuner is a parameter tuning tool integrated into databases. It uses AI technologies such as deep reinforcement learning and global search algorithms to obtain optimal database parameter settings without manual intervention. This function does not have to be deployed with the database environment; it can be deployed and run independently of the database installation environment. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-parameter-optimization-and-diagnosis.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-parameter-optimization-and-diagnosis.md deleted file mode 100644 index 0d9ec236..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-parameter-optimization-and-diagnosis.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: X-Tuner -summary: X-Tuner -author: zhang cuiping -date: 2023-04-07 ---- - -# X-Tuner: Parameter Tuning and Diagnosis - -- **[Overview](x-tuner-overview.md)** -- **[Preparations](x-tuner-preparations.md)** -- **[Examples](x-tuner-examples.md)** -- **[Obtaining Help Information](x-tuner-obtaining-help-information.md)** -- **[Command Reference](x-tuner-command-reference.md)** -- **[Troubleshooting](x-tuner-troubleshooting.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-preparations.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-preparations.md deleted file mode 100644 index 6faaeaa9..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-preparations.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Preparations -summary: Preparations -author: Guo Huan -date: 2021-10-21 ---- - -# Preparations - -
- -## Prerequisites and Precautions - -- The database status is normal; the client can be properly connected; and data can be imported to the database, so that the optimization program can run the benchmark to evaluate the optimization effect. -- To use this tool, you need to specify a database login user, and that user must have sufficient permissions to obtain the required database status information. -- If you log in to the database host as a Linux user, add **$GAUSSHOME/bin** to the **PATH** environment variable so that you can directly run database O&M tools, such as gsql, gs_guc, and gs_ctl. -- The recommended Python version is Python 3.6 or later. The required dependencies have been installed in the operating environment, and the optimization program can be started properly. You can install a Python 3.6+ environment independently without setting it as a global environment variable. You are not advised to install the tool as the root user. If you install the tool as the root user and run the tool as another user, ensure that the running user has the read permission on the configuration file. -- This tool can run in three modes. In **tune** and **train** modes, you need to configure the benchmark running environment and import data. This tool will iteratively run the benchmark to check whether the performance is improved after the parameters are modified. -- In **recommend** mode, you are advised to run the command while the database is executing the workload to obtain more accurate real-time workload information. -- By default, this tool provides benchmark running script samples of TPC-C, TPC-H, TPC-DS, and sysbench. If you use these benchmarks to perform pressure tests on the database system, you can modify or configure the corresponding configuration files. To adapt to your own service scenarios, you need to compile the script file that drives your customized benchmark based on the **template.py** file in the **benchmark** directory. - -
- -## Principles - -The tuning program is a tool independent of the database kernel. The usernames and passwords of the database and its host are required so that the tool can control the database and run the benchmark performance test. Before starting the tuning program, ensure that the interaction in the test environment is normal, the benchmark test script can be run properly, and the database can be connected properly. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If the parameters to be tuned include parameters that take effect only after the database is restarted, the database will be restarted multiple times during the tuning. Exercise caution when using **train** and **tune** modes if the database is running jobs. - -X-Tuner can run in any of the following modes: - -- **recommend**: Log in to the database using the specified user name, obtain the feature information about the running workload, and generate a parameter recommendation report based on the feature information. Report improper parameter settings and potential risks in the current database. Output the currently running workload behavior and characteristics. Output the recommended parameter settings. In this mode, the database does not need to be restarted. In other modes, the database may need to be restarted repeatedly. -- **train**: Modify parameters and execute the benchmark based on the benchmark information provided by users. The reinforcement learning model is trained through repeated iteration so that you can load the model in **tune** mode for optimization. -- **tune**: Use an optimization algorithm to tune database parameters. Currently, two types of algorithms are supported: deep reinforcement learning and global search algorithms (global optimization algorithms). The deep reinforcement learning mode requires **train** mode to generate the optimized model after training. However, the global search algorithms do not need to be trained in advance and can be directly used for search and optimization. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** If the deep reinforcement learning algorithm is used in **tune** mode, a trained model must be available, and the parameters for training the model must be the same as those in the parameter list (including max and min) for tuning. - -**Figure 1** X-Tuner structure - -![x-tuner-structure](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/preparations-1.png) - -Figure 1 shows the overall architecture of the X-Tuner. The X-Tuner system can be divided into the following parts: - -- DB: The DB_Agent module is used to abstract database instances. It can be used to obtain the internal database status information and current database parameters, and to set database parameters. The SSH connection used for logging in to the database environment is included on the database side. -- Algorithm: algorithm package used for optimization, including global search algorithms (such as Bayesian optimization and particle swarm optimization) and deep reinforcement learning (such as DDPG). -- X-Tuner main logic module: encapsulated by the environment module. Each step is one iteration of the optimization process. The entire optimization process is iterated through multiple steps. -- benchmark: a user-specified benchmark performance test script, which is used to run benchmark jobs. The benchmark result reflects the performance of the database system.
- -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Design the benchmark script so that a larger score indicates better performance. For example, for a benchmark that measures the overall execution duration of SQL statements, such as TPC-H, the inverse of the overall execution duration can be used as the benchmark score. - -## Installing and Running X-Tuner - -You can run the X-Tuner in two ways: run it directly from the source code, or install it on the system through Python setuptools and then call it with the **gs_xtuner** command. Both methods are described below. - -Method 1: Run the source code directly. - -1. Switch to the **xtuner** source code directory. For the openGauss community code, the path is **openGauss-server/src/gausskernel/dbmind/tools/xtuner**. For an installed database system, the source code path is *$GAUSSHOME***/bin/dbmind/xtuner**. - -2. The current directory contains a **requirements.txt** file. Use the pip package management tool to install the dependencies listed in it. - - ```bash - pip install -r requirements.txt - ``` - -3. After the installation is successful, add the environment variable PYTHONPATH, and then run **main.py**. For example, to obtain the help information, run the following command: - - ```bash - cd tuner # Switch to the directory where the main.py entry file is located. - export PYTHONPATH='..' # Add the upper-level directory to the path for searching for packages. - python main.py --help # Obtain help information. The methods of using other functions are similar. - ``` - -Method 2: Install the X-Tuner in the system. - -1. You can use the **setup.py** file to install the X-Tuner to the system and then run the **gs_xtuner** command. You need to switch to the root directory of **xtuner**. For details about the directory location, see the preceding description. - -2. Run the following command to install the tool in the Python environment using Python setuptools: - - ```bash - python setup.py install - ``` - - If the **bin** directory of Python is added to the *PATH* environment variable, the **gs_xtuner** command can be directly called anywhere. - -3.
- -## Installing and Running X-Tuner - -You can run the X-Tuner in two ways. One is to run the X-Tuner directly through the source code. The other is to install the X-Tuner on the system through the Python setuptools, and then run the **gs_xtuner** command to call the X-Tuner. The following describes two methods of running the X-Tuner. - -Method 1: Run the source code directly. - -1. Switch to the **xtuner** source code directory. For the openGauss community code, the path is **openGauss-server/src/gausskernel/dbmind/tools/xtuner**. For an installed database system, the source code path is *$GAUSSHOME***/bin/dbmind/xtuner**. - -2. You can view the **requirements.txt** file in the current directory. Use the pip package management tool to install the dependency based on the **requirements.txt** file. - - ```bash - pip install -r requirements.txt - ``` - -3. After the installation is successful, add the environment variable PYTHONPATH, and then run **main.py**. For example, to obtain the help information, run the following command: - - ```bash - cd tuner # Switch to the directory where the main.py entry file is located. - export PYTHONPATH='..' # Add the upper-level directory to the path for searching for packages. - python main.py --help # Obtain help information. The methods of using other functions are similar. - ``` - -Method 2: Install the X-Tuner in the system. - -1. You can use the **setup.py** file to install the X-Tuner to the system and then run the **gs_xtuner** command. You need to switch to the root directory of **xtuner**. For details about the directory location, see the preceding description. - -2. Run the following command to install the tool in the Python environment using Python setuptools: - - ```bash - python setup.py install - ``` - - If the **bin** directory of Python is added to the *PATH* environment variable, the **gs_xtuner** command can be directly called anywhere. - -3. For example, to obtain the help information, run the following command: - - ```bash - gs_xtuner --help - ``` - -
- -## Description of the X-Tuner Configuration File - -Before running the X-Tuner, you need to load the configuration file. The default path of the configuration file is tuner/xtuner.conf. You can run the **gs_xtuner --help** command to view the absolute path of the configuration file that is loaded by default. - -``` -... - -x TUNER_CONFIG_FILE, --tuner-config-file TUNER_CONFIG_FILE - This is the path of the core configuration file of the - X-Tuner. You can specify the path of the new - configuration file. The default path is /path/to/xtuner/xtuner.conf. - You can modify the configuration file to control the - tuning process. -... -``` - -You can modify the configuration items in the configuration file as required to instruct the X-Tuner to perform different actions. For details about the configuration items in the configuration file, see Table 2 in [Command Reference](x-tuner-command-reference.md). If you need to change the loading path of the configuration file, you can specify the path through the **-x** command line option. - -
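As a quick sanity check before a run, the configuration can also be inspected programmatically. The sketch below assumes that **xtuner.conf** uses an INI-style layout readable by Python's configparser (an assumption; consult the shipped configuration template for the authoritative format) and looks up a few option names taken from Table 2 in [Command Reference](x-tuner-command-reference.md):

```python
from configparser import ConfigParser

# Assumption: xtuner.conf is INI-formatted. Section names are not specified
# in this document, so every section is scanned; the option names come from
# Table 2 in Command Reference.
parser = ConfigParser()
parser.read('xtuner.conf')  # read() silently skips files that do not exist

for section in parser.sections():
    for option in ('tune_strategy', 'gop_algorithm', 'benchmark_script'):
        if parser.has_option(section, option):
            print('%s.%s = %s' % (section, option, parser.get(section, option)))
```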
- -## Benchmark Selection and Configuration - -The benchmark driver script is stored in the benchmark subdirectory of the X-Tuner. The X-Tuner provides common benchmark driver scripts, such as TPC-C and TPC-H. The X-Tuner calls the **get_benchmark_instance()** function in the benchmark/__init__.py file to load different benchmark driver scripts and obtain benchmark driver instances. The format of the benchmark driver script is described as follows: - -- Name of the driver script: name of the benchmark. The name is used to uniquely identify the driver script. You can specify the benchmark driver script to be loaded by setting the **benchmark_script** configuration item in the configuration file of the X-Tuner. -- The driver script contains the *path* variable, the *cmd* variable, and the **run** function. - -The following describes the three elements of the driver script: - -1. *path*: path for saving the benchmark script. You can modify the path in the driver script or specify the path by setting the **benchmark_path** configuration item in the configuration file. - -2. *cmd*: command for executing the benchmark script. You can modify the command in the driver script or specify the command by setting the **benchmark_cmd** configuration item in the configuration file. Placeholders can be used in the text of cmd to obtain the information necessary for running the cmd command. For details, see the TPC-H driver script example. These placeholders include: - - - {host}: IP address of the database host machine - - {port}: listening port number of the database instance - - {user}: user name for logging in to the database - - {password}: password of the user who logs in to the database system - - {db}: name of the database that is being optimized - -3. **run** function: The signature of this function is as follows: - - ``` - def run(remote_server, local_host) -> float: - ``` - - The returned data type is float, indicating the evaluation score after the benchmark is executed. A larger value indicates better performance. For example, the TPC-C test result tpmC can be used as the returned value, and the inverse of the total execution time of all SQL statements in TPC-H can also be used as the return value. - - The *remote_server* variable is the shell command interface to the remote host (database host machine) that the X-Tuner program passes to the script. The *local_host* variable is the shell command interface to the local host (where the X-Tuner script is executed) that the X-Tuner program passes to the script. Methods provided by the preceding shell command interface include: - - ``` - exec_command_sync(command, timeout) - Function: This method is used to run the shell command on the host. - Parameter list: - command: The data type can be str, or a list or tuple of str elements. This parameter is optional. - timeout: The timeout interval for command execution in seconds. This parameter is optional. - Return value: - Returns 2-tuple (stdout and stderr). stdout indicates the standard output stream result, and stderr indicates the standard error stream result. The data type is str. - ``` - - ``` - exit_status - Function: This attribute indicates the exit status code after the latest shell command is executed. - Note: Generally, if the exit status code is 0, the execution is normal. If the exit status code is not 0, an error occurs. - ``` - -Benchmark driver script example: - -1.
TPC-C driver script - - ```python - from tuner.exceptions import ExecutionError - - # WARN: You need to download the benchmark-sql test tool to the system, - # replace the PostgreSQL JDBC driver with the openGauss driver, - # and configure the benchmark-sql configuration file. - # The program starts the test by running the following command: - path = '/path/to/benchmarksql/run' # Path for storing the TPC-C test script benchmark-sql - cmd = "./runBenchmark.sh props.gs" # Customize a benchmark-sql test configuration file named props.gs. - - def run(remote_server, local_host): - # Switch to the TPC-C script directory, clear historical error logs, and run the test command. - # You are advised to wait for several seconds because the benchmark-sql test script generates the final test report through a shell script, so the report may be delayed. - # To ensure that the final tpmC value report can be obtained, wait for 3 seconds. - stdout, stderr = remote_server.exec_command_sync(['cd %s' % path, 'rm -rf benchmarksql-error.log', cmd, 'sleep 3']) - # If there is data in the standard error stream, an exception is reported and the system exits abnormally. - if len(stderr) > 0: - raise ExecutionError(stderr) - - # Find the final tpmC result. - tpmC = None - split_string = stdout.split() # Split the standard output stream result. - for i, st in enumerate(split_string): - # In benchmark-sql 5.0, the tpmC value is the second field after the keyword (NewOrders). In normal cases, the tpmC value is returned after the keyword is found. - if "(NewOrders)" in st: - tpmC = split_string[i + 2] - break - stdout, stderr = remote_server.exec_command_sync( - "cat %s/benchmarksql-error.log" % path) - nb_err = stdout.count("ERROR:") # Check whether errors occur during the benchmark run and record the number of errors. - return float(tpmC) - 10 * nb_err # The number of errors is used as a penalty item, and the penalty coefficient is 10. A higher penalty coefficient penalizes errors more heavily. - - ``` - -2. TPC-H driver script - - ```python - import time - - from tuner.exceptions import ExecutionError - - # WARN: You need to import data into the database, and the SQL statements in the following path will be executed. - # The program automatically collects the total execution duration of these SQL statements. - path = '/path/to/tpch/queries' # Directory for storing SQL scripts used for the TPC-H test - cmd = "gsql -U {user} -W {password} -d {db} -p {port} -f {file}" # The command for running the TPC-H test script. Generally, gsql -f <script file> is used. - - def run(remote_server, local_host): - # Traverse all test case file names in the current directory. - find_file_cmd = "find . -type f -name '*.sql'" - stdout, stderr = remote_server.exec_command_sync(['cd %s' % path, find_file_cmd]) - if len(stderr) > 0: - raise ExecutionError(stderr) - files = stdout.strip().split('\n') - time_start = time.time() - for file in files: - # Fill in the {file} placeholder with the file variable and run the command. - perform_cmd = cmd.format(file=file) - stdout, stderr = remote_server.exec_command_sync(['cd %s' % path, perform_cmd]) - if len(stderr) > 0: - print(stderr) - # The cost is the total execution duration of all test cases. - cost = time.time() - time_start - # Return the negative value to fit the definition of the run function: a larger return value indicates better performance. 
- return - cost - ``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-troubleshooting.md deleted file mode 100644 index df96e0c7..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-troubleshooting.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2021-05-19 ---- - -# Troubleshooting - -- Failure of connection to the database instance: Check whether the database instance is faulty or whether the security permissions of the configuration items in the **pg_hba.conf** file are incorrectly configured. -- Restart failure: Check the health status of the database instance and ensure that the database instance is running properly. -- Dependency installation failure: Upgrade the pip package management tool by running the **python -m pip install --upgrade pip** command. -- Poor performance of TPC-C jobs: In high-concurrency scenarios such as TPC-C, a large amount of data is modified during pressure tests, and each test is not idempotent; for example, the data volume in the TPC-C database increases, invalid tuples are not cleared using VACUUM FULL, checkpoints are not triggered in the database, and drop cache is not performed. Therefore, for benchmarks that write a large amount of data, such as TPC-C, it is recommended that the benchmark data be re-imported at intervals (depending on the number of concurrent tasks and the execution duration). A simple method is to back up the $PGDATA directory beforehand so that it can be restored later. -- When the TPC-C job is running, the TPC-C driver script reports the error "TypeError: float() argument must be a string or a number, not 'NoneType'" (**None** cannot be converted to the float type). This is because the TPC-C pressure test result is not obtained. There are many possible causes for this problem; manually check whether TPC-C can be successfully executed and whether the returned result can be obtained. If the preceding problem does not occur, you are advised to set the delay time of the **sleep** command in the command list in the TPC-C driver script to a larger value. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai4db-autonomous-database-o&m.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai4db-autonomous-database-o&m.md deleted file mode 100644 index 7a1c0759..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/ai4db-autonomous-database-o&m.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: AI4DB Autonomous Database O&M -summary: AI4DB Autonomous Database O&M -author: Guo Huan -date: 2022-05-06 ---- - -# AI4DB Autonomous Database O&M - -As mentioned above, AI4DB is mainly used for autonomous O&M and management of databases, helping database O&M personnel reduce their O&M workload. In terms of implementation, the AI4DB framework of DBMind is monitoring-based and service-oriented. It also provides instant AI toolkits and out-of-the-box AI O&M functions (such as index recommendation). AI4DB mainly uses the open-source Prometheus for monitoring. DBMind provides exporters that produce monitoring data and can be interconnected with the Prometheus platform. The following figure shows the AI4DB service architecture of DBMind. 
- -**Figure 1** AI4DB service architecture of DBMind - -![DBMind AI4DB service architecture](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/AI4DB-1.png) - -Key components in the figure are described as follows: - -- DBMind Service: DBMind background service, which can be used for periodic offline computing, including slow SQL root cause analysis and time series prediction. -- Prometheus-server: server for storing Prometheus monitoring metrics. -- metadatabase: stores the DBMind computing results after offline computing is complete. Databases such as MogDB and SQLite are supported. -- client: client used to read the DBMind offline computing results. Currently, only the CLI client is supported. If a database such as MogDB is used to store the DBMind computing results, you can configure visualization tools such as Grafana to visualize them. -- openGauss-exporter: collects monitoring metrics from the MogDB database nodes for DBMind to compute on. -- node-exporter: exporter provided by Prometheus, which can be used to monitor system metrics of the node, such as CPU and memory usage. -- reprocessing-exporter: processes metrics collected by Prometheus, for example, calculating the CPU usage. - -## Environment Configuration - -DBMind must run on Python 3.6 or later. The required third-party dependency packages are recorded in the **requirements.txt** file (**requirements-x86.txt** or **requirements-aarch64.txt**, depending on the platform type) in the AI function root directory (*$GAUSSHOME***/bin/dbmind**). You can run the **pip install** command to install the dependencies. For example: - -``` -pip install -r requirements-x86.txt -``` - -If you do not install all required dependencies, the system will prompt you to install third-party dependencies when you run the **gs_dbmind** command. Note that this file lists the third-party dependencies required by DBMind. If a third-party package conflict exists in the user environment, you can handle the problem based on the actual situation. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/components-that-support-dbmind.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/components-that-support-dbmind.md deleted file mode 100644 index 767a9abb..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/components-that-support-dbmind.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Components that Support DBMind -summary: Components that Support DBMind -author: Guo Huan -date: 2022-05-06 ---- - -# Components that Support DBMind - -DBMind provides components to support the deployment and implementation of the entire service or solution. They are not AI functions, but they are an important part of the entire service system and support the quick implementation of the entire autonomous O&M solution. For example, the exporter is used to collect database metrics. 
- -- **[Prometheus Exporter](./prometheus-exporter/prometheus-exporter.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-command-reference.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-command-reference.md deleted file mode 100644 index 8eb3f041..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-command-reference.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -title: Command Reference -summary: Command Reference -author: Guo Huan -date: 2022-05-06 ---- - -# Command Reference - -For details about how to use reprocessing-exporter, see the following help information: - -``` -gs_dbmind component reprocessing_exporter --help -usage: [-h] [--disable-https] [--ssl-keyfile SSL_KEYFILE] [--ssl-certfile SSL_CERTFILE] [--ssl-ca-file SSL_CA_FILE] [--web.listen-address WEB.LISTEN_ADDRESS] [--web.listen-port WEB.LISTEN_PORT] - [--collector.config COLLECTOR.CONFIG] [--log.filepath LOG.FILEPATH] [--log.level {debug,info,warn,error,fatal}] [-v] - prometheus_host prometheus_port - -Reprocessing Exporter: A re-processing module for metrics stored in the Prometheus server. - -positional arguments: - prometheus_host from which host to pull data - prometheus_port the port to connect to the Prometheus host - -optional arguments: - -h, --help show this help message and exit - --disable-https disable Https scheme - --ssl-keyfile SSL_KEYFILE - set the path of ssl key file - --ssl-certfile SSL_CERTFILE - set the path of ssl certificate file - --ssl-ca-file SSL_CA_FILE - set the path of ssl ca file - --web.listen-address WEB.LISTEN_ADDRESS - address on which to expose metrics and web interface - --web.listen-port WEB.LISTEN_PORT - listen port to expose metrics and web interface - --collector.config COLLECTOR.CONFIG - according to the content of the yaml file for metric collection - --log.filepath LOG.FILEPATH - the path to log - --log.level {debug,info,warn,error,fatal} - only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal] - -v, --version show program's version number and exit -``` - -**Table 1** reprocessing-exporter parameters - -| Parameter | Description | Value Range | -| :------------------- | :----------------------------------------------------------- | :------------------------------ | -| prometheus_host | Prometheus-server IP address | - | -| prometheus_port | Listening port of Prometheus-server | 1024-65535 | -| -h, --help | Help option | - | -| --disable-https | Disables HTTPS. | - | -| --ssl-keyfile | Path of the HTTPS certificate private key file | - | -| --ssl-certfile | Path of the HTTPS certificate file | - | -| --ssl-ca-file | Path of the HTTPS CA certificate file | | -| --web.listen-address | IP address bound to the exporter service | - | -| --web.listen-port | Listening port of the exporter service | 1024-65535 | -| --collector.config | Path of the configuration file that explicitly specifies the metrics to be collected | - | -| --log.filepath | Path for storing log files. By default, log files are stored in the current directory. | - | -| --log.level | Printing level of the log file. The default level is **INFO**. | debug, info, warn, error, fatal | -| --version | Displays version information. 
| - | - -For details about how to use openGauss-exporter, see the following help information: - -``` -gs_dbmind component opengauss_exporter --help -usage: [-h] --url URL [--config-file CONFIG_FILE] [--include-databases INCLUDE_DATABASES] [--exclude-databases EXCLUDE_DATABASES] [--constant-labels CONSTANT_LABELS] - [--web.listen-address WEB.LISTEN_ADDRESS] [--web.listen-port WEB.LISTEN_PORT] [--disable-cache] [--disable-settings-metrics] - [--disable-statement-history-metrics] [--disable-https] [--disable-agent] [--ssl-keyfile SSL_KEYFILE] [--ssl-certfile SSL_CERTFILE] [--ssl-ca-file SSL_CA_FILE] [--parallel PARALLEL] - [--log.filepath LOG.FILEPATH] [--log.level {debug,info,warn,error,fatal}] [-v] - -openGauss Exporter (DBMind): Monitoring or controlling for openGauss. - -optional arguments: - -h, --help show this help message and exit - --url URL openGauss database target url. It is recommended to connect to the postgres database through this URL, so that the exporter can actively discover and monitor other databases. - --config-file CONFIG_FILE, --config CONFIG_FILE - path to config file. - --include-databases INCLUDE_DATABASES - only scrape metrics from the given database list. a list of label=value separated by comma(,). - --exclude-databases EXCLUDE_DATABASES - scrape metrics from the all auto-discovered databases excluding the list of database. a list of label=value separated by comma(,). - --constant-labels CONSTANT_LABELS - a list of label=value separated by comma(,). - --web.listen-address WEB.LISTEN_ADDRESS - address on which to expose metrics and web interface - --web.listen-port WEB.LISTEN_PORT - listen port to expose metrics and web interface - --disable-cache force not using cache. - --disable-settings-metrics - not collect pg_settings.yml metrics. - --disable-statement-history-metrics - not collect statement-history metrics (including slow queries). - --disable-https disable Https scheme - --disable-agent by default, this exporter also assumes the role of DBMind-Agent, that is, executing database operation and maintenance actions issued by the DBMind service. With this argument, - users can disable the agent functionality, thereby prohibiting the DBMind service from making changes to the database. - --ssl-keyfile SSL_KEYFILE - set the path of ssl key file - --ssl-certfile SSL_CERTFILE - set the path of ssl certificate file - --ssl-ca-file SSL_CA_FILE - set the path of ssl ca file - --parallel PARALLEL not collect pg_settings.yml metrics. - --log.filepath LOG.FILEPATH - the path to log - --log.level {debug,info,warn,error,fatal} - only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal] - -v, --version show program's version number and exit -``` - -**Table 2** openGauss-exporter parameters - -| Parameter | Description | Value Range | -| :---------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| --url | URL of the database server, for example, postgres://user:pwd@host:port/dbname. | If the URL contains special characters (such as @ and /), the special characters must be escaped through URL encoding. For example, the at sign (@) in the password must be escaped to %40, and the slash (/) must be escaped to %2F. Otherwise, the meaning of each column will be incorrectly identified and segmented. For details about the escape rules, see the encoding escape rules. 
The URL address rule complies with the [RFC-1738](https://www.ietf.org/rfc/rfc1738.txt) standard. | -| --constant-labels | Constant list in format of k=v . Constants are separated by commas (,), indicating the constant labels of the exporter. | The format is cluster_name=demo,cluster_id=1. | -| -h, --help | Help option | - | -| --disable-https | Disables HTTPS. | - | -| --ssl-keyfile | Path of the HTTPS certificate private key file | - | -| --ssl-certfile | Path of the HTTPS certificate file | - | -| --ssl-ca-file | Path of the HTTPS CA certificate file | | -| --web.listen-address | IP address bound to the exporter service | - | -| --web.listen-port | Listening port of the exporter service | 1024-65535 | -| --config, --config-file | Path of the configuration file that explicitly specifies the metrics to be collected | - | -| --log.filepath | Path for storing log files. By default, log files are stored in the current directory. | - | -| --log.level | Printing level of the log file. The default level is **INFO**. | debug, info, warn, error, fatal | -| --version | Displays version information. | - | -| --disable-cache | Disables the cache. | - | -| --disable-settings-metrics | Disables the collection of metrics in the **pg_settings** table. | - | -| --disable-statement-history-metrics | Disables the collection of slow SQL statements in the **statement_history** table. | - | -| --disable-agent | Disables the agent behavior. | - | -| --include-databases | Indicates the name of the database whose data is to be collected. If multiple databases are specified, separate them with commas (,). | - | -| --exclude-databases | Indicates the name of the database that is not monitored. If multiple databases are specified, separate them with commas (,).. | - | -| --parallel | Size of the database connection pool connected to openGauss. | Positive integer | - -For details about how to use cmd-exporter, see the following help information: - -``` -usage: [-h] [--constant-labels CONSTANT_LABELS] - [--web.listen-address WEB.LISTEN_ADDRESS] - [--web.listen-port WEB.LISTEN_PORT] - [--disable-https] - [--config CONFIG] [--ssl-keyfile SSL_KEYFILE] - [--ssl-certfile SSL_CERTFILE] [--ssl-ca-file SSL_CA_FILE] - [--parallel PARALLEL] [--log.filepath LOG.FILEPATH] - [--log.level {debug,info,warn,error,fatal}] [-v] - -Command Exporter (DBMind): scrape metrics by performing shell commands. - -optional arguments: - -h, --help show this help message and exit - --constant-labels CONSTANT_LABELS - a list of label=value separated by comma(,). - --web.listen-address WEB.LISTEN_ADDRESS - address on which to expose metrics and web interface - --web.listen-port WEB.LISTEN_PORT - listen port to expose metrics and web interface - --disable-https disable Https scheme - --config CONFIG path to config dir or file. - --ssl-keyfile SSL_KEYFILE - set the path of ssl key file - --ssl-certfile SSL_CERTFILE - set the path of ssl certificate file - --ssl-ca-file SSL_CA_FILE - set the path of ssl ca file - --parallel PARALLEL performing shell command in parallel. - --log.filepath LOG.FILEPATH - the path to log - --log.level {debug,info,warn,error,fatal} - only log messages with the given severity or above. 
- Valid levels: [debug, info, warn, error, fatal] - -v, --version show program's version number and exit -``` - -**Table 3** cmd-exporter parameters - -| Parameter | Description | Value Range | -| :------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| -h, --help | Help option. | - | -| --disable-https | Disables HTTPS. | - | -| --ssl-keyfile | Path of the HTTPS certificate private key file. | - | -| --ssl-certfile | Path of the HTTPS certificate file. | - | -| --ssl-ca-file | Path of the HTTPS CA certificate file. | | -| --web.listen-address | IP address bound to the exporter service. | - | -| --web.listen-port | Listening port of the exporter service. | 1024-65535 | -| --config | Path of the configuration file that explicitly specifies the metrics to be collected. | The **default.yml** file in the **yamls** directory of this function is used by default. You can refer to the configuration file format. If the configuration is incorrect, an error is reported. | -| --log.filepath | Path for storing log files. By default, log files are stored in the current directory. | - | -| --log.level | Printing level of the log file. The default level is **INFO**. | debug, info, warn, error, fatal | -| --parallel | Concurrency of executing shell commands in parallel. | Positive integer | -| --constant-labels | Constant list in format of k=v . Constants are separated by commas (,), indicating the constant labels of the exporter. | The format is cluster_name=demo,cluster_id=1. | -| --version | Displays the version information. | - | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-environment-deployment.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-environment-deployment.md deleted file mode 100644 index 677604a7..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-environment-deployment.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -title: Environment Deployment -summary: Environment Deployment -author: Guo Huan -date: 2022-05-06 ---- - -# Environment Deployment - -1. You can download Prometheus-server and node-exporter from the Prometheus official website and start them according to the official documents. You can also use the quick deployment tool provided by DBMind to deploy them. If you deploy the software by yourself, go to step 3. -2. Use the CLI to quickly deploy Prometheus and all exporters. For the first installation, ensure that the network connection is available. The input parameter is **--online**. - - ``` - gs_dbmind component deployment --online - ``` - - After an online deployment is successfully performed, you can set this parameter to **--offline** to perform offline deployment. - - ``` - gs_dbmind component deployment --offline - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If the one-click deployment tool involves multiple nodes, you are advised to use it on the internal network. - - If one-click deployment is used, the download path and decompression path of the Prometheus and node_exporter software packages are in the $HOME directory. - -3. 
During the deployment, to ensure that the deployment location is correct and the subsequent running and monitoring are normal, the program requires users to interactively enter some parameters. By default, Prometheus and node-exporter listen on 0.0.0.0. Because Prometheus and node-exporter are open-source software, DBMind cannot determine the IP address they should bind to, so you need to configure it yourself. Therefore, during the installation, DBMind prompts the user to bind the listening address to a specific IP address. The tool performs interactive configuration, and you are not advised to modify the configuration file manually. The following is an example of the configuration file of the deployment tool: - - ``` - [DOWNLOADING] - host = https://github.com/prometheus - node_exporter = node_exporter-1.3.1.linux-amd64 - prometheus = prometheus-2.35.0-rc0.linux-amd64 - node_exporter_sha256 = - prometheus_sha256 = - [PROMETHEUS] - host = # IP address of the deployed host for prometheus and reprocessing exporter. - ssh_port = # ssh port - host_username = # Username of the deployed host - path = # Absolute path (No symbolic link) to deploy prometheus and reprocessing exporter - listen_address = # The host IP address for the reprocessing exporter to listen on. - prometheus_port = # The port of prometheus - reprocessing_exporter_port = # The port of reprocessing_exporter - [EXPORTERS] - targets = # The urls of the MogDB database instances, (host:port/database_name,) separated by ','. e.g., 192.168.0.1:200/postgres, 192.168.0.2:200/tpcc10. - ssh_port = # ssh port - host_username = # Username of the deployed hosts - path = # Absolute path (No symbolic link) to deploy node exporters and MogDB exporters - database_username = # Username of the databases - listen_address = # The host IP address for the MogDB exporters to listen on - opengauss_ports_range = # The port range of opengauss_exporters, (start_port-end_port) - node_exporter_port = # The port of node_exporters (their ports are identical) - cmd_exporter_port = # The port of cmd_exporters (their ports are identical) - [EXPORTER-SSL] - enable_ssl = # Whether to use https protocol (True or False) - ssl_certfile = # Absolute path (No symbolic link) to the ssl certificate file - ssl_keyfile = # Absolute path (No symbolic link) to the ssl certificate private key file - ssl_ca_file = # Absolute path (No symbolic link) to the ssl CA file - ``` - - [DOWNLOADING] contains the address for downloading the software packages, the version names of the software installation packages, and the integrity verification codes of the software installation packages. This part is not covered by the interactive input. - - Prometheus and reprocessing_exporter are deployed together on the main control device, of which there is exactly one. The parameters related to this device are grouped under [PROMETHEUS]. - - node_exporter and opengauss_exporter are deployed together on the devices where the databases are located, and there may be multiple such devices. The parameters related to these devices are grouped under [EXPORTERS]. The usernames and passwords must be the same across these database nodes, and the deployment paths and SSL certificate paths must also be the same. - - The **Targets** option of [EXPORTERS] indicates the monitored database instance objects. The format is *IP address***:***Port number***/***DB name*. Multiple database instances are separated by commas (,). 
Currently, the IP address is strictly validated, and domain names cannot be entered. - - For SSL certificate management, Prometheus does not support private key files that contain passwords. Therefore, the one-click deployment function does not support password-protected private key files for Prometheus. - - You need to place the certificate files required by [PROMETHEUS] and [EXPORTERS] in the same path on each node in advance. If multiple nodes are deployed, the certificate file paths on different nodes must be the same. If the path is incorrect and no certificate file is detected during the running of each component, an alarm is generated and the component exits. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - Prometheus and node-exporter do not support SSL private key files with passwords, so encrypted private key files cannot be used with them. Currently, all SSL certificates mentioned here refer to those used by the exporter components. The current version of Prometheus supports only certificates that contain SANs (Subject Alternative Names). - > - In the case of multiple nodes, to remotely connect to the database, you need to add the address information of the main control node to the **pg_hba.conf** file of the database on each branch node and restart the database. Otherwise, the verification fails. - > - If an SSH password-free account is used, any password can be entered during password verification. - > - If you use **deployment** to automatically deploy multiple nodes, ensure that the Python operating environment and corresponding dependency packages are available for the specified user of the node to be deployed. Otherwise, when you use the **--run** parameter to start the node, modules that require the Python operating environment, such as openGauss-exporter, may fail to be started. - > - The port used by the exporter component must be within the valid range (1024-65535). - - You can run the **--help** command to obtain help information. - - ``` - gs_dbmind component deployment --help - ``` - - ``` - usage: [-h] [--online] [--offline] [--run] [--check] [-c CONF] [-e] [-v] - - To deploy Prometheus, node-exporter, cmd-exporter, openGauss-exporter and - reprocessing-exporter - - optional arguments: - -h, --help show this help message and exit - --online Download the Prometheus and node_exporters online. - Deploy Prometheus and exporters to the nodes locally. - --offline Deploy Prometheus and exporters to the nodes locally. - --run Run Prometheus and all the exporters. - --check Check the status of Prometheus and all the exporters. - -c CONF, --conf CONF Indicates the location of the config file to skip - interactive configuration. Default path is {CONFIG_PATH}. - --edit set this arg to edit the config file. - -v, --version show program's version number and exit - ``` - - When the **-c/--conf** parameter is used, it specifies the location of the configuration file to be read. {CONFIG_PATH} varies according to the actual installation path. - -4. After automatic deployment, the **prometheus.yaml** configuration file is automatically generated based on the parameters entered during the configuration. The configuration file is automatically loaded when Prometheus is started. 
The following is an example: - - ``` - alerting: - alertmanagers: - - static_configs: - - targets: null - global: - evaluation_interval: 15s - rule_files: null - scrape_configs: - - job_name: prometheus - scrape_interval: 5s - static_configs: - - targets: - - 10.90.56.175:9090 - - job_name: reprocessing_exporter - scheme: https - static_configs: - - targets: - - 10.90.56.175:8181 - tls_config: - ca_file: xxx.ca - key_file: xxx.key - cert_file: xxx.crt - - job_name: cmd_exporter - scheme: https - static_configs: - - targets: - - 10.90.56.172:9187 - - 10.90.56.172:9188 - tls_config: - ca_file: xxx.ca - key_file: xxx.key - cert_file: xxx.crt - - job_name: node_exporter - scheme: https - static_configs: - - targets: - - 10.90.56.172:9100 - - - job_name: opengauss_exporter - scheme: https - static_configs: - - targets: - - 10.90.56.172:9187 - - 10.90.56.172:9188 - tls_config: - ca_file: xxx.ca - key_file: xxx.key - cert_file: xxx.crt - ``` - - After the deployment is complete, the program is distributed to all target locations and can run automatically. For details, see the next section. - -The exporter components use HTTPS for communication by default. Therefore, you need to provide the SSL certificate and key files through **--ssl-keyfile**, **--ssl-certfile**, and **--ssl-ca-file**. To disable HTTPS, specify the **--disable-https** option. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The default communication protocol used by MogDB is incompatible with PostgreSQL. As a result, the PostgreSQL-based Python driver **psycopg2-binary** installed using the PyPI source cannot connect to the MogDB database. Therefore, you need to compile **psycopg2** or modify GUC parameters for adaptation. You can also download **psycopg2** compiled based on openGauss from the openGauss official website. (The official website provides compilation packages for only some Python versions. You need to check whether a compilation package matches the current Python version.) -> -> - Download the openGauss Python driver from the official website: -> [https://opengauss.org/en/download/](https://opengauss.org/en/download/) -> - For details about the adaptation of the Python driver, see the openGauss operation guide at: -> [https://mp.weixin.qq.com/s/2TobUQKtw0N9sBpMZJr6zw](https://mp.weixin.qq.com/s/2TobUQKtw0N9sBpMZJr6zw) diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-obtaining-help-information.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-obtaining-help-information.md deleted file mode 100644 index 98fe3384..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-obtaining-help-information.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Obtaining Help Information -summary: Obtaining Help Information -author: Guo Huan -date: 2022-05-06 ---- - -# Obtaining Help Information - -You can use the **--help** option to obtain help information. 
For example: - -``` -gs_dbmind component opengauss_exporter --help -gs_dbmind component reprocessing_exporter --help -gs_dbmind component cmd_exporter --help -``` diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-overview.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-overview.md deleted file mode 100644 index 5e6c48ba..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-overview.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2022-05-06 ---- - -# Overview - -Prometheus is a popular open-source monitoring system in the industry, and it is also a time series database. The collectors of Prometheus are called exporters; they collect metrics from the monitored modules. To interconnect with the Prometheus platform, the AI tool provides two types of exporters: openGauss-exporter for collecting database metrics and reprocessing-exporter for reprocessing the collected metrics. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** Prometheus and the exporters are popular monitoring and collection components in the industry. They are deployed on the intranet, do not expose interfaces to external systems, and are used only by internal monitoring platforms. Therefore, to enhance the security of the monitoring platform, users or O&M personnel need to configure firewalls to isolate external access. By default, the Prometheus platform adopts HTTP and has no security access restrictions, because the platform is generally deployed on the intranet and the attack risk is controllable. If you want to enhance security, you can modify the TLS configuration options of Prometheus. However, you are not advised to expose the access interface to external systems. diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-troubleshooting.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-troubleshooting.md deleted file mode 100644 index 08f8e09f..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-troubleshooting.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Troubleshooting -summary: Troubleshooting -author: Guo Huan -date: 2022-05-06 ---- - -# Troubleshooting - -1. A message is displayed, prompting the user to specify the **--ssl-keyfile** and **--ssl-certfile** options. - - By default, the exporter uses HTTPS for communication. Therefore, you must specify the path of the certificate and private key files. To use HTTP for communication, explicitly specify the **--disable-https** option to disable HTTPS. - -2. A message is displayed prompting users to enter the PEM pass phrase. - - In HTTPS communication mode, after specifying the path of the certificate and its key file \(the key file has been encrypted\), you need to enter the password of the encrypted private key file. The password can also be passed through a standard input stream. 
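For instance, the following is a minimal sketch of passing the pass phrase through the standard input stream, assuming **server.key** is an encrypted private key file and the pass phrase is held in a hypothetical **KEY_PASS** environment variable:

```
echo "$KEY_PASS" | gs_dbmind component opengauss_exporter --url postgresql://user:password@ip:port/dbname --ssl-keyfile server.key --ssl-certfile server.crt
```
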
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-usage-guide.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-usage-guide.md deleted file mode 100644 index ae674eee..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter-usage-guide.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Usage Guide -summary: Usage Guide -author: Guo Huan -date: 2022-05-06 ---- - -# Usage Guide - -You can run the **gs_dbmind** command to start an exporter. The following describes how to set up a complete Prometheus monitoring platform. - -1. Run the following command to start Prometheus and all exporters: - - ``` - gs_dbmind component deployment --run - ``` - - For details about how to start Prometheus and each exporter, see the following steps. If you use only the automatic deployment tool to deploy Prometheus, go to step 6. - -2. Run the following command to deploy the Prometheus main process: - - ``` - prometheus --config.file=prometheus.yml - ``` - -3. Deploy openGauss-exporter. Run the following command to start openGauss-exporter with the default listening port number 9187, set the listening IP address to 192.168.1.100, and disable HTTPS: - - ``` - gs_dbmind component opengauss_exporter --url postgresql://user:password@ip:port/dbname --web.listen-address 192.168.1.100 --disable-https - ``` - -4. Deploy reprocessing-exporter. Run the following command to start reprocessing-exporter with the default listening port number 8181, set the listening IP address to 192.168.1.101, set the IP address and port number of the Prometheus server to 192.168.1.100 and 9090, and specify the SSL certificate and private key files: - - ``` - gs_dbmind component reprocessing_exporter 192.168.1.100 9090 --web.listen-address 192.168.1.101 --ssl-keyfile server.key --ssl-certfile server.crt - ``` - -5. Deploy cmd-exporter: Start cmd-exporter with the default parameters and specify the certificate information. - - ``` - gs_dbmind component cmd_exporter --ssl-keyfile server.key --ssl-certfile server.crt - ``` - -6. Deploy node-exporter. Generally, node-exporter needs to be deployed on the Prometheus monitoring platform to monitor the Linux OS. Some AI functions mentioned in the following sections also depend on node-exporter to collect Linux system metrics. Therefore, node-exporter also needs to be deployed by users. For details, visit [this page](https://prometheus.io/docs/guides/node-exporter/\#installing-and-running-the-node-exporter). - - You can run the node-exporter process. The default port number is 9100. The command for starting the process is as follows: - - ``` - node_exporter - ``` - -7. You can run the following command to monitor the running status of Prometheus and all exporters: - - ``` - gs_dbmind component deployment --check - ``` - ->![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** -> -> - To connect to a database from openGauss-exporter, you must have the **monitor admin** permission or higher. Otherwise, some metrics cannot be collected. -> - openGauss-exporter samples slow SQL information from the **dbe\_perf.statement\_history** view. The slow SQL records in **dbe\_perf.statement\_history** are related to **log\_min\_duration\_statement** and **track\_stmt\_stat\_level**. **log\_min\_duration\_statement** indicates the slow SQL threshold, in milliseconds. You can set it to a specific value as required. 
**track\_stmt\_stat\_level** indicates the SQL record level. The default value is **'OFF,L0'**, indicating that only slow SQL statements are recorded, at level L0. Exercise caution when modifying this parameter. -> - openGauss-exporter collects database information, including data in some system catalogs and views \(for details, see the opengauss\_exporter configuration file\), node-exporter collects system metrics, mainly related to system disks and CPUs, and reprocessing\_exporter performs secondary processing based on some metrics in prometheus-server \(for details, see the reprocessing\_exporter configuration file\). The processed data is then provided to users. -> - Prometheus-server has a timeout mechanism when pulling exporter data. The timeout interval is controlled by **scrape\_timeout** \(10s by default\). Therefore, when the exporter collects a large amount of data, you can increase the value of **scrape\_timeout** as required to prevent timeout errors. Note that the value of **scrape\_interval** (collection interval, 15s by default) cannot be smaller than that of **scrape\_timeout**. Otherwise, an exception occurs. -> - Ensure that the time zone of the database is consistent with that of the system; otherwise, the time of time-related metrics may be inconsistent with the system time. -> - When HTTPS is used for communication, the tool checks the permission on the certificate and key files and the validity period of the certificate. If the file permission is greater than 600, an alarm is generated. If the certificate will expire within 90 days, an alarm is generated. -> - When a metric is repeatedly collected, an exception occurs on openGauss-exporter, and the exception information is recorded in the logs. -> - When setting the **--config**, **--disable-settings-metrics** and **--disable-statement-history-metrics** parameters of openGauss-exporter, please note that: -> -> 1. If you do not specify any of them, the tool collects metrics in the three configuration files in the **yamls** directory at the same time. -> 2. If you explicitly specify **--config**, the tool does not collect metrics in the **default.yml** file in the **yamls** directory, but collects metrics in the specified configuration file. In addition, metrics in the **pg\_settings.yml** and **statements.yml** files can be properly collected. In this case, ensure that the metrics in the specified configuration file and those in **pg\_settings.yml** and **statements.yml** are not repeatedly collected. -> 3. If you explicitly specify **--disable-settings-metrics**, the tool does not collect metrics in **pg\_settings.yml** in the **yamls** directory. If you explicitly specify **--disable-statement-history-metrics**, the tool does not collect metrics in **statements.yml** \(related to slow SQL\) in the **yamls** directory. -> -> - Generally, after the exporter is started, it does not terminate its process and exit when an error occurs (for example, when the address of the connected database is unavailable, or the connected database user is deleted or disabled). Instead, it records the error information in the log and retries in the background. 
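As a quick manual sanity check after deployment (a sketch that assumes the example addresses and default ports used in the steps above), you can pull an exporter's metrics endpoint directly and confirm that metric samples are returned:

```
# openGauss-exporter was started with --disable-https in step 3, so plain HTTP is used here.
curl http://192.168.1.100:9187/metrics | head -n 20
# reprocessing-exporter was started with an SSL certificate in step 4; -k skips certificate verification for a quick check.
curl -k https://192.168.1.101:8181/metrics | head -n 20
```
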
diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter.md deleted file mode 100644 index b8d0096a..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Prometheus Exporter -summary: Prometheus Exporter -author: zhang cuiping -date: 2023-04-07 ---- - -# Prometheus Exporter - -- **[Overview](prometheus-exporter-overview.md)** -- **[Environment Deployment](prometheus-exporter-environment-deployment.md)** -- **[Usage Guide](prometheus-exporter-usage-guide.md)** -- **[Obtaining Help Information](prometheus-exporter-obtaining-help-information.md)** -- **[Command Reference](prometheus-exporter-command-reference.md)** -- **[Troubleshooting](prometheus-exporter-troubleshooting.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/1-service.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/1-service.md deleted file mode 100644 index 2f66db70..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/1-service.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -title: service -summary: service -author: Guo Huan -date: 2022-05-06 ---- - -# service - -This subcommand can be used to initialize the configuration directory and start and stop background tasks. - -## Initializing the Configuration Directory - -You can run the **gs_dbmind service setup** subcommand to initialize the configuration directory. This directory contains the configuration files and logs of the DBMind. Some files in the directory are described as follows: - -- **dbmind.conf**: DBMind parameter configuration file. You can modify it using the **gs_dbmind set** command or a text editor. -- **dynamic_config.db**: DBMind service metadata stored on the local node, including algorithm hyperparameters and monitoring thresholds. This file cannot be configured by users. -- **metric_map.conf**: monitoring metric mapping table, which can be used to adapt to different collection platforms. For example, in DBMind, the monitored system CPU usage is named **os_cpu_usage**, but a user-defined collection tool may name the CPU usage **my_cpu_usage_rate**. In this case, if you want DBMind to use **my_cpu_usage_rate** to represent the CPU usage metric, you need to modify this configuration file; that is, add the **os_cpu_usage = my_cpu_usage_rate** configuration item for mapping. Common users are advised to use the collection components and solution provided by DBMind, in which case this configuration file does not need to be modified. -- **logs**: This directory stores logs generated by the DBMind service. - -You can initialize the configuration directory in interactive or non-interactive mode. For example, if the name of the configuration directory to be initialized is **confpath**, perform the following operations: - -**Interactive mode** - -``` -gs_dbmind service setup -c confpath --interactive -``` - -After running the preceding command, you can configure the configuration items in interactive mode through the CLI client. - -**Non-interactive mode** - -In non-interactive mode, the initialization consists of three steps: starting configuration, modifying configuration items, and initializing configuration. 
In the second step, you need to manually edit the configuration file by using the text editor. The procedure is as follows: - -1. Run the following command to start the configuration: - - ``` - gs_dbmind service setup -c confpath - ``` - -2. After the preceding command is executed, the **dbmind.conf** configuration file is generated in the **confpath** directory. You need to use the text editor to manually modify the file. Related parameters are described as follows: - - ``` - # TSDB is used to specify the metric storage location of the monitored database system. Currently, only Prometheus is supported. - # The mandatory parameters are the IP address and port number of Prometheus. Other parameters (such as username, password, and SSL certificate information) are optional. - [TSDB] - name = prometheus # The type of time-series database. Options: prometheus. - host = # Address of time-series database. - port = # Port to connect to time-series database. - username = (null) # User name to connect to time-series database. - password = (null) # Password to connect to time-series database. - ssl_certfile = (null) # The certificate file for ssl connections. - ssl_keyfile = (null) # Certificate private key file. - ssl_keyfile_password = (null) # Password for ssl keyfile. - ssl_ca_file = (null) # CA certificate to validate requests. - - # METADATABASE is used to specify where the analysis results generated by DBMind are stored. - # Currently, SQLite, openGauss, and PostgreSQL databases are supported. If the openGauss database is used, pay attention to the compatibility of the Python driver psycopg2. You can use the driver provided by openGauss, or compile or modify GUC parameters for adaptation. - # Other information is about the connection to the database. Note that the user must have the permission to create the database. - [METADATABASE] - dbtype = sqlite # Database type. Options: sqlite, opengauss, postgresql. - host = # Address of meta-data database. - port = # Port to connect to meta-data database. - username = # User name to connect to meta-data database. - password = (null) # Password to connect to meta-data database. - database = # Database name to connect to meta-data database. - - # WORKER is used to specify the number of worker subprocesses that can be used by DBMind. If 0 is written, adaptation is performed, that is, CPU resources are used as much as possible. - [WORKER] - process_num = 0 # Number of worker processes on a local node. Less than or equal to zero means adaptive. - - # AGENT is used to specify the information for the DBMind to connect to the openGauss Agent. By using this agent, DBMind can obtain the real-time status of the monitored instance, improving the analysis accuracy. In addition, you can deliver some change actions to the DB instance, for example, ending a slow SQL statement (depending on whether the user configured here has sufficient permissions). - # The value of master_url is the IP address of the Agent. Because openGauss-exporter functions as the Agent, the value of master_url is the IP address of openGauss-exporter. - # In addition, openGauss-exporter supports HTTPS. Therefore, you can specify an SSL certificate based on the configuration. - [AGENT] - master_url = # The agent URL of the master node. e.g., https://127.0.0.1:9187. - username = # Username to login the monitoring database. Credential for agent. - password = # Password to login the monitoring database. Credential for agent. - ssl_certfile = (null) # The certificate file for ssl connections. 
- ssl_keyfile = (null) # Certificate private key file. - ssl_keyfile_password = (null) # Password for ssl keyfile. - ssl_ca_file = (null) # CA certificate to validate requests. - - # SELF-MONITORING is used to configure parameters for monitoring database instances. - # detection_interval indicates the execution frequency of the periodic check task, in seconds. - # last_detection_time indicates the length of the latest data used by each check task. - # forecasting_future_time indicates a length of a future time predicted by the time series forecast feature. - # golden_kpi indicates the monitoring metric that needs to be focused on. - # result_storage_retention indicates the maximum storage duration of diagnosis results. - [SELF-MONITORING] - detection_interval = 600 # Unit is second. The interval for performing health examination on the openGauss through monitoring metrics. - last_detection_time = 600 # Unit is second. The time for last detection. - forecasting_future_time = 3600 # Unit is second. How long the KPI in the future for forecasting. Meanwhile, this is the period for the forecast. - # The following golden_kpi of monitoring system is vital. - golden_kpi = os_cpu_usage, os_mem_usage, os_disk_usage, gaussdb_qps_by_instance # DBMind only measures and detects the golden metrics in the anomaly detection processing. - result_storage_retention = 604800 # Unit is second. How long should the results retain? If retention is more than the threshold, DBMind will delete them. - - # SELF-OPTIMIZATION is used to modify the following parameters to intervene the DBMind optimization result. Generally, the default values are used. - # optimization_interval: interval for executing optimization tasks. - # max_reserved_period: maximum storage duration of optimization results. - # max_index_num: upper limit of the recommended index result. - # max_index_storage: upper limit of the disk space occupied by the recommended index page. - # max_template_num: maximum number of SQL statements recorded in the SQL template recommended for the index. - # kill_slow_query: determines whether to enable automatic scanning and killing of slow SQL statements. If this function is enabled, you can run the set subcommand to set the threshold, for example, 70 seconds. The value must be a positive integer, in seconds. - # gs_dbmind set slow_sql_threshold max_elapsed_time 70 - [SELF-OPTIMIZATION] - optimization_interval = 86400 # Unit is second. The interval for generating report. - max_reserved_period = 100 # Unit is day. Maximum retention time. - max_index_num = 10 # Maximum number of advised indexes. - max_index_storage = 100 # Unit is MB. - max_template_num = 5000 # Maximum number of templates. - kill_slow_query = false # Whether to actively check and kill slow query. The default elapsed time of a slow query to be killed is 1 minute. - - # LOG indicates the DBMind log information. - [LOG] - maxbytes = 10485760 # Default is 10Mb. Maximum size of a single log file. If maxbytes is zero, the file grows indefinitely. - backupcount = 1 # Number of backups of log files. - level = INFO # Options: DEBUG, INFO, WARNING, ERROR. - - # The following information is displayed when you perform interactive configuration. You do not need to configure it. - [COMMENT] - worker = The form of executing compute-intensive tasks. Tasks can be executed locally or distributed to multiple nodes for execution. - tsdb = Configure the data source for time series data, which come from monitoring the openGauss instance. 
- metadatabase = Configure the database to record meta-data, in which the database can store meta-data for the forecasting and diagnosis process. The database should be an openGauss instance. - self-monitoring = Set up parameters for monitoring and diagnosing openGauss instance. - self-optimization = Set up parameters for openGauss optimization. - ``` - -3. After manually modifying the preceding parameters, initialize the configuration items. In this phase, DBMind preliminarily checks the correctness of the configuration items, initializes the structure and content of the metadata database tables for storing result data, and encrypts the plaintext passwords in the configuration items. - - ``` - gs_dbmind service setup --initialize -c confpath - ``` - -4. Start the DBMind background service based on the configuration directory. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> 1. The comments in the configuration file are used to prompt users in interactive mode. Do not manually modify or delete the comments. -> 2. Make sure that the value of a configuration item and the comment are separated by a space. Otherwise, the system regards the comment as the value of the configuration item. -> 3. If a percent sign (%) in a configuration item needs to be escaped, escape it with another percent sign. For example, if the password is **password%**, write it as **password%%**. - -## Starting a Service - -After the configuration directory is initialized, you can start the DBMind background service based on the configuration directory. For example, if the configuration directory is **confpath**, run the following command: - -``` -gs_dbmind service start -c confpath -``` - -After the preceding command is executed, the system displays a message indicating that the service has been started. If no additional parameter is specified, this command starts all background tasks by default. If you want to start only one background task, add the **--only-run** option. For example, if you only want to start the slow SQL root cause analysis service, run the following command: - -``` -gs_dbmind service start -c confpath --only-run slow_query_diagnosis -``` - -## Stopping a Service - -Similar to starting a service, stopping a service has an even simpler command line structure. You only need to specify the address of the configuration directory. For example, if the configuration directory is **confpath**, run the following command: - -``` -gs_dbmind service stop -c confpath -``` - -The DBMind service automatically exits after its currently running background tasks are complete. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** -> -> - The metadatabase user in **[METADATABASE]** must have the permission to create tables and insert and update data in the database. Otherwise, an exception will occur during tool execution. -> - Currently, multiple services cannot be started separately using the same configuration file. -> - The tool provides the **requirements.txt** file. You can use this file to install required third-party dependencies. - -## Command Reference - -You can use the **--help** option to obtain the help information about this mode. 
For example: - -``` -gs_dbmind service --help -``` - -``` -usage: service [-h] -c DIRECTORY [--only-run {slow_query_diagnosis,forecast}] [--interactive | --initialize] {setup,start,stop} - -positional arguments: - {setup,start,stop} - perform an action for service - -optional arguments: - -h, --help show this help message and exit - -c DIRECTORY, --conf DIRECTORY - set the directory of configuration files - --only-run {slow_query_diagnosis,forecast} - explicitly set a certain task running in the backend - --interactive configure and initialize with interactive mode - --initialize initialize and check configurations after configuring. -``` - -**Table 1** Parameters of the gs_dbmind service subcommand - -| Parameter | Description | Value Range | -| :------------ | :----------------------------------------- | :----------------------------------------------------------- | -| action | Action parameter | - setup: initializes configuration directory.
- start: starts a service.
- stop: stops a service. | -| -c,--conf | Configuration file directory | - | -| --initialize | Initializes configuration parameters. | - | -| --interactive | Configures parameters in interactive mode. | - | -| --only-run | Runs only the specified module. | - forecast: prediction module.<br>
- slow_query_diagnosis: root cause analysis module for slow SQL statements. | -| -h, --help | Help information | - | diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/2-component.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/2-component.md deleted file mode 100644 index ea7edcb2..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/2-component.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: component -summary: component -author: Guo Huan -date: 2022-05-06 ---- - -# component - -This subcommand can be used to start DBMind components, including the exporter for monitoring metrics and other AI functions. It forwards the commands passed by the user through the CLI client to the corresponding components. For details about the commands of different components, see the corresponding sections of the components. - -## Command Reference - -You can use the **–help** option to obtain the help information about this mode. For example: - -``` -gs_dbmind component --help -``` - -``` -usage: component [-h] COMPONENT_NAME ... - -positional arguments: - COMPONENT_NAME choice a component to start. ['extract_log', 'forecast', 'index_advisor', 'opengauss_exporter', 'reprocessing_exporter', 'slow_query_diagnosis', 'sqldiag', 'xtuner'] - ARGS arguments for the component to start - -optional arguments: - -h, --help show this help message and exit -``` - -**Table 1** Parameters of the gs_dbmind component subcommand - -| Parameter | Description | Value Range | -| :------------- | :------------------- | :----------------------------------------------------------- | -| COMPONENT_NAME | Component name | extract_log, forecast, index_advisor, opengauss_exporter, reprocessing_exporter, slow_query_diagnosis, sqldiag, xtuner | -| ARGS | Component parameters | Refer to the command description of the corresponding component. | -| -h, –help | Help information | - | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/3-set.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/3-set.md deleted file mode 100644 index 7bf0bb61..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/3-set.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: set -summary: set -author: Guo Huan -date: 2022-05-06 ---- - -# set - -This command is used to change the parameter values in the **dbmind.conf** configuration file. You can also manually modify the **dbmind.conf** configuration file. The two methods have no difference. For example, to change the value of **host** in the **TSDB** configuration item of the **dbmind.conf** file in the **confpath** directory to **127.0.0.1**, run the following command: - -``` -gs_dbmind set TSDB host 127.0.0.1 -c confpath -``` - -You can choose either of the methods to modify common parameters. The DBMind configuration file does not store plaintext passwords. If a user uses a plaintext password, the DBMind displays a message and exits. Therefore, the user can change the password in either of the following ways: - -1. Modify the **dbmind.conf** file first and run the following command to reinitialize the configuration file: - - ``` - gs_dbmind service setup --initialize -c confpath - ``` - -2. Run the **set** subcommand to set the parameters. For example: - - ``` - gs_dbmind set METADATABASE password xxxxx -c confpath - ``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This command is case sensitive to character strings. 
If you enter an incorrect character string, an error may occur during the command execution. The **set** subcommand involves many types of parameter values. Therefore, only the set values are preliminarily checked. You need to ensure that the entered values are correct. For example, some values must be positive integers instead of negative integers. - -## Command Reference - -You can use the **–help** option to obtain the help information about this mode. For example: - -``` -gs_dbmind set --help -``` - -``` -usage: set [-h] -c DIRECTORY section option target - -positional arguments: - section which section (case sensitive) to set - option which option to set - target the parameter target to set - -optional arguments: - -h, --help show this help message and exit - -c DIRECTORY, --conf DIRECTORY - set the directory of configuration files -``` - -**Table 1** Parameters of the set subcommand: python dbmind/ set xxx - -| Parameter | Description | Value Range | -| :-------- | :---------------------------------------- | :---------- | -| -h, –help | Help information | - | -| -c, –conf | Configuration file directory **confpath** | - | -| section | Setting area | - | -| option | Configuration item | - | -| target | Set value | - | diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/component.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/component.md deleted file mode 100644 index ea7edcb2..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/component.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: component -summary: component -author: Guo Huan -date: 2022-05-06 ---- - -# component - -This subcommand can be used to start DBMind components, including the exporter for monitoring metrics and other AI functions. It forwards the commands passed by the user through the CLI client to the corresponding components. For details about the commands of different components, see the corresponding sections of the components. - -## Command Reference - -You can use the **–help** option to obtain the help information about this mode. For example: - -``` -gs_dbmind component --help -``` - -``` -usage: component [-h] COMPONENT_NAME ... - -positional arguments: - COMPONENT_NAME choice a component to start. ['extract_log', 'forecast', 'index_advisor', 'opengauss_exporter', 'reprocessing_exporter', 'slow_query_diagnosis', 'sqldiag', 'xtuner'] - ARGS arguments for the component to start - -optional arguments: - -h, --help show this help message and exit -``` - -**Table 1** Parameters of the gs_dbmind component subcommand - -| Parameter | Description | Value Range | -| :------------- | :------------------- | :----------------------------------------------------------- | -| COMPONENT_NAME | Component name | extract_log, forecast, index_advisor, opengauss_exporter, reprocessing_exporter, slow_query_diagnosis, sqldiag, xtuner | -| ARGS | Component parameters | Refer to the command description of the corresponding component. 
| -| -h, –help | Help information | - | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/dbmind-mode.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/dbmind-mode.md deleted file mode 100644 index a60f1a29..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/dbmind-mode.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: DBMind Mode -summary: DBMind Mode -author: Guo Huan -date: 2022-05-06 ---- - -# DBMind Mode - -You can run the **gs_dbmind** command to invoke all functions of AI4DB. This command provides the following basic functions: - -- Service functions: You can use the **service** subcommand to implement service related functions, including creating and initializing configuration directories, starting background services, and stopping background services. -- Invoking components: You can use the **component** subcommand to invoke components. AI4DB functions (such as index recommendation and parameter tuning) can be invoked in real time in this mode. -- Setting parameters: You can use the **set** subcommand to modify the configuration file in the configuration directory. The configuration file can also be modified by using the text editor. - -You can use the **–help** option to obtain the help information about the preceding modes. For example: - -``` -gs_dbmind --help -``` - -``` -usage: [-h] [--version] {service,set,component} ... - -MogDB DBMind: An autonomous platform for MogDB - -optional arguments: - -h, --help show this help message and exit - --version show program's version number and exit - -available subcommands: - {service,set,component} - type ' -h' for help on a specific subcommand - service send a command to DBMind to change the status of the service - set set a parameter - component pass command line arguments to each sub-component. -``` - -**Table 1** gs_dbmind options - -| Parameter | Description | Value Range | -| :--------- | :---------------------------------------------- | :---------- | -| -h, --help | Help information | - | -| --version | Version number | - | -| service | Subcommand related to service functions | - | -| component | Subcommand for invoking components | - | -| set | Subcommand for modifying the configuration file | - | - -- **[Service](service.md)** -- **[Component](component.md)** -- **[Set](set.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/service.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/service.md deleted file mode 100644 index 2f66db70..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/service.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -title: service -summary: service -author: Guo Huan -date: 2022-05-06 ---- - -# service - -This subcommand can be used to initialize the configuration directory and start and stop background tasks. - -## Initializing the Configuration Directory - -You can run the **gs_dbmind service setup** subcommand to initialize the configuration directory. This directory may contain the configuration files and logs of the DBMind. Some files in the directory are described as follows: - -- **dbmind.conf**: DBMind parameter configuration file. You can modify it using the **gs_dbmind set** command or a text editor. -- **dynamic_config.db**: DBMind service metadata stored on the local node, including algorithm hyperparameters and monitoring thresholds. This file contains DBMind service metadata and cannot be configured by users. 
-- **metric_map.conf**: monitoring metric mapping table, which can be used to adapt to different collection platforms. For example, in DBMind, the monitored system CPU usage is named **os_cpu_usage**, but a user-defined collection tool may name the CPU usage **my_cpu_usage_rate**. In this case, if you want DBMind to represent the CPU usage **metric my_cpu_usage_rate**, you need to modify this configuration item. That is, add the **os_cpu_usage = my_cpu_usage_rate** configuration item for mapping. For common users, you are advised to use the collection component and solution of the DBMind. In this case, you do not need to modify the configuration file. -- **logs**: This directory stores logs generated by the DBMind service. - -You can initialize the configuration directory in interactive or non-interactive mode. For example, if the name of the configuration directory to be initialized is **confpath**, perform the following operations: - -**Interactive mode** - -``` -gs_dbmind service setup -c confpath --interactive -``` - -After running the preceding command, you can configure the configuration items in interactive mode through the CLI client. - -**Non-interactive mode** - -In non-interactive mode, the initialization consists of three steps: starting configuration, modifying configuration items, and initializing configuration. In the second step, you need to manually edit the configuration file by using the text editor. The procedure is as follows: - -1. Run the following command to start the configuration: - - ``` - gs_dbmind service setup -c confpath - ``` - -2. After the preceding command is executed, the **dbmind.conf** configuration file is generated in the **confpath** directory. You need to use the text editor to manually modify the file. Related parameters are described as follows: - - ``` - # TSDB is used to specify the metric storage location of the monitored database system. Currently, only Prometheus is supported. - # The mandatory parameters are the IP address and port number of Prometheus. Other parameters (such as username, password, and SSL certificate information) are optional. - [TSDB] - name = prometheus # The type of time-series database. Options: prometheus. - host = # Address of time-series database. - port = # Port to connect to time-series database. - username = (null) # User name to connect to time-series database. - password = (null) # Password to connect to time-series database. - ssl_certfile = (null) # The certificate file for ssl connections. - ssl_keyfile = (null) # Certificate private key file. - ssl_keyfile_password = (null) # Password for ssl keyfile. - ssl_ca_file = (null) # CA certificate to validate requests. - - # METADATABASE is used to specify where the analysis results generated by DBMind are stored. - # Currently, SQLite, openGauss, and PostgreSQL databases are supported. If the openGauss database is used, pay attention to the compatibility of the Python driver psycopg2. You can use the driver provided by openGauss, or compile or modify GUC parameters for adaptation. - # Other information is about the connection to the database. Note that the user must have the permission to create the database. - [METADATABASE] - dbtype = sqlite # Database type. Options: sqlite, opengauss, postgresql. - host = # Address of meta-data database. - port = # Port to connect to meta-data database. - username = # User name to connect to meta-data database. - password = (null) # Password to connect to meta-data database. 
- database = # Database name to connect to meta-data database. - - # WORKER is used to specify the number of worker subprocesses that can be used by DBMind. If 0 is written, adaptation is performed, that is, CPU resources are used as much as possible. - [WORKER] - process_num = 0 # Number of worker processes on a local node. Less than or equal to zero means adaptive. - - # AGENT is used to specify the information for the DBMind to connect to the openGauss Agent. By using this agent, DBMind can obtain the real-time status of the monitored instance, improving the analysis accuracy. In addition, you can deliver some change actions to the DB instance, for example, ending a slow SQL statement (depending on whether the user configured here has sufficient permissions). - # The value of master_url is the IP address of the Agent. Because openGauss-exporter functions as the Agent, the value of master_url is the IP address of openGauss-exporter. - # In addition, openGauss-exporter supports HTTPS. Therefore, you can specify an SSL certificate based on the configuration. - [AGENT] - master_url = # The agent URL of the master node. e.g., https://127.0.0.1:9187. - username = # Username to login the monitoring database. Credential for agent. - password = # Password to login the monitoring database. Credential for agent. - ssl_certfile = (null) # The certificate file for ssl connections. - ssl_keyfile = (null) # Certificate private key file. - ssl_keyfile_password = (null) # Password for ssl keyfile. - ssl_ca_file = (null) # CA certificate to validate requests. - - # SELF-MONITORING is used to configure parameters for monitoring database instances. - # detection_interval indicates the execution frequency of the periodic check task, in seconds. - # last_detection_time indicates the length of the latest data used by each check task. - # forecasting_future_time indicates a length of a future time predicted by the time series forecast feature. - # golden_kpi indicates the monitoring metric that needs to be focused on. - # result_storage_retention indicates the maximum storage duration of diagnosis results. - [SELF-MONITORING] - detection_interval = 600 # Unit is second. The interval for performing health examination on the openGauss through monitoring metrics. - last_detection_time = 600 # Unit is second. The time for last detection. - forecasting_future_time = 3600 # Unit is second. How long the KPI in the future for forecasting. Meanwhile, this is the period for the forecast. - # The following golden_kpi of monitoring system is vital. - golden_kpi = os_cpu_usage, os_mem_usage, os_disk_usage, gaussdb_qps_by_instance # DBMind only measures and detects the golden metrics in the anomaly detection processing. - result_storage_retention = 604800 # Unit is second. How long should the results retain? If retention is more than the threshold, DBMind will delete them. - - # SELF-OPTIMIZATION is used to modify the following parameters to intervene the DBMind optimization result. Generally, the default values are used. - # optimization_interval: interval for executing optimization tasks. - # max_reserved_period: maximum storage duration of optimization results. - # max_index_num: upper limit of the recommended index result. - # max_index_storage: upper limit of the disk space occupied by the recommended index page. - # max_template_num: maximum number of SQL statements recorded in the SQL template recommended for the index. 
- # kill_slow_query: determines whether to enable automatic scanning and killing of slow SQL statements. If this function is enabled, you can run the set subcommand to set the threshold, for example, 70 seconds. The value must be a positive integer, in seconds. - # gs_dbmind set slow_sql_threshold max_elapsed_time 70 - [SELF-OPTIMIZATION] - optimization_interval = 86400 # Unit is second. The interval for generating report. - max_reserved_period = 100 # Unit is day. Maximum retention time. - max_index_num = 10 # Maximum number of advised indexes. - max_index_storage = 100 # Unit is MB. - max_template_num = 5000 # Maximum number of templates. - kill_slow_query = false # Whether to actively check and kill slow query. The default elapsed time of a slow query to be killed is 1 minute. - - # LOG indicates the DBMind log information. - [LOG] - maxbytes = 10485760 # Default is 10Mb. Maximum size of a single log file. If maxbytes is zero, the file grows indefinitely. - backupcount = 1 # Number of backups of log files. - level = INFO # Options: DEBUG, INFO, WARNING, ERROR. - - # The following information is displayed when you perform interactive configuration. You do not need to configure it. - [COMMENT] - worker = The form of executing compute-intensive tasks. Tasks can be executed locally or distributed to multiple nodes for execution. - tsdb = Configure the data source for time series data, which come from monitoring the openGauss instance. - metadatabase = Configure the database to record meta-data, which the database can store meta-data for the forecasting and diagnosis process. The database should be an openGauss instance. - self-monitoring = Set up parameters for monitoring and diagnosing openGauss instance. - self-optimization = Set up parameters for openGauss optimization. - ``` - -3. After manually modifying the preceding parameters, initialize the configuration items. In this phase, DBMind preliminarily checks the correctness of configuration items, initializes the structure and content of the metadata database table for storing result data, and encrypts the plaintext passwords in the configuration items. - - ``` - gs_dbmind service setup --initialize -c confpath - ``` - -4. Start the DBMind background service based on the configuration directory. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> 1. The comments in the configuration file are used to prompt users in interactive mode. Do not manually modify or delete the comments. -> 2. Make sure that the value of the configuration item and the comment are separated by a space. Otherwise, the system regards the comment as the value of the configuration item. -> 3. If special characters in a configuration item need to be escaped, use the percent sign ( %) to escape the special characters. For example, if the password is **password %**, use the percent sign ( %) to escape the special characters, that is, **password %%**. - -## Starting a Service - -After the configuration directory is initialized, you can start the DBMind background service based on the configuration directory. For example, if the configuration directory is **confpath**, run the following command: - -``` -gs_dbmind service start -c confpath -``` - -After the preceding command is executed, the system displays a message indicating that the service has been started. If no additional parameter is specified, this command starts all background tasks by default. If you want to start only one background task, add the **–only-run** option. 
For example, if you only want to start the slow SQL root cause analysis service, run the following command: - -``` -gs_dbmind service start -c confpath --only-run slow_query_diagnosis -``` - -## Stopping a Service - -Similar to starting a service, stopping a service has a simpler command line structure. You only need to specify the address of the configuration directory. For example, if the configuration directory is **confpath**, run the following command: - -``` -gs_dbmind service stop -c confpath -``` - -The DBMind service automatically exits after the running task is complete in the background. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** -> -> - The metabase user in **[METADATABASE]** must have the permission to create tables and insert and update data in the database. Otherwise, an exception will occur during tool execution. -> - Currently, multiple services cannot be started separately using the same configuration file. -> - The tool provides the **requirement.txt** file. You can use this file to install required third-party dependencies. - -## Command Reference - -You can use the **–help** option to obtain the help information about this mode. For example: - -``` -gs_dbmind service --help -``` - -``` -usage: service [-h] -c DIRECTORY [--only-run {slow_query_diagnosis,forecast}] [--interactive | --initialize] {setup,start,stop} - -positional arguments: - {setup,start,stop} - perform an action for service - -optional arguments: - -h, --help show this help message and exit - -c DIRECTORY, --conf DIRECTORY - set the directory of configuration files - --only-run {slow_query_diagnosis,forecast} - explicitly set a certain task running in the backend - --interactive configure and initialize with interactive mode - --initialize initialize and check configurations after configuring. -``` - -**Table 1** Parameters of the gs_dbmind service subcommand - -| Parameter | Description | Value Range | -| :------------ | :----------------------------------------- | :----------------------------------------------------------- | -| action | Action parameter | - setup: initializes configuration directory.
- start: starts a service.
- stop: stops a service. | -| -c,--conf | Configuration file directory | - | -| --initialize | Initializes configuration parameters. | - | -| --interactive | Configures parameters in interactive mode. | - | -| --only-run | Runs only the specified module. | - forecast: prediction module.<br>
- slow_query_diagnosis: root cause analysis module for slow SQL statements. | -| -h, --help | Help information | - | diff --git a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/set.md b/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/set.md deleted file mode 100644 index 7bf0bb61..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/ai4db/dbmind-mode/set.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: set -summary: set -author: Guo Huan -date: 2022-05-06 ---- - -# set - -This command is used to change the parameter values in the **dbmind.conf** configuration file. You can also manually modify the **dbmind.conf** configuration file. The two methods have no difference. For example, to change the value of **host** in the **TSDB** configuration item of the **dbmind.conf** file in the **confpath** directory to **127.0.0.1**, run the following command: - -``` -gs_dbmind set TSDB host 127.0.0.1 -c confpath -``` - -You can choose either of the methods to modify common parameters. The DBMind configuration file does not store plaintext passwords. If a user uses a plaintext password, the DBMind displays a message and exits. Therefore, the user can change the password in either of the following ways: - -1. Modify the **dbmind.conf** file first and run the following command to reinitialize the configuration file: - - ``` - gs_dbmind service setup --initialize -c confpath - ``` - -2. Run the **set** subcommand to set the parameters. For example: - - ``` - gs_dbmind set METADATABASE password xxxxx -c confpath - ``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** This command is case sensitive to character strings. If you enter an incorrect character string, an error may occur during the command execution. The **set** subcommand involves many types of parameter values. Therefore, only the set values are preliminarily checked. You need to ensure that the entered values are correct. For example, some values must be positive integers instead of negative integers. - -## Command Reference - -You can use the **–help** option to obtain the help information about this mode. For example: - -``` -gs_dbmind set --help -``` - -``` -usage: set [-h] -c DIRECTORY section option target - -positional arguments: - section which section (case sensitive) to set - option which option to set - target the parameter target to set - -optional arguments: - -h, --help show this help message and exit - -c DIRECTORY, --conf DIRECTORY - set the directory of configuration files -``` - -**Table 1** Parameters of the set subcommand: python dbmind/ set xxx - -| Parameter | Description | Value Range | -| :-------- | :---------------------------------------- | :---------- | -| -h, –help | Help information | - | -| -c, –conf | Configuration file directory **confpath** | - | -| section | Setting area | - | -| option | Configuration item | - | -| target | Set value | - | diff --git a/product/en/docs-mogdb/v5.2/AI-features/db4ai/db4ai.md b/product/en/docs-mogdb/v5.2/AI-features/db4ai/db4ai.md deleted file mode 100644 index 4bf0faca..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/db4ai/db4ai.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: DB4AI Database-driven AI -summary: DB4AI Database-driven AI -author: Guo Huan -date: 2022-05-06 ---- - -# DB4AI Database-driven AI - -DB4AI uses database capabilities to drive AI tasks and implement data storage and technology stack isomorphism. 
By integrating AI algorithms into the database, MogDB supports the native AI computing engine, model management, AI operators, and native AI execution plan, providing users with inclusive AI technologies. Different from the traditional AI modeling process, DB4AI one-stop modeling eliminates repeated data flowing among different platforms, simplifies the development process, and plans the optimal execution path through the database, so that developers can focus on the tuning of specific services and models. It outcompetes similar products in ease-of-use and performance. - -- **[Native DB4AI Engine](native-db4ai-engine.md)** -- **[Full-process AI](./full-process-ai/full-process-ai.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/db4ai-query-for-model-training-and-prediction.md b/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/db4ai-query-for-model-training-and-prediction.md deleted file mode 100644 index 0c3bfad0..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/db4ai-query-for-model-training-and-prediction.md +++ /dev/null @@ -1,310 +0,0 @@ ---- -title: DB4AI-Query -summary: DB4AI-Query -author: zhang cuiping -date: 2023-04-07 ---- - -# DB4AI-Query for Model Training and Prediction - -The current version of MogDB supports the native DB4AI capability. By introducing native AI operators, MogDB simplifies the operation process and fully utilizes the optimization and execution capabilities of the database optimizer and executor to obtain the high-performance model training capability in the database. With a simpler model training and prediction process and higher performance, developers can focus on model tuning and data analysis in a shorter period of time, avoiding fragmented technology stacks and redundant code implementation. - -## Keyword Parsing - -**Table 1** DB4AI syntax and keywords - -| Name | Description | -| ------------ | ---------------------------------------------------------- | -| CREATE MODEL | Creates a model, trains it, and saves the model. | -| PREDICT BY | Uses an existing model for prediction. | -| TARGET | Target column name of a training or prediction task. | -| FEATURES | Data feature column name of a training or prediction task. | -| MODEL | Model name of a training task. | - -## Developer Guide - -1. Introduce the algorithms supported in this version. - - DB4AI of the current version supports logistic regression (binary classification tasks), linear regression, and vector machine algorithms (classification tasks) based on the SGD operator, as well as the K-Means clustering algorithm based on the K-Means operator. - -2. Learn about the model training syntax. - - - CREATE MODEL - - You can run the **CREATE MODEL** statement to create and train a model. 
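For a supervised algorithm, the statement additionally carries a **TARGET** clause. The following minimal sketch shows only the overall clause order; the **houses** table and its **area**, **bedrooms**, and **price** columns are hypothetical and serve purely as an illustration:

```
CREATE MODEL price_model USING linear_regression  -- model name and algorithm
    FEATURES area, bedrooms                       -- input feature columns
    TARGET price                                  -- prediction target (supervised algorithms only)
    FROM houses                                   -- training data table
    WITH learning_rate = 0.05;                    -- optional hyperparameters; defaults apply otherwise
```

The unsupervised K-Means walkthrough below omits **TARGET**, because clustering requires no labeled column.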
Taking dataset **kmeans_2d** as an example, the data content of the table is as follows: - - ``` - MogDB=# select * from kmeans_2d; - id | position - ----+------------------------------------- - 1 | {74.5268815685995,88.2141939294524} - 2 | {70.9565760521218,98.8114827475511} - 3 | {76.2756086327136,23.8387574302033} - 4 | {17.8495847294107,81.8449544720352} - 5 | {81.2175785354339,57.1677675866522} - 6 | {53.97752255667,49.3158342130482} - 7 | {93.2475341879763,86.934042100329} - 8 | {72.7659293473698,19.7020415100269} - 9 | {16.5800288529135,75.7475957670249} - 10 | {81.8520747194998,40.3476078575477} - 11 | {76.796671198681,86.3827232690528} - 12 | {59.9231450678781,90.9907738864422} - 13 | {70.161884885747,19.7427458665334} - 14 | {11.1269539105706,70.9988166182302} - 15 | {80.5005071521737,65.2822235273197} - 16 | {54.7030725912191,52.151339428965} - 17 | {103.059707058128,80.8419883321039} - 18 | {85.3574452036992,14.9910179991275} - 19 | {28.6501615960151,76.6922890325077} - 20 | {69.7285806713626,49.5416352967732} - (20 rows) - ``` - - The data type of the **position** field in this table is double precision[]. - - - The following uses K-Means as an example to describe how to train a model. Specify **position** as a feature column in the **kmeans_2d** training set, and use the K-Means algorithm to create and save the **point_kmeans** model. - - ``` - MogDB=# CREATE MODEL point_kmeans USING kmeans FEATURES position FROM kmeans_2d WITH num_centroids=3; - NOTICE: Hyperparameter max_iterations takes value DEFAULT (10) - NOTICE: Hyperparameter num_centroids takes value 3 - NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000010) - NOTICE: Hyperparameter batch_size takes value DEFAULT (10) - NOTICE: Hyperparameter num_features takes value DEFAULT (2) - NOTICE: Hyperparameter distance_function takes value DEFAULT (L2_Squared) - NOTICE: Hyperparameter seeding_function takes value DEFAULT (Random++) - NOTICE: Hyperparameter verbose takes value DEFAULT (0) - NOTICE: Hyperparameter seed takes value DEFAULT (0) - MODEL CREATED. PROCESSED 1 - ``` - - In the preceding command: - - - The **CREATE MODEL** statement is used to train and save a model. - - - **USING** specifies the algorithm name. - - - **FEATURES** specifies the features of the training model and needs to be added based on the column name of the training data table. - - - **TARGET** specifies the training target of the model. It can be the column name of the data table required for training or an expression, for example, **price > 10000**. - - - **WITH** specifies the hyperparameters used for model training. When the hyperparameter is not set by the user, the framework uses the default value. - - The framework supports various hyperparameter combinations for different operators. - - **Table 2** Hyperparameters supported by operators - - | Operator | Hyperparameter | - | :----------------------------------------------------------- | :----------------------------------------------------------- | - | GD(logistic_regression, linear_regression, and svm_classification) | optimizer(char*), verbose(bool), max_iterations(int), max_seconds(double), batch_size(int), learning_rate(double), decay(double), and tolerance(double)SVM limits the hyperparameter **lambda(double)**. 
| K-Means | max_iterations(int), num_centroids(int), tolerance(double), batch_size(int), num_features(int), distance_function(char*), seeding_function(char*), verbose(int), and seed(int) | - - The default value and value range of each hyperparameter are as follows: - - **Table 3** Default values and value ranges of hyperparameters - - | Operator | Default Hyperparameter Value | Value Range | Hyperparameter Description | - | :----------------------------------------------------------- | :------------------------------------------ | :------------------------------------------- | :------------------------- | - | GD (logistic_regression, linear_regression, and svm_classification) | optimizer = gd (gradient descent) | **gd** or **ngd** (natural gradient descent) | Optimizer | - | | verbose = false | **T** or **F** | Log display | - | | max_iterations = 100 | (0,INT_MAX_VALUE] | Maximum iterations | - | | max_seconds = 0 (The running duration is not limited.) | [0,INT_MAX_VALUE] | Running duration | - | | batch_size = 1000 | (0,MAX_MEMORY_LIMIT] | Number of data records selected per training | - | | learning_rate = 0.8 | (0,DOUBLE_MAX_VALUE] | Learning rate | - | | decay = 0.95 | (0,DOUBLE_MAX_VALUE] | Weight decay rate | - | | tolerance = 0.0005 | (0,DOUBLE_MAX_VALUE] | Tolerance | - | | seed = 0 (random value of **seed**) | [0,INT_MAX_VALUE] | Seed | - | | lambda = 0.01 (SVM only) | (0,DOUBLE_MAX_VALUE) | Regularization parameter | - | K-Means | max_iterations = 10 | [1,INT_MAX_VALUE] | Maximum iterations | - | | num_centroids = 10 | [1,MAX_MEMORY_LIMIT] | Number of clusters | - | | tolerance = 0.00001 | (0,1) | Centroid error tolerance | - | | batch_size = 10 | [1,MAX_MEMORY_LIMIT] | Number of data records selected per training | - | | num_features = 2 | [1,GS_MAX_COLS] | Number of sample features | - | | distance_function = "L2_Squared" | **L1**, **L2**, **L2_Squared**, or **Linf** | Distance calculation method | - | | seeding_function = "Random++" | **"Random++"** or **"KMeans\|\|"** | Method for initializing seed points | - | | verbose = 0U | {0,1,2} | Verbose mode | - | | seed = 0U | [0,INT_MAX_VALUE] | Seed | - - In the table, MAX_MEMORY_LIMIT indicates the maximum number of tuples that can be loaded in memory, and GS_MAX_COLS indicates the maximum number of attributes in a database table. - - - If the model is saved successfully, the following information is returned: - - ``` - MODEL CREATED. PROCESSED x - ``` - -3. View the model information. - - After the training is complete, the model is stored in the **gs_model_warehouse** system catalog, together with information about the model and the training process. - - To view a model, query this system catalog directly.
For example, run the following SQL statement to view the model named **point_kmeans**: - - ``` - MogDB=# select * from gs_model_warehouse where modelname='point_kmeans'; - -[ RECORD 1 ]---------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - modelname | point_kmeans - modelowner | 10 - createtime | 2021-04-30 17:30:39.59044 - processedtuples | 20 - discardedtuples | 0 - pre_process_time | 6.2001e-05 - exec_time | .000185272 - iterations | 5 - outputtype | 23 - modeltype | kmeans - query | CREATE MODEL point_kmeans USING kmeans FEATURES position FROM kmeans_2d WITH num_centroids=3; - modeldata | - weight | - hyperparametersnames | {max_iterations,num_centroids,tolerance,batch_size,num_features,distance_function,seeding_function,verbose,seed} - hyperparametersvalues | {10,3,1e-05,10,2,L2_Squared,Random++,0,0} - hyperparametersoids | {23,23,701,23,23,1043,1043,23,23} - coefnames | {original_num_centroids,actual_num_centroids,dimension,distance_function_id,seed,coordinates} - coefvalues | {3,3,2,2,572368998,"(77.282589,23.724434)(74.421616,73.239455)(18.551682,76.320914)"} - coefoids | - trainingscoresname | - trainingscoresvalue | - modeldescribe | {"id:1,objective_function:542.851169,avg_distance_to_centroid:108.570234,min_distance_to_centroid:1.027078,max_distance_to_centroid:297.210108,std_dev_distance_to_centroid:105.053257,cluster_size:5","id:2,objective_function:5825.982139,avg_distance_to_centroid:529.634740,min_distance_to_centroid:100.270449,max_distance_to_centroid:990.300588,std_dev_distance_to_centroid:285.915094,cluster_size:11","id:3,objective_function:220.792591,avg_distance_to_centroid:55.198148,min_distance_to_centroid:4.216111,max_distance_to_centroid:102.117204,std_dev_distance_to_centroid:39.319118,cluster_size:4"} - ``` - -4. Use an existing model to perform a prediction task. - - Use the **SELECT** and **PREDICT BY** keywords to complete the prediction task based on the existing model. - - Query syntax: SELECT… PREDICT BY… (FEATURES…)… FROM…; - - ``` - MogDB=# SELECT id, PREDICT BY point_kmeans (FEATURES position) as pos FROM (select * from kmeans_2d limit 10); - id | pos - ----+----- - 1 | 2 - 2 | 2 - 3 | 1 - 4 | 3 - 5 | 2 - 6 | 2 - 7 | 2 - 8 | 1 - 9 | 3 - 10 | 1 - (10 rows) - ``` - - For the same prediction task, the results of the same model are stable. In addition, models trained based on the same hyperparameter and training set are stable. AI model training is random (random gradient descent of data distribution each batch). Therefore, the computing performance and results of different models can vary slightly. - -5. View the execution plan. - - You can use the **EXPLAIN** statement to analyze the execution plan in the model training or prediction process of **CREATE MODEL** and **PREDICT BY**. The keyword **EXPLAIN** can be followed by a **CREATE MODEL** or **PREDICT BY** clause or an optional parameter. 
The supported parameters are as follows: - - **Table 4** Parameters supported by EXPLAIN - - | Parameter | Description | - | :-------- | :----------------------------------------------------------- | - | ANALYZE | Boolean variable, which is used to add description information such as the running time and number of loop times | - | VERBOSE | Boolean variable, which determines whether to output the training running information to the client | - | COSTS | Boolean variable | - | CPU | Boolean variable | - | DETAIL | Boolean variable, which is available only in distributed mode | - | NODES | Boolean variable, which is available only in distributed mode | - | NUM_NODES | Boolean variable, which is available only in distributed mode | - | BUFFERS | Boolean variable | - | TIMING | Boolean variable | - | PLAN | Boolean variable | - | FORMAT | Optional format type: TEXT, XML, JSON, and YAML | - - Example: - - ``` - MogDB=# Explain CREATE MODEL patient_logisitic_regression USING logistic_regression FEATURES second_attack, treatment TARGET trait_anxiety > 50 FROM patients WITH batch_size=10, learning_rate = 0.05; - NOTICE: Hyperparameter batch_size takes value 10 - NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) - NOTICE: Hyperparameter learning_rate takes value 0.050000 - NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) - NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) - NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) - NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) - NOTICE: Hyperparameter seed takes value DEFAULT (0) - NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) - NOTICE: GD shuffle cache size 212369 - QUERY PLAN - ------------------------------------------------------------------- - Gradient Descent (cost=0.00..0.00 rows=0 width=0) - -> Seq Scan on patients (cost=0.00..32.20 rows=1776 width=12) - (2 rows) - ``` - -6. Perform troubleshooting in case of exceptions. - - - Training phase - - - Scenario 1: When the value of the hyperparameter exceeds the value range, the model training fails and an error message is returned. For example: - - ``` - MogDB=# CREATE MODEL patient_linear_regression USING linear_regression FEATURES second_attack,treatment TARGET trait_anxiety FROM patients WITH optimizer='aa'; - NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) - NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) - NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) - NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) - NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) - NOTICE: Hyperparameter optimizer takes value aa - ERROR: Invalid hyperparameter value for optimizer. Valid values are: gd, ngd. 
(default is gd) - ``` - - - Scenario 2: If the model name already exists, the model fails to be saved, and an error message with the cause is displayed: - - ``` - MogDB=# CREATE MODEL patient_linear_regression USING linear_regression FEATURES second_attack,treatment TARGET trait_anxiety FROM patients; - NOTICE: Hyperparameter batch_size takes value DEFAULT (1000) - NOTICE: Hyperparameter decay takes value DEFAULT (0.950000) - NOTICE: Hyperparameter learning_rate takes value DEFAULT (0.800000) - NOTICE: Hyperparameter max_iterations takes value DEFAULT (100) - NOTICE: Hyperparameter max_seconds takes value DEFAULT (0) - NOTICE: Hyperparameter optimizer takes value DEFAULT (gd) - NOTICE: Hyperparameter tolerance takes value DEFAULT (0.000500) - NOTICE: Hyperparameter seed takes value DEFAULT (0) - NOTICE: Hyperparameter verbose takes value DEFAULT (FALSE) - NOTICE: GD shuffle cache size 5502 - ERROR: The model name "patient_linear_regression" already exists in gs_model_warehouse. - ``` - - - Scenario 3: If the value in the **FEATURE** or **TARGETS** column is \*, **ERROR** is returned with the error cause: - - ``` - MogDB=# CREATE MODEL patient_linear_regression USING linear_regression FEATURES * TARGET trait_anxiety FROM - patients; - ERROR: FEATURES clause cannot be * - ----------------------------------------------------------------------------------------------------------------------- - MogDB=# CREATE MODEL patient_linear_regression USING linear_regression FEATURES second_attack,treatment TARGET * FROM patients; - ERROR: TARGET clause cannot be * - ``` - - - Scenario 4: If the keyword **TARGET** is used in the unsupervised learning method or is not applicable to the supervised learning method, **ERROR** is returned with the error cause: - - ``` - MogDB=# CREATE MODEL patient_linear_regression USING linear_regression FEATURES second_attack,treatment FROM patients; - ERROR: Supervised ML algorithms require TARGET clause - ----------------------------------------------------------------------------------------------------------------------------- - CREATE MODEL patient_linear_regression USING linear_regression TARGET trait_anxiety FROM patients; ERROR: Supervised ML algorithms require FEATURES clause - ``` - - - Scenario 5: If the GUC parameter **statement_timeout** is set, the statement that is executed due to training timeout will be terminated. In this case, execute the **CREATE MODEL** statement. Parameters such as the size of the training set, number of training rounds (**iteration**), early termination conditions (**tolerance** and **max_seconds**), and number of parallel threads (**nthread**) affect the training duration. When the duration exceeds the database limit, the statement execution is terminated and model training fails. - - - Prediction phase - - - Scenario 6: If the model name cannot be found in the system catalog, the database reports **ERROR**: - - ``` - MogDB=# select id, PREDICT BY patient_logistic_regression (FEATURES second_attack,treatment) FROM patients; - ERROR: There is no model called "patient_logistic_regression". - ``` - - - Scenario 7: If the data dimension and data type of the **FEATURES** task are inconsistent with those of the training set, **ERROR** is reported and the error cause is displayed. 
For example: - - ``` - MogDB=# select id, PREDICT BY patient_linear_regression (FEATURES second_attack) FROM patients; - ERROR: Invalid number of features for prediction, provided 1, expected 2 - CONTEXT: referenced column: patient_linear_regression_pred - ------------------------------------------------------------------------------------------------------------------------------------- - MogDB=# select id, PREDICT BY patient_linear_regression (FEATURES 1,second_attack,treatment) FROM patients; - ERROR: Invalid number of features for prediction, provided 3, expected 2 - CONTEXT: referenced column: patient_linear_regression_pre - ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/db4ai-snapshots-for-data-version-management.md b/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/db4ai-snapshots-for-data-version-management.md deleted file mode 100644 index 43fb5f39..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/db4ai-snapshots-for-data-version-management.md +++ /dev/null @@ -1,266 +0,0 @@ ---- -title: DB4AI-Snapshots for Data Version Management -summary: DB4AI-Snapshots for Data Version Management -author: Guo Huan -date: 2022-05-06 ---- - -# DB4AI-Snapshots for Data Version Management - -DB4AI-Snapshots is used by the DB4AI module to manage dataset versions. With the DB4AI-Snapshots component, developers can easily and quickly perform data preprocessing operations such as feature filtering and type conversion. In addition, developers can perform version control on training datasets like Git. After a data table snapshot is created, it can be used as a view. However, once the data table snapshot is released, it is fixed as static data. To modify the content of this data table snapshot, you need to create a data table snapshot with a different version number. - -## Lifecycle of DB4AI-Snapshots - -DB4AI-Snapshots can be published, archived, or purged. After being published, DB4AI-Snapshots can be used. Archived DB4AI-Snapshots are in the archiving period and will not be used to train new models. Instead, old data is used to verify new models. Purged DB4AI-Snapshots will not be found in the database system. - -Note that the DB4AI-Snapshots function is used to provide unified training data for users. Team members can use the specified training data to retrain the machine learning models, facilitating collaboration between users. Therefore, the DB4AI-Snapshots feature is not supported in scenarios where user data conversion is not supported, such as **private users** and **separation of duty** (**enableSeparationOfDuty** set to **ON**). - -You can run the **CREATE SNAPSHOT** statement to create a data table snapshot. The created snapshot is in the published state by default. You can create a table snapshot in either **MSS** or **CSS** mode, which can be configured using the GUC parameter **db4ai_snapshot_mode**. For the MSS mode, it is realized by materialization algorithm where data entity of original datasets is stored. The CSS mode is implemented based on a relative calculation algorithm where incremental data information is stored. The metadata of the data table snapshot is stored in the system directory of DB4AI. You can view it in the **db4ai.snapshot** system catalog. - -You can run the **ARCHIVE SNAPSHOT** statement to mark a data table snapshot as archived, and run the **PUBLISH SNAPSHOT** statement to mark it as published again. 
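Concretely, assuming a snapshot **s1@1.0** already exists, the three lifecycle transitions map onto the following statements (a minimal sketch; complete, runnable examples appear in the usage guide below):

```sql
ARCHIVE SNAPSHOT s1@1.0;   -- stop using the snapshot for training new models
PUBLISH SNAPSHOT s1@1.0;   -- mark the snapshot as published, making it usable again
PURGE SNAPSHOT s1@1.0;     -- permanently delete the snapshot and reclaim its storage
```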
The state of a data table snapshot is marked to help data scientists work together as a team. - -If a table snapshot is no longer useful, you can run the **PURGE SNAPSHOT** statement to permanently delete it and restore the storage space. - -## DB4AI-Snapshots Usage Guide - -1. Create a table and insert table data. - - If a data table exists in the database, you can create a data table snapshot based on the existing data table. To facilitate subsequent demonstration, create a data table named **t1** and insert test data into the table. - - ```sql - create table t1 (id int, name varchar); - insert into t1 values (1, 'zhangsan'); - insert into t1 values (2, 'lisi'); - insert into t1 values (3, 'wangwu'); - insert into t1 values (4, 'lisa'); - insert into t1 values (5, 'jack'); - ``` - - Run the following SQL statement to query the content of the collocation data table: - - ```sql - SELECT * FROM t1; - id | name - ----+---------- - 1 | zhangsan - 2 | lisi - 3 | wangwu - 4 | lisa - 5 | jack - (5 rows) - ``` - -2. Use DB4AI-Snapshots. - - - Create DB4AI-Snapshots. - - - Example 1: CREATE SNAPSHOT… AS - - In the following example, the default version separator is an at sign (@), and the default subversion separator is a period (.). You can set the separator by setting the **db4ai_snapshot_version_delimiter** and **db4ai_snapshot_version_separator** parameters. - - ```sql - create snapshot s1@1.0 comment is 'first version' as select * from t1; - schema | name - --------+-------- - public | s1@1.0 - (1 row) - ``` - - The command output indicates that a snapshot has been created for data table **s1** and the version number is **1.0**. A created data table snapshot can be queried in the same way as a common view, but cannot be updated using the **INSERT INTO** statement. For example, you can use any of the following statements to query the content of data table snapshot **s1** of version 1.0: - - ```sql - SELECT * FROM s1@1.0; - SELECT * FROM public.s1@1.0; - SELECT * FROM public . s1 @ 1.0; - id | name - ----+---------- - 1 | zhangsan - 2 | lisi - 3 | wangwu - 4 | lisa - 5 | jack - (5 rows) - ``` - - You can run the following SQL statement to modify the content of the **t1** data table: - - ```sql - UPDATE t1 SET name = 'tom' where id = 4; - insert into t1 values (6, 'john'); - insert into t1 values (7, 'tim'); - ``` - - When content of data table **t1** is retrieved again, it is found that although the content of data table **t1** has changed, the query result of data table snapshot **s1@1.0** does not change. Because data in data table **t1** has changed, to use content of the current data table as a version 2.0, you can create snapshot **s1@2.0** by using the following SQL statement: - - ```sql - create snapshot s1@2.0 as select * from t1; - ``` - - According to the foregoing example, it can be found that the data table snapshot can solidify content of a data table, to avoid instability during training of a machine learning model caused by data modification in the process, and can also avoid a lock conflict caused when multiple users access and modify the same table at the same time. - - - Example 2: CREATE SNAPSHOT… FROM - - You can run an SQL statement to inherit a created data table snapshot and generate a new data table snapshot based on the data modification. 
Example: - - ```sql - create snapshot s1@3.0 from @1.0 comment is 'inherits from @1.0' using (INSERT VALUES(6, 'john'), (7, 'tim'); DELETE WHERE id = 1); - schema | name - --------+-------- - public | s1@3.0 - (1 row) - ``` - - Where, @ is the data table snapshot version separator and the from clause is followed by the existing data table snapshot, in the format of @ + version number. You can add an operation keyword (such as **INSERT**, **UPDATE**, **DELETE**, or **ALTER**) after the **USING** keyword. In the **INSERT INTO** and **DELETE FROM** statements, clauses related to data table snapshot names, such as **INTO** and **FROM**, can be omitted. For details, see [AI Feature Functions](../../../reference-guide/functions-and-operators/ai-feature-functions.md). - - In the example, based on the **s1@1.0** snapshot, insert two pieces of data and delete one piece of data to generate a new snapshot **s1@3.0**. Then, retrieve **s1@3.0**. - - ```sql - SELECT * FROM s1@3.0; - id | name - ----+---------- - 2 | lisi - 3 | wangwu - 4 | lisa - 5 | jack - 6 | john - 7 | tim - (7 rows) - ``` - - - Delete the data table snapshot **SNAPSHOT**. - - ```sql - purge snapshot s1@3.0; - schema | name - --------+-------- - public | s1@3.0 - (1 row) - ``` - - At this time, no data can be retrieved from **s1@3.0**, and the records of the data table snapshot in the **db4ai.snapshot** view are cleared. Deleting the data table snapshot of this version does not affect the data table snapshots of other versions. - - - Sample from a data table snapshot. - - Example: Use the sampling rate 0.5 to extract data from snapshot **s1**. - - ```sql - sample snapshot s1@2.0 stratify by name as nick at ratio .5; - schema | name - --------+------------ - public | s1nick@2.0 - (1 row) - ``` - - You can use this function to create a training set and a test set. For example: - - ```sql - SAMPLE SNAPSHOT s1@2.0 STRATIFY BY name AS _test AT RATIO .2, AS _train AT RATIO .8 COMMENT IS 'training'; - schema | name - --------+---------------- - public | s1_test@2.0 - public | s1_train@2.0 - (2 rows) - ``` - - - Publish a data table snapshot. - - Run the following SQL statement to mark the data table snapshot **s1@2.0** as published: - - ```sql - publish snapshot s1@2.0; - schema | name - --------+-------- - public | s1@2.0 - (1 row) - ``` - - - Archive a data table snapshot. - - Run the following statement to mark the data table snapshot as archived: - - ```sql - archive snapshot s1@2.0; - schema | name - --------+-------- - public | s1@2.0 - (1 row) - ``` - - You can use the views provided by DB4AI-Snapshots to view the status of the current data table snapshot and other information. - - ```sql - select * from db4ai.snapshot; - id | parent_id | matrix_id | root_id | schema | name | owner | commands | comment | published | archived | created | row_count - ----+-----------+-----------+---------+--------+------------+--------+------------------------------------------+---------+-----------+----------+----------------------------+----------- - 1 | | | 1 | public | s1@2.0 | omm | {"select *","from t1 where id > 3",NULL} | | t | f | 2021-04-17 09:24:11.139868 | 2 - 2 | 1 | | 1 | public | s1nick@2.0 | omm | {"SAMPLE nick .5 {name}"} | | f | f | 2021-04-17 10:02:31.73923 | 0 - ``` - -3. Perform troubleshooting in case of exceptions. - - - The data table or DB4AI snapshot does not exist. 
- - ```sql - purge snapshot s1nick@2.0; - publish snapshot s1nick@2.0; - --------- - ERROR: snapshot public."s1nick@2.0" does not exist - CONTEXT: PL/pgSQL function db4ai.publish_snapshot(name,name) line 11 at assignment - - archive snapshot s1nick@2.0; - ---------- - ERROR: snapshot public."s1nick@2.0" does not exist - CONTEXT: PL/pgSQL function db4ai.archive_snapshot(name,name) line 11 at assignment - ``` - - - Before deleting a snapshot, ensure that other snapshots that depend on it have been deleted. - - ```sql - purge snapshot s1@1.0; - ERROR: cannot purge root snapshot 'public."s1@1.0"' having dependent snapshots - HINT: purge all dependent snapshots first - CONTEXT: referenced column: purge_snapshot_internal - SQL statement "SELECT db4ai.purge_snapshot_internal(i_schema, i_name)" - PL/pgSQL function db4ai.purge_snapshot(name,name) line 71 at PERFORM - ``` - -4. Set GUC parameters. - - - db4ai_snapshot_mode: - - There are two snapshot modes: MSS (materialized mode, used to store data entities) and CSS (computing mode, used to store incremental information). The snapshot mode can be switched between MSS and CSS. The default snapshot mode is MSS. - - - db4ai_snapshot_version_delimiter: - - Used to set the data table snapshot version separator. The at sign (@) is the default data table snapshot version separator. - - - db4ai_snapshot_version_separator - - Used to set the data table snapshot subversion separator. The period (.) is the default data table snapshot subversion separator. - -5. View the snapshot details of a data table in the DB4AI schema by using **db4ai.snapshot**. - - ```sql - MogDB=# \d db4ai.snapshot - Table "db4ai.snapshot" - Column | Type | Modifiers - -----------+-----------------------------+--------------------------- - id | bigint | - parent_id | bigint | - matrix_id | bigint | - root_id | bigint | - schema | name | not null - name | name | not null - owner | name | not null - commands | text[] | not null - comment | text | - published | boolean | not null default false - archived | boolean | not null default false - created | timestamp without time zone | default pg_systimestamp() - row_count | bigint | not null - Indexes: - "snapshot_pkey" PRIMARY KEY, btree (schema, name) TABLESPACE pg_default - "snapshot_id_key" UNIQUE CONSTRAINT, btree (id) TABLESPACE pg_default - ``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The DB4AI namespace is a private domain of this function. Functional indexes cannot be created in the DB4AI namespace. diff --git a/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/full-process-ai.md b/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/full-process-ai.md deleted file mode 100644 index 89167026..00000000 --- a/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/full-process-ai.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Full-process AI -summary: Full-process AI -author: Guo Huan -date: 2022-05-06 ---- - -# Full-process AI - -A traditional AI task usually has multiple processes. For example, a data collection process includes data collection, data cleaning, and data storage, an algorithm training process includes data preprocessing, training, and model storage and management, and a model training process includes hyperparameter tuning. The entire lifecycle of such machine learning models can be integrated into the database. The model training, management, and optimization processes are performed closest to the data storage. 
Out-of-the-box, SQL-driven AI lifecycle management is provided on the database side; this capability is called full-process AI.

MogDB implements some full-process AI functions, which are described in detail in this section.

- **[PLPython Fenced Mode](plpython-fenced-mode.md)**
- **[DB4AI-Snapshots for Data Version Management](db4ai-snapshots-for-data-version-management.md)**
- **[DB4AI-Query for Model Training and Prediction](db4ai-query-for-model-training-and-prediction.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/plpython-fenced-mode.md b/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/plpython-fenced-mode.md
deleted file mode 100644
index 41001d99..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/db4ai/full-process-ai/plpython-fenced-mode.md
+++ /dev/null
@@ -1,135 +0,0 @@
---
title: PLPython Fenced Mode
summary: PLPython Fenced Mode
author: Guo Huan
date: 2022-05-06
---

# PLPython Fenced Mode

PL/Python is an untrusted (insecure) procedural language, so it is supported in fenced mode. To integrate Python into the database during compilation, add the **--with-python** option to **configure**; to build PL/Python against a specific Python installation, additionally specify the Python path with the **--with-includes='/python-dir=path'** option.

Before starting the database, set the GUC parameter **unix_socket_directory** to specify the directory used for communication between unix_socket processes. Create the **user-set-dir-path** directory in advance and grant read, write, and execute permissions on it.

```
unix_socket_directory = '/user-set-dir-path'
```

After the configuration is complete, start the database.

If the database was compiled with PL/Python and the GUC parameter **unix_socket_directory** is set, the **fenced-Master** process is created automatically during database startup. If the database was not compiled with Python, you need to start the master process in fenced mode manually after setting the GUC parameter.

Run the following command to start the **fenced-Master** process:

```
mogdb --fenced -k /user-set-dir-path -D /user-set-dir-path &
```

After fenced mode is configured, the computation of PL/Python fenced UDFs is performed in a **fenced-worker** process.

## Usage Guide

- Create an extension.

  - When the compiled PL/Python is Python 2:

    ```
    MogDB=# create extension plpythonu;
    CREATE EXTENSION
    ```

  - When the compiled PL/Python is Python 3:

    ```
    MogDB=# create extension plpython3u;
    CREATE EXTENSION
    ```

  The following uses Python 2 as an example.

- Create a PL/Python fenced UDF.

  ```
  MogDB=# create or replace function pymax(a int, b int)
  MogDB-# returns INT
  MogDB-# language plpythonu fenced
  MogDB-# as $$
  MogDB$# import numpy
  MogDB$# if a > b:
  MogDB$#     return a;
  MogDB$# else:
  MogDB$#     return b;
  MogDB$# $$;
  CREATE FUNCTION
  ```

- View UDF information.
  ```
  MogDB=# select * from pg_proc where proname='pymax';
  -[ RECORD 1 ]----+--------------
  proname          | pymax
  pronamespace     | 2200
  proowner         | 10
  prolang          | 16388
  procost          | 100
  prorows          | 0
  provariadic      | 0
  protransform     | -
  proisagg         | f
  proiswindow      | f
  prosecdef        | f
  proleakproof     | f
  proisstrict      | f
  proretset        | f
  provolatile      | v
  pronargs         | 2
  pronargdefaults  | 0
  prorettype       | 23
  proargtypes      | 23 23
  proallargtypes   |
  proargmodes      |
  proargnames      | {a,b}
  proargdefaults   |
  prosrc           |
                   | import numpy
                   | if a > b:
                   |     return a;
                   | else:
                   |     return b;
                   |
  probin           |
  proconfig        |
  proacl           |
  prodefaultargpos |
  fencedmode       | t
  proshippable     | f
  propackage       | f
  prokind          | f
  proargsrc        |
  ```

- Run the UDF.

  - Create a data table.

    ```
    MogDB=# create table temp (a int ,b int) ;
    CREATE TABLE
    MogDB=# insert into temp values (1,2),(2,3),(3,4),(4,5),(5,6);
    INSERT 0 5
    ```

  - Run the UDF.

    ```
    MogDB=# select pymax(a,b) from temp;
     pymax
    -------
         2
         3
         4
         5
         6
    (5 rows)
    ```
diff --git a/product/en/docs-mogdb/v5.2/AI-features/db4ai/native-db4ai-engine.md b/product/en/docs-mogdb/v5.2/AI-features/db4ai/native-db4ai-engine.md
deleted file mode 100644
index fcca6bd6..00000000
--- a/product/en/docs-mogdb/v5.2/AI-features/db4ai/native-db4ai-engine.md
+++ /dev/null
@@ -1,313 +0,0 @@
---
title: Native DB4AI Engine
summary: Native DB4AI Engine
author: Guo Huan
date: 2022-05-06
---

# Native DB4AI Engine

The current version of openGauss supports the native DB4AI capability. By introducing native AI operators, openGauss simplifies the operation process and fully utilizes the optimization and execution capabilities of the database optimizer and executor to obtain high-performance in-database model training. With a simpler model training and prediction process and higher performance, developers can focus on model tuning and data analysis in a shorter period of time, avoiding fragmented technology stacks and redundant code implementation.

## Keyword Parsing

**Table 1** DB4AI syntax and keywords

| Category | Name         | Description                                                |
| :------- | :----------- | ---------------------------------------------------------- |
| Syntax   | CREATE MODEL | Creates a model, trains it, and saves the model.            |
|          | PREDICT BY   | Uses an existing model for prediction.                      |
|          | DROP MODEL   | Deletes a model.                                            |
| Keyword  | TARGET       | Target column name of a training or prediction task.        |
|          | FEATURES     | Data feature column name of a training or prediction task.  |
|          | MODEL        | Model name of a training task.                              |

## Usage Guide

1. Learn about the algorithms supported in this version.

   DB4AI in the current version supports the following algorithms:

   **Table 2** Supported algorithms

   | Optimization Algorithm | Algorithm                       |
   | :--------------------- | :------------------------------ |
   | GD                     | logistic_regression             |
   |                        | linear_regression               |
   |                        | svm_classification              |
   |                        | PCA                             |
   |                        | multiclass                      |
   | Kmeans                 | kmeans                          |
   | xgboost                | xgboost_regression_logistic     |
   |                        | xgboost_binary_logistic         |
   |                        | xgboost_regression_squarederror |
   |                        | xgboost_regression_gamma        |

2. Learn about the model training syntax.

   - CREATE MODEL

     You can run the **CREATE MODEL** statement to create and train a model. This SQL statement uses the public Iris dataset for model training.

   - The following uses **xgboost_regression_logistic** as an example to describe how to train a model.
     Specify **sepal_length**, **sepal_width**, **petal_length**, and **petal_width** as feature columns in the **tb_iris_1** training set, and use the **xgboost_regression_logistic** algorithm to create and save the **iris_classification_model** model.

     ```
     MogDB=# CREATE MODEL iris_classification_model USING xgboost_regression_logistic FEATURES sepal_length, sepal_width,petal_length,petal_width TARGET target_type < 2 FROM tb_iris_1 WITH nthread=4, max_depth=8;
     MODEL CREATED. PROCESSED 1
     ```

     In the preceding command:

     - The **CREATE MODEL** statement is used to train and save a model.
     - **USING** specifies the algorithm name.
     - **FEATURES** specifies the features of the training model and needs to be added based on the column names of the training data table.
     - **TARGET** specifies the training target of the model. It can be the name of a column in the data table required for training or an expression, for example, **price > 10000**.
     - **WITH** specifies the hyperparameters used for model training. When a hyperparameter is not set by the user, the framework uses its default value.

     The framework supports various hyperparameter combinations for different operators.

     **Table 3** Hyperparameters supported by operators

     | Operator | Hyperparameter |
     | :----------------------------------------------------------- | :----------------------------------------------------------- |
     | GD<br/>(logistic_regression, linear_regression, svm_classification) | optimizer(char*), verbose(bool), max_iterations(int), max_seconds(double), batch_size(int), learning_rate(double), decay(double), and tolerance(double)<br/>SVM additionally supports the hyperparameter lambda(double). |
     | K-Means | max_iterations(int), num_centroids(int), tolerance(double), batch_size(int), num_features(int), distance_function(char*), seeding_function(char*), verbose(int), and seed(int) |
     | GD(pca) | batch_size(int), max_iterations(int), max_seconds(int), tolerance(float8), verbose(bool), number_components(int), and seed(int) |
     | GD(multiclass) | classifier(char*)<br/>Note: The other hyperparameters of multiclass depend on the selected classifier. |
     | xgboost_regression_logistic, xgboost_binary_logistic, xgboost_regression_squarederror, xgboost_regression_gamma | batch_size(int), booster(char*), tree_method(char*), eval_metric(char*), seed(int), nthread(int), max_depth(int), gamma(float8), eta(float8), min_child_weight(int), and verbosity(int) |

     The default value and value range of each hyperparameter are as follows:

     **Table 4** Default values and value ranges of hyperparameters

     | Operator | Default Hyperparameter Value | Value Range | Hyperparameter Description |
     | :----------------------------------------------------------- | :------------------------------------------------------ | :----------------------------------------------------------- | :----------------------------------------------------------- |
     | GD: logistic_regression, linear_regression, svm_classification, pca | optimizer = gd (gradient descent) | gd/ngd (natural gradient descent) | Optimizer |
     | | verbose = false | T/F | Log display |
     | | max_iterations = 100 | (0, 10000] | Maximum iterations |
     | | max_seconds = 0 (The running duration is not limited.) | [0, INT_MAX_VALUE] | Running duration |
     | | batch_size = 1000 | (0, 1048575] | Number of data records selected per training |
     | | learning_rate = 0.8 | (0, DOUBLE_MAX_VALUE] | Learning rate |
     | | decay = 0.95 | (0, DOUBLE_MAX_VALUE] | Weight decay rate |
     | | tolerance = 0.0005 | (0, DOUBLE_MAX_VALUE] | Tolerance |
     | | seed = 0 (random value of **seed**) | [0, INT_MAX_VALUE] | Seed |
     | | Linear and SVM only: kernel = "linear" | linear/gaussian/polynomial | Kernel function |
     | | Linear and SVM only: components = MAX(2*features, 128) | [0, INT_MAX_VALUE] | Number of high-dimension space dimensions |
     | | Linear and SVM only: gamma = 0.5 | (0, DOUBLE_MAX_VALUE] | Gaussian kernel function parameter |
     | | Linear and SVM only: degree = 2 | [2, 9] | Polynomial kernel function parameter |
     | | Linear and SVM only: coef0 = 1.0 | [0, DOUBLE_MAX_VALUE] | Polynomial kernel function parameter |
     | | SVM only: lambda = 0.01 | (0, DOUBLE_MAX_VALUE) | Regularization parameter |
     | | PCA only: number_components | (0, INT_MAX_VALUE] | Target dimension after dimension reduction |
     | GD: multiclass | classifier = "svm_classification" | svm_classification\logistic_regression | Classifier for multiclass tasks |
     | Kmeans | max_iterations = 10 | [1, 10000] | Maximum iterations |
     | | num_centroids = 10 | [1, 1000000] | Number of clusters |
     | | tolerance = 0.00001 | (0, 1] | Central point error |
     | | batch_size = 10 | [1, 1048575] | Number of data records selected per training |
     | | num_features = 2 | [1, INT_MAX_VALUE] | Number of sample features |
     | | distance_function = "L2_Squared" | L1\L2\L2_Squared\Linf | Distance function |
     | | seeding_function = "Random++" | Random++\KMeans\|\| | Method for initializing seed points |
     | | verbose = 0U | { 0, 1, 2 } | Verbose mode |
     | | seed = 0U | [0, INT_MAX_VALUE] | Seed |
     | xgboost:<br/>xgboost_regression_logistic, xgboost_binary_logistic, xgboost_regression_gamma, xgboost_regression_squarederror | n_iter = 10 | (0, 10000] | Number of iterations |
     | | batch_size = 10000 | (0, 1048575] | Number of data records selected per training |
     | | booster = "gbtree" | gbtree\gblinear\dart | Booster type |
     | | tree_method = "auto" | auto\exact\approx\hist\gpu_hist<br/>Note: To use **gpu_hist**, a GPU library must be configured; otherwise, the DB4AI platform does not support this value. | Tree construction algorithm |
     | | eval_metric = "rmse" | rmse\rmsle\map\mae\auc\aucpr | Data verification metric |
     | | seed = 0 | [0, 100] | Seed |
     | | nthread = 1 | (0, MAX_MEMORY_LIMIT] | Concurrency |
     | | max_depth = 5 | (0, MAX_MEMORY_LIMIT] | Maximum depth of the tree. This parameter is valid only for the tree booster. |
     | | gamma = 0.0 | [0, 1] | Minimum loss required for further partitioning on leaf nodes |
     | | eta = 0.3 | [0, 1] | Step used in the update to prevent overfitting |
     | | min_child_weight = 1 | [0, INT_MAX_VALUE] | Minimum sum of instance weights required by child nodes |
     | | verbosity = 1 | 0 (silent)\1 (warning)\2 (info)\3 (debug) | Printing level |

     In the table, MAX_MEMORY_LIMIT indicates the maximum number of tuples that can be loaded in memory, and GS_MAX_COLS indicates the maximum number of attributes in a database table.

   - If the model is saved successfully, the following information is returned:

     ```
     MODEL CREATED. PROCESSED x
     ```
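   - As a further illustration, the following is a minimal sketch of training an unsupervised model with the **kmeans** algorithm from Table 2, using the **num_centroids** and **max_iterations** hyperparameters from Table 4. The model name, feature columns, and the **tb_iris_1** table are carried over from the example above as assumptions, not fixed names; no **TARGET** clause is given because k-means is unsupervised.

     ```
     MogDB=# CREATE MODEL iris_kmeans_model USING kmeans FEATURES sepal_length, sepal_width, petal_length, petal_width FROM tb_iris_1 WITH num_centroids=3, max_iterations=50;
     ```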
3. View the model information.

   After the training is complete, the model is stored in the **gs_model_warehouse** system catalog. You can view information about the model and the training process in the **gs_model_warehouse** system catalog.

   The model details are stored in the system catalog in binary mode. You can use the **gs_explain_model** function to view them. The statement is as follows:

   ```
   MogDB=# select * from gs_explain_model('iris_classification_model');
                         DB4AI MODEL
   -------------------------------------------------------------
   Name: iris_classification_model
   Algorithm: xgboost_regression_logistic
   Query: CREATE MODEL iris_classification_model
   USING xgboost_regression_logistic
   FEATURES sepal_length, sepal_width,petal_length,petal_width
   TARGET target_type < 2
   FROM tb_iris_1
   WITH nthread=4, max_depth=8;
   Return type: Float64
   Pre-processing time: 0.000000
   Execution time: 0.001443
   Processed tuples: 78
   Discarded tuples: 0
   n_iter: 10
   batch_size: 10000
   max_depth: 8
   min_child_weight: 1
   gamma: 0.0000000000
   eta: 0.3000000000
   nthread: 4
   verbosity: 1
   seed: 0
   booster: gbtree
   tree_method: auto
   eval_metric: rmse
   rmse: 0.2648450136
   model size: 4613
   ```

4. Use an existing model to perform a prediction task.

   Use the **SELECT** and **PREDICT BY** keywords to complete the prediction task based on the existing model.

   Query syntax: SELECT… PREDICT BY… (FEATURES…)… FROM…;

   ```
   MogDB=# SELECT id, PREDICT BY iris_classification_model (FEATURES sepal_length,sepal_width,petal_length,petal_width) as "PREDICT" FROM tb_iris limit 3;

    id  | PREDICT
   -----+---------
    84  | 2
    85  | 0
    86  | 0
   (3 rows)
   ```

   For the same prediction task, the same model returns stable results, and models trained with the same hyperparameters on the same training set are also stable. However, AI model training involves randomness (for example, stochastic gradient descent over each batch of data), so the computing performance and results of different models may vary slightly.
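   When a trained model is no longer needed, it can be removed with the **DROP MODEL** syntax listed in Table 1. A minimal sketch for the model created above (the model name follows this example and is otherwise an assumption):

   ```
   MogDB=# DROP MODEL iris_classification_model;
   ```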
5. View the execution plan.

   You can use the **EXPLAIN** statement to analyze the execution plan in the model training or prediction process of **CREATE MODEL** and **PREDICT BY**. The **EXPLAIN** keyword can be followed by a **CREATE MODEL** or **PREDICT BY** clause, or by optional parameters. The supported parameters are as follows:

   **Table 5** Parameters supported by EXPLAIN

   | Parameter | Description |
   | :-------- | :----------------------------------------------------------- |
   | ANALYZE   | Boolean variable, which adds description information such as the running time and the number of loop iterations |
   | VERBOSE   | Boolean variable, which determines whether to output the training information to the client |
   | COSTS     | Boolean variable |
   | CPU       | Boolean variable |
   | DETAIL    | Boolean variable, which is unavailable |
   | NODES     | Boolean variable, which is unavailable |
   | NUM_NODES | Boolean variable, which is unavailable |
   | BUFFERS   | Boolean variable |
   | TIMING    | Boolean variable |
   | PLAN      | Boolean variable |
   | FORMAT    | Optional format type: TEXT, XML, JSON, or YAML |

   Example:

   ```
   MogDB=# Explain CREATE MODEL patient_logistic_regression USING logistic_regression FEATURES second_attack, treatment TARGET trait_anxiety > 50 FROM patients WITH batch_size=10, learning_rate = 0.05;
                                 QUERY PLAN
   -------------------------------------------------------------------------
    Train Model - logistic_regression (cost=0.00..0.00 rows=0 width=0)
      -> Materialize (cost=0.00..41.08 rows=1776 width=12)
         -> Seq Scan on patients (cost=0.00..32.20 rows=1776 width=12)
   (3 rows)
   ```

6. Perform troubleshooting in case of exceptions.

   - Training phase

     - Scenario 1: When the value of a hyperparameter exceeds its value range, model training fails and an error message is returned. For example:

       ```
       MogDB=# CREATE MODEL patient_linear_regression USING linear_regression FEATURES second_attack,treatment TARGET trait_anxiety FROM patients WITH optimizer='aa';
       ERROR: Invalid hyperparameter value for optimizer. Valid values are: gd, ngd.
       ```

     - Scenario 2: If the model name already exists, the model fails to be saved, and an error message with the cause is displayed. For example:

       ```
       MogDB=# CREATE MODEL patient_linear_regression USING linear_regression FEATURES second_attack,treatment TARGET trait_anxiety FROM patients;
       ERROR: The model name "patient_linear_regression" already exists in gs_model_warehouse.
       ```

     - Scenario 3: If the **FEATURES** or **TARGET** clause is set to **\***, an error message with the cause is displayed. For example:

       ```
       MogDB=# CREATE MODEL patient_linear_regression USING linear_regression FEATURES * TARGET trait_anxiety FROM patients;
       ERROR: FEATURES clause cannot be *
       -----------------------------------------------------------------------------------------------------------------------
       MogDB=# CREATE MODEL patient_linear_regression USING linear_regression FEATURES second_attack,treatment TARGET * FROM patients;
       ERROR: TARGET clause cannot be *
       ```

     - Scenario 4: If the **TARGET** or **FEATURES** clause required by a supervised learning algorithm is missing, or the **TARGET** keyword is used with an unsupervised learning algorithm, an error message with the cause is displayed.
       For example:

       ```
       MogDB=# CREATE MODEL patient_linear_regression USING linear_regression FEATURES second_attack,treatment FROM patients;
       ERROR: Supervised ML algorithms require TARGET clause
       -----------------------------------------------------------------------------------------------------------------------------
       CREATE MODEL patient_linear_regression USING linear_regression TARGET trait_anxiety FROM patients;
       ERROR: Supervised ML algorithms require FEATURES clause
       ```

     - Scenario 5: If there is only one category in the **TARGET** column, an error message with the cause is displayed. For example:

       ```
       MogDB=# CREATE MODEL ecoli_svmc USING multiclass FEATURES f1, f2, f3, f4, f5, f6, f7 TARGET cat FROM (SELECT * FROM db4ai_ecoli WHERE cat='cp');
       ERROR: At least two categories are needed
       ```

     - Scenario 6: DB4AI filters out rows that contain null values during training. If the training data is empty after filtering, an error message with the cause is displayed. For example:

       ```
       MogDB=# create model iris_classification_model using xgboost_regression_logistic features message_regular target error_level from error_code;
       ERROR: Training data is empty, please check the input data.
       ```

     - Scenario 7: The DB4AI algorithms restrict the supported data types. If a data type is not in the whitelist, an error message showing the invalid OID is returned. You can look up the OID in **pg_type** to determine the invalid data type. For example:

       ```
       MogDB=# CREATE MODEL ecoli_svmc USING multiclass FEATURES f1, f2, f3, f4, f5, f6, f7, cat TARGET cat FROM db4ai_ecoli ;
       ERROR: Oid type 1043 not yet supported
       ```

     - Scenario 8: If the GUC parameter **statement_timeout** is set, a **CREATE MODEL** statement whose execution exceeds the timeout is terminated and model training fails. The size of the training set, the number of training rounds (**iteration**), the early termination conditions (**tolerance** and **max_seconds**), and the number of parallel threads (**nthread**) all affect the training duration; when the duration exceeds the database limit, statement execution is terminated.

   - Model parsing

     - Scenario 9: If the model name cannot be found in the system catalog, an error message with the cause is displayed. For example:

       ```
       MogDB=# select gs_explain_model("ecoli_svmc");
       ERROR: column "ecoli_svmc" does not exist
       ```

   - Prediction phase

     - Scenario 10: If the model name cannot be found in the system catalog, an error message with the cause is displayed. For example:

       ```
       MogDB=# select id, PREDICT BY patient_logistic_regression (FEATURES second_attack,treatment) FROM patients;
       ERROR: There is no model called "patient_logistic_regression".
       ```

     - Scenario 11: If the data dimension or data type of **FEATURES** in a prediction task is inconsistent with that of the training set, an error message with the cause is displayed.
For example: - - ``` - MogDB=# select id, PREDICT BY patient_linear_regression (FEATURES second_attack) FROM patients; - ERROR: Invalid number of features for prediction, provided 1, expected 2 - CONTEXT: referenced column: patient_linear_regression_pred - ------------------------------------------------------------------------------------------------------------------------------------- - MogDB=# select id, PREDICT BY patient_linear_regression (FEATURES 1,second_attack,treatment) FROM patients; - ERROR: Invalid number of features for prediction, provided 3, expected 2 - CONTEXT: referenced column: patient_linear_regression_pre - ``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The DB4AI feature requires data access for computing and is not applicable to encrypted databases. diff --git a/product/en/docs-mogdb/v5.2/_index.md b/product/en/docs-mogdb/v5.2/_index.md deleted file mode 100644 index 176d62e2..00000000 --- a/product/en/docs-mogdb/v5.2/_index.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Introduction -summary: SELECT -author: Zhang Cuiping -date: 2022-08-07 ---- - - - - - -[MogDB Introduction](./overview.md) - -[Characteristic Description](./characteristic-description/characteristic-description-overview.md) - -[Release Notes](./about-mogdb/mogdb-new-feature/release-note.md) - - - - - -[MogDB Playground](./quick-start/mogdb-playground.md) - - - - - -[PTK-based Installation](./installation-guide/ptk-based-installation.md) - -[MogDB in Container](./installation-guide/docker-installation/docker-installation.md) - -[MogDB on Kubernetes](../../docs-mogdb-stack/v1.0/quick-start.md) - - - - - -[Development Based on JDBC](./developer-guide/dev/2-development-based-on-jdbc/3-development-process.md) - -[Data Import and Export](./administrator-guide/importing-and-exporting-data/importing-data/1-import-modes.md) - - - - - -[Routine Maintenance](./administrator-guide/routine-maintenance/0-starting-and-stopping-mogdb.md) - -[Backup and Restoration](./administrator-guide/backup-and-restoration/backup-and-restoration.md) - -[Upgrade Guide](./upgrade-guide/upgrade-guide.md) - -[Common Fault Locating Methods](./common-faults-and-identification/common-fault-locating-methods.md) - -[MogDB Stack](../../docs-mogdb-stack/v1.0/overview.md) - -[MogHA](../../docs-mogha/v2.4/overview.md) - -[MogDB Tools](./reference-guide/tool-reference/tool-overview.md) - - - - - -[System Optimization](./performance-tuning/system-tuning/optimizing-os-parameters.md) - -[SQL Optimization](./performance-tuning/sql-tuning/query-execution-process.md) - -[WDR Snapshot](./performance-tuning/wdr/wdr-snapshot-schema.md) - -[TPCC Performance Tuning Guide](./performance-tuning/TPCC-performance-tuning-guide.md) - - - - - -[PTK - Provisioning Toolkit](../../docs-ptk/v1.1/overview.md) - -[MTK - Database Migration Toolkit](../../docs-mtk/v2.0/overview.md) - -[SCA - SQL Compatibility Analyzer](../../docs-sca/v5.1/overview.md) - -[MVD - MogDB Verify Data](../../docs-mvd/v3.4/overview.md) - -[MDB - MogDB Data Bridge](../../docs-mdb/v2.0/overview.md) - -[Mogeaver - Graphical Tool](./mogeaver/mogeaver-overview.md) - - - - diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/MogDB-compared-to-openGauss.md b/product/en/docs-mogdb/v5.2/about-mogdb/MogDB-compared-to-openGauss.md deleted file mode 100644 index 6a6e8e81..00000000 --- a/product/en/docs-mogdb/v5.2/about-mogdb/MogDB-compared-to-openGauss.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: Comparison Between MogDB and openGauss -summary: Comparison 
Between MogDB and openGauss
author: Guo Huan
date: 2021-04-02
---

# Comparison Between MogDB and openGauss

## Relationship Between MogDB and openGauss

MogDB is an enterprise database built by packaging and improving the open-source openGauss kernel, making it friendlier to enterprise applications. On top of the openGauss kernel, MogDB adds the MogHA component for automatic management and high availability under the primary/standby architecture, which is crucial for enterprise applications. In addition, MogDB Manager includes backup and recovery, monitoring, automatic installation, and other components that address enterprise-level ease-of-use requirements.

MogDB is a commercial product that is sold according to an established license pricing system and is supported by Enmo's professional services.

## Introduction to openGauss

openGauss is an open-source relational database management system. Its kernel is derived from PostgreSQL and is distributed under Mulan PSL v2. The openGauss kernel is open source; anyone and any organization can download the source code and compile and install it free of charge. The openGauss community also regularly releases compiled binary installation files. The current release strategy is one long-term support (LTS) version per year (at the end of March) and one innovation version with new features per year (at the end of September).

openGauss is a standalone database. It has the basic features of relational databases as well as enhanced features.

For more details, please visit the openGauss official website:

### Basic Features

- Standard SQLs

  Supports SQL92, SQL99, SQL2003, and SQL2011 standards, GBK and UTF-8 character sets, SQL standard functions and analytic functions, and SQL Procedural Language.

- Database storage management

  Supports tablespaces where different tables can be stored in different locations.

- Primary/standby deployment

  Supports the ACID properties, single-node fault recovery, primary/standby data synchronization, and primary/standby switchover.

- APIs

  Supports standard JDBC 4.0 and ODBC 3.5.

- Management tools

  Provides installation and deployment tools, instance start and stop tools, and backup and restoration tools.

- Security management

  Supports SSL network connections, user permission management, password management, security auditing, and other functions, to ensure data security at the management, application, system, and network layers.

### Enhanced Features

- Data Partitioning

  Data partitioning is a common capability of most database products. In openGauss, data is partitioned horizontally with a user-specified policy. This operation splits a table into multiple non-overlapping partitions.

- Vectorized Executor and Hybrid Row-Column Storage Engine

  In a wide table containing a huge amount of data, a query usually involves only certain columns. In this case, the query performance of the row-store engine is poor. For example, a single table containing the data of a meteorological agency has 200 to 800 columns, of which only ten are frequently accessed. In this case, a vectorized executor and column-store engine can significantly improve query performance and save storage space.

- High Availability (HA) Transaction Processing

  openGauss manages transactions and guarantees the ACID properties.
openGauss provides a primary/standby HA mechanism to reduce the service interruption time when the primary node is faulty. It protects key user programs and continuously provides external services, minimizing the impact of hardware, software, and human faults on services to ensure service continuity.

- High Concurrency and High Performance

  openGauss supports 10,000 concurrent connections through server thread pools. It supports thread-core binding and achieves millions of tpmC using NUMA-aware kernel data structures, manages terabyte-scale memory buffers through efficient hot/cold data eviction, achieves multi-version access without read/write blocking using CSN-based snapshots, and avoids performance fluctuation caused by full-page writes using incremental checkpoints.

- SQL Self-Diagnosis

  To locate performance issues of a query, you can use **EXPLAIN PERFORMANCE** to query its execution plan. However, this method produces many logs, requires modifying service logic, and depends on expertise to locate problems. SQL self-diagnosis enables users to locate performance issues more efficiently.

- Equality Query in a Fully-encrypted Database

  A fully-encrypted database allows the client to encrypt sensitive data within the client application. During query processing, the entire service data flow remains in ciphertext. It has the following advantages:

  - Protects data privacy and security throughout the lifecycle on the cloud.
  - Resolves trust issues by letting public cloud, consumer cloud, and development users keep their own keys.
  - Enables partners to better comply with personal privacy protection laws and regulations with the help of the full encryption capability.

- Memory Table

  With memory tables, all data access is lock-free and concurrent, optimizing data processing and meeting real-time requirements.

- Primary/Standby Deployment

  The primary/standby deployment mode supports synchronous and asynchronous replication. Applications are deployed based on service scenarios. For synchronous replication, one primary node and two standby nodes are deployed. This ensures reliability but affects performance. For asynchronous replication, one primary node and one standby node are deployed. This has little impact on performance, but data may be lost when exceptions occur. openGauss supports automatic recovery of damaged pages: when a page on the primary node is damaged, it can be automatically recovered from the standby node. openGauss also supports concurrent log recovery on the standby node to minimize the service unavailability time when the primary node is down.

  In addition, in primary/standby deployment mode, if the read function of the standby node is enabled, the standby node supports read operations instead of write operations (such as table creation, data insertion, and data deletion), reducing the pressure on the primary node.

- AI Capabilities

  - Automatic parameter optimization
  - Slow SQL discovery
  - Index recommendation
  - Time series prediction and exception detection
  - DB4AI function
  - SQL execution time prediction
  - Database monitoring

- Logical Log Replication

  In logical replication, the primary database is called the source database, and the standby database is called the target database.
The source database parses the WAL file based on the specified logical parsing rules and converts DML operations into logical change records (standard SQL statements). The source database sends these standard SQL statements to the target database, which applies them on receipt to implement data synchronization. Logical replication involves only DML operations. It can implement cross-version replication, heterogeneous database replication, dual-write database replication, and table-level replication.

- Automatic WDR Performance Analysis Report

  Periodically and proactively analyzes run logs and WDR data (generated automatically in the background, and also triggered by key indicator thresholds such as CPU usage, memory usage, and the proportion of long SQL statements) and produces reports in HTML and PDF formats. A WDR performance report compares system performance snapshots taken at two points in time and is used to diagnose database kernel performance faults.

- Incremental Backup and Restoration (beta)

  Supports full backup and incremental backup of the database, manages backup data, and views the backup status. Supports merging incremental backups and deleting expired backups. The database server dynamically tracks page changes; when a relational page is updated, the page is marked for backup. The incremental backup function requires that the GUC parameter enable_cbm_tracking be enabled to allow the server to track modified pages.

- Point-In-Time Recovery

  Point-in-time recovery (PITR) uses basic hot backup, write-ahead logs (WALs), and archived WALs for backup and recovery. WAL replay can be stopped at any point in time, yielding a consistent snapshot of the database at that moment. That is, you can restore the database to the state at any time since the backup started. During recovery, you can specify the recovery stop point by transaction ID (XID), time, or log sequence number (LSN).

## Advantages of MogDB

openGauss is a standalone database where data is stored on a single physical node and data access tasks are pushed down to the service node. The high concurrency of a single server enables quick data processing, and data can be copied to the standby server through log replication, ensuring high reliability and scalability.

Because openGauss is a standalone database, to use it in formal commercial projects you need to build complete tool-chain capabilities around it, such as database monitoring and primary/standby switchover.

At the product level, MogDB adds the MogHA enterprise-class high-availability component and the feature-rich graphical management tool MogDB Manager to the original functions of openGauss, and continuously enhances the openGauss kernel along the established roadmap. MogDB maximizes high-availability deployment across multiple equipment rooms and can reach 2.5 million tpmC on a 4-socket server. MogDB Manager contains a variety of practical components, such as MogHA, MTK, PTK, MDB, SCA, and MVD, which make up for the shortcomings of the open-source openGauss database and add rich enterprise-class functions.
At the service level, Enmo brings decades of experience in database operation and maintenance and provides complete services that make the database more stable, application transformation smoother, and risk lower. This compensates for the shortage of skilled operation and maintenance staff around the open-source openGauss database while reducing maintenance costs.
diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/about-mogdb.md b/product/en/docs-mogdb/v5.2/about-mogdb/about-mogdb.md
deleted file mode 100644
index 2ba6cb17..00000000
--- a/product/en/docs-mogdb/v5.2/about-mogdb/about-mogdb.md
+++ /dev/null
@@ -1,15 +0,0 @@
---
title: About MogDB
summary: About MogDB
author: Guo Huan
date: 2023-05-22
---

# About MogDB

+ [MogDB Overview](../overview.md)
+ [Comparison Between MogDB and openGauss](MogDB-compared-to-openGauss.md)
+ [Release Notes](mogdb-new-feature/release-note.md)
+ [Open Source Components](open-source-components/open-source-components.md)
+ [Usage Limitations](usage-limitations.md)
+ [Terms of Use](terms-of-use.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/mogdb-new-feature/release-note.md b/product/en/docs-mogdb/v5.2/about-mogdb/mogdb-new-feature/release-note.md
deleted file mode 100644
index 81a2599a..00000000
--- a/product/en/docs-mogdb/v5.2/about-mogdb/mogdb-new-feature/release-note.md
+++ /dev/null
@@ -1,12 +0,0 @@
---
title: Release Notes
summary: Release Notes
author: Guo Huan
date: 2022-09-27
---

# Release Notes

| Version | Release Date | Overview |
| ------- | ------------ | -------- |
| 5.2.0   |              |          |
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/2-docker-based-mogdb.md b/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/2-docker-based-mogdb.md
deleted file mode 100644
index 8a59bbda..00000000
--- a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/2-docker-based-mogdb.md
+++ /dev/null
@@ -1,37 +0,0 @@
---
title: Container-based MogDB
summary: Container-based MogDB
author: Liuxu
date: 2021-06-09
---

# Container-based MogDB
<br/>

## Features

- New container images are released promptly as new MogDB versions become available.
- The container image ships with best-practice initialization parameter settings built in.
- The container version supports both the x86 and ARM CPU architectures.
- The MogDB 2.1 container version supports the latest compat-tools and plugin functions.

**Currently, the x86-64 and ARM64 architectures are supported. Obtain the container image that matches the machine architecture of your host.**

From version 2.0 onward (inclusive):

- MogDB for the x86-64 architecture runs on the [Ubuntu 18.04 operating system](https://ubuntu.com/).
- MogDB for the ARM64 architecture runs on the [Debian 10 operating system](https://www.debian.org/).

Up to version 1.1.0 (inclusive):

- MogDB for the x86-64 architecture runs on the [CentOS 7.6 operating system](https://www.centos.org/).
- MogDB for the ARM64 architecture runs on the [openEuler 20.03 LTS operating system](https://openeuler.org/en/).
- -## How to Use an Image? - -For details, visit the following website: - -[Installation Guide - Container-based Installation](../../installation-guide/docker-installation/docker-installation.md) diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/DBMS-RANDOM.md b/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/DBMS-RANDOM.md deleted file mode 100644 index 9464e7f1..00000000 --- a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/DBMS-RANDOM.md +++ /dev/null @@ -1,531 +0,0 @@ ---- -title: DBMS_RANDOM - Generating Random Data (Numbers, Strings and Dates) in MogDB with compat-tools -summary: DBMS_RANDOM - Generating Random Data (Numbers, Strings and Dates) in MogDB with compat-tools -author: Zhang Cuiping -date: 2021-08-30 ---- - -# DBMS_RANDOM - Generating Random Data (Numbers, Strings and Dates) in MogDB with compat-tools - -
## Introduction to compat-tools

compat-tools is a set of compatibility tools. It aims to provide the necessary functions and system views for systems migrated to MogDB from other heterogeneous databases, thereby facilitating follow-up system maintenance and application modification.

<br/>
- -## compat-tools Download - -To install compat-tools, please download the tool of the latest version from [https://gitee.com/enmotech/compat-tools](https://gitee.com/enmotech/compat-tools). - -
- -## Features of compat-tools - -1. runMe.sql: General scheduling script -2. Oracle_Views.sql: Compatible with Oracle database data dictionaries and views -3. Oracle_Functions.sql: Compatible with Oracle database functions -4. Oracle_Packages.sql: Compatible with Oracle database packages -5. MySQL_Views.sql: Compatible with MySQL database data dictionaries and views //TODO -6. MySQL_Functions.sql: Compatible with MySQL database functions //TODO - -
- -## MogDB Versions Supported By compat-tools - -- MogDB 2.0 -- MogDB 1.1 - -
- -## Installing and Using compat-tools - -1. Download compat-tools: -2. Store the downloaded files to a customized directory (**/opt/compat-tools-0902** is taken as an example in this article). - - ```bash - [root@mogdb-kernel-0005 compat-tools-0902]# pwd - /opt/compat-tools-0902 - [root@mogdb-kernel-0005 compat-tools-0902]# ls -l - total 228 - -rw-r--r-- 1 root root 9592 Sep 2 14:40 LICENSE - -rw-r--r-- 1 root root 0 Sep 2 14:40 MySQL_Functions.sql - -rw-r--r-- 1 root root 0 Sep 2 14:40 MySQL_Views.sql - -rw-r--r-- 1 root root 41652 Sep 2 14:40 Oracle_Functions.sql - -rw-r--r-- 1 root root 34852 Sep 2 14:40 Oracle_Packages.sql - -rw-r--r-- 1 root root 125799 Sep 2 14:40 Oracle_Views.sql - -rw-r--r-- 1 root root 4708 Sep 2 14:40 README.md - -rw-r--r-- 1 root root 420 Sep 2 14:40 runMe.sql - ``` - -3. Switch to user `omm`. - - ```bash - su - omm - ``` - -4. Run the following command (26000 is the port for connnecting the database). - - ```bash - gsql -d mogdb -p 26000 -f /opt/compat-tools-0902/runMe.sql - ``` - -
- -## Testing DBMS_RANDOM - -### Log In to the mogdb Database - -```sql -[omm@mogdb-kernel-0005 ~]$ gsql -d mogdb -p 26000 -gsql ((MogDB x.x.x build 56189e20) compiled at 2022-01-07 18:47:53 commit 0 last mr ) -Non-SSL connection (SSL connection is recommended when requiring high-security) -Type "help" for help. - -mogdb=# -``` - -
- -- [SEED](#seed) -- [VALUE](#value) -- [STRING](#string) -- [NORMAL](#normal) -- [RANDOM](#random) -- [Generating Random Dates](#generating-random-dates) -- [Generating Random Data](#generating-random-data) - -
- -## SEED - -The `SEED` procedure allows you to seed the pseudo-random number generator, making it more random. `SEED` is limited to binary integers or strings up to 2000 characters. If you want to consistently generate the same set of pseudo-random numbers, always use the same seed. - -```sql -declare -BEGIN - DBMS_OUTPUT.put_line('Run 1 : seed=0'); - DBMS_RANDOM.seed (val => 0); - FOR i IN 1 ..5 LOOP - DBMS_OUTPUT.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10)); - END LOOP; - - DBMS_OUTPUT.put_line('Run 2 : seed=0'); - DBMS_RANDOM.seed (val => 0); - FOR i IN 1 ..5 LOOP - DBMS_OUTPUT.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10)); - END LOOP; - -END; -/ -NOTICE: Run 1 : seed=0 -CONTEXT: SQL statement "CALL dbms_output.put_line('Run 1 : seed=0')" -PL/pgSQL function inline_code_block line 3 at PERFORM -NOTICE: i=1 : value=2.53745232429355 -CONTEXT: SQL statement "CALL dbms_output.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10))" -PL/pgSQL function inline_code_block line 6 at PERFORM -NOTICE: i=2 : value=7.749117821455 -CONTEXT: SQL statement "CALL dbms_output.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10))" -PL/pgSQL function inline_code_block line 6 at PERFORM -NOTICE: i=3 : value=1.86734489817172 -CONTEXT: SQL statement "CALL dbms_output.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10))" -PL/pgSQL function inline_code_block line 6 at PERFORM -NOTICE: i=4 : value=8.83418704243377 -CONTEXT: SQL statement "CALL dbms_output.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10))" -PL/pgSQL function inline_code_block line 6 at PERFORM -NOTICE: i=5 : value=6.19573155790567 -CONTEXT: SQL statement "CALL dbms_output.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10))" -PL/pgSQL function inline_code_block line 6 at PERFORM -NOTICE: Run 2 : seed=0 -CONTEXT: SQL statement "CALL dbms_output.put_line('Run 2 : seed=0')" -PL/pgSQL function inline_code_block line 9 at PERFORM -NOTICE: i=1 : value=2.53745232429355 -CONTEXT: SQL statement "CALL dbms_output.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10))" -PL/pgSQL function inline_code_block line 12 at PERFORM -NOTICE: i=2 : value=7.749117821455 -CONTEXT: SQL statement "CALL dbms_output.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10))" -PL/pgSQL function inline_code_block line 12 at PERFORM -NOTICE: i=3 : value=1.86734489817172 -CONTEXT: SQL statement "CALL dbms_output.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10))" -PL/pgSQL function inline_code_block line 12 at PERFORM -NOTICE: i=4 : value=8.83418704243377 -CONTEXT: SQL statement "CALL dbms_output.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10))" -PL/pgSQL function inline_code_block line 12 at PERFORM -NOTICE: i=5 : value=6.19573155790567 -CONTEXT: SQL statement "CALL dbms_output.put_line('i=' || i || ' : value=' || DBMS_RANDOM.value(low => 1, high => 10))" -PL/pgSQL function inline_code_block line 12 at PERFORM -ANONYMOUS BLOCK EXECUTE -mogdb=# -``` - -
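The same reproducibility can be checked outside an anonymous block. This is a minimal sketch that assumes `seed` is exposed as an ordinary callable function, as the `PERFORM DBMS_RANDOM.seed(...)` calls above suggest:

```sql
-- Re-seeding with the same value should reproduce the same first draw
SELECT DBMS_RANDOM.seed(0);
SELECT DBMS_RANDOM.value() AS first_draw;
SELECT DBMS_RANDOM.seed(0);
SELECT DBMS_RANDOM.value() AS repeated_draw;  -- expected to equal first_draw
```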
- -## VALUE - -The `VALUE` function is used to produce random numbers with a specified range. When called without parameters it produce a number greater than or equal to 0 and less than 1, with 38 digit precision. - -```sql -DECLARE -BEGIN - FOR cur_rec IN 1 ..5 LOOP - DBMS_OUTPUT.put_line('value= ' || DBMS_RANDOM.value()); - END LOOP; -END; -/ -NOTICE: value= .785799258388579 -CONTEXT: SQL statement "CALL dbms_output.put_line('value= ' || DBMS_RANDOM.value())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: value= .692194153089076 -CONTEXT: SQL statement "CALL dbms_output.put_line('value= ' || DBMS_RANDOM.value())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: value= .368766269646585 -CONTEXT: SQL statement "CALL dbms_output.put_line('value= ' || DBMS_RANDOM.value())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: value= .87390407640487 -CONTEXT: SQL statement "CALL dbms_output.put_line('value= ' || DBMS_RANDOM.value())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: value= .745095098391175 -CONTEXT: SQL statement "CALL dbms_output.put_line('value= ' || DBMS_RANDOM.value())" -PL/pgSQL function inline_code_block line 4 at PERFORM -ANONYMOUS BLOCK EXECUTE -``` - -If the parameters are used, the resulting number will be greater than or equal to the low value and less than the high value, with the precision restricted by the size of the high value. - -```sql -declare -BEGIN - FOR cur_rec IN 1 ..5 LOOP - DBMS_OUTPUT.put_line('value(1,100)= ' || DBMS_RANDOM.value(1,100)); - END LOOP; -END; -/ - -NOTICE: value(1,100)= 45.158544998616 -CONTEXT: SQL statement "CALL dbms_output.put_line('value(1,100)= ' || DBMS_RANDOM.value(1,100))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: value(1,100)= 36.0190920610912 -CONTEXT: SQL statement "CALL dbms_output.put_line('value(1,100)= ' || DBMS_RANDOM.value(1,100))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: value(1,100)= 73.5194435422309 -CONTEXT: SQL statement "CALL dbms_output.put_line('value(1,100)= ' || DBMS_RANDOM.value(1,100))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: value(1,100)= 26.7619780991226 -CONTEXT: SQL statement "CALL dbms_output.put_line('value(1,100)= ' || DBMS_RANDOM.value(1,100))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: value(1,100)= 40.035083710216 -CONTEXT: SQL statement "CALL dbms_output.put_line('value(1,100)= ' || DBMS_RANDOM.value(1,100))" -PL/pgSQL function inline_code_block line 4 at PERFORM -ANONYMOUS BLOCK EXECUTE -mogdb=# -``` - -Use `TRUNC` or `ROUND` to alter the precision as required. For example, to produce random integer values between 1 and 10 truncate the output and add 1 to the upper boundary. - -```sql -mogdb=# select TRUNC(DBMS_RANDOM.value(1,11)) ; - - trunc -------- - - 6 - -(1 row) - -mogdb=# -``` - -
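As a quick illustration of the `ROUND` variant mentioned above, the following sketch (using the standard numeric `ROUND(value, digits)` function) keeps two decimal places of a value between 1 and 100:

```sql
-- Random value in [1, 100) rounded to 2 decimal places
SELECT ROUND(DBMS_RANDOM.value(1, 100), 2) AS rounded_value;
```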
- -## STRING - -The `STRING` function returns a string of random characters of the specified length. The `OPT` parameter determines the type of string produced as follows: - -- 'u', 'U' - uppercase alpha characters -- 'l', 'L' - lowercase alpha characters -- 'a', 'A' - mixed case alpha characters -- 'x', 'X' - uppercase alpha-numeric characters -- 'p', 'P' - any printable characters - -The `LEN` parameter, not surprisingly, specifies the length of the string returned. - -```sql -declare -BEGIN - FOR i IN 1 .. 5 LOOP - DBMS_OUTPUT.put_line('string(''x'',10)= ' || DBMS_RANDOM.string('x',10)); - END LOOP; -END; -/ - -NOTICE: string('x',10)= i5S6XOZxrA -CONTEXT: SQL statement "CALL dbms_output.put_line('string(''x'',10)= ' || DBMS_RANDOM.string('x',10))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: string('x',10)= HGvRm75w19 -CONTEXT: SQL statement "CALL dbms_output.put_line('string(''x'',10)= ' || DBMS_RANDOM.string('x',10))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: string('x',10)= N9WsQGJl6l -CONTEXT: SQL statement "CALL dbms_output.put_line('string(''x'',10)= ' || DBMS_RANDOM.string('x',10))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: string('x',10)= hDlPevVgRb -CONTEXT: SQL statement "CALL dbms_output.put_line('string(''x'',10)= ' || DBMS_RANDOM.string('x',10))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: string('x',10)= ZdSd8x8RKx -CONTEXT: SQL statement "CALL dbms_output.put_line('string(''x'',10)= ' || DBMS_RANDOM.string('x',10))" -PL/pgSQL function inline_code_block line 4 at PERFORM -ANONYMOUS BLOCK EXECUTE -mogdb=# -``` - -Combine the `STRING` and `VALUE` functions to get variable length strings. - -```sql -declare -BEGIN - FOR i IN 1 .. 5 LOOP - DBMS_OUTPUT.put_line('string(''L'',?)= ' || DBMS_RANDOM.string('L',TRUNC(DBMS_RANDOM.value(10,21)))); - END LOOP; -END; -/ - -NOTICE: string('L',?)= kcyzowdxqbyzu -CONTEXT: SQL statement "CALL dbms_output.put_line('string(''L'',?)= ' || DBMS_RANDOM.string('L',TRUNC(DBMS_RANDOM.value(10,21))))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: string('L',?)= ohzpljyatsplqtbbus -CONTEXT: SQL statement "CALL dbms_output.put_line('string(''L'',?)= ' || DBMS_RANDOM.string('L',TRUNC(DBMS_RANDOM.value(10,21))))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: string('L',?)= hbrjsfeevoi -CONTEXT: SQL statement "CALL dbms_output.put_line('string(''L'',?)= ' || DBMS_RANDOM.string('L',TRUNC(DBMS_RANDOM.value(10,21))))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: string('L',?)= lfsapmytdamvwcw -CONTEXT: SQL statement "CALL dbms_output.put_line('string(''L'',?)= ' || DBMS_RANDOM.string('L',TRUNC(DBMS_RANDOM.value(10,21))))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: string('L',?)= pcvtxnwzomkqwpfzes -CONTEXT: SQL statement "CALL dbms_output.put_line('string(''L'',?)= ' || DBMS_RANDOM.string('L',TRUNC(DBMS_RANDOM.value(10,21))))" -PL/pgSQL function inline_code_block line 4 at PERFORM -ANONYMOUS BLOCK EXECUTE -mogdb=# -``` - -
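The effect of each `OPT` value can be compared side by side. The following sketch simply calls the documented `STRING(opt, len)` signature once per option type:

```sql
-- One 8-character sample per option type
SELECT DBMS_RANDOM.string('u', 8) AS upper_only,
       DBMS_RANDOM.string('l', 8) AS lower_only,
       DBMS_RANDOM.string('a', 8) AS mixed_alpha,
       DBMS_RANDOM.string('x', 8) AS upper_alnum,
       DBMS_RANDOM.string('p', 8) AS printable;
```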
- -## NORMAL - -The `NORMAL` function returns random numbers in a normal distribution. - -```sql -declare -BEGIN - FOR cur_rec IN 1 ..5 LOOP - DBMS_OUTPUT.put_line('normal= ' || DBMS_RANDOM.normal()); - END LOOP; -END; -/ - -NOTICE: normal= .838851847718988 -CONTEXT: SQL statement "CALL dbms_output.put_line('normal= ' || DBMS_RANDOM.normal())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: normal= -.523612260373397 -CONTEXT: SQL statement "CALL dbms_output.put_line('normal= ' || DBMS_RANDOM.normal())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: normal= -.241931681458075 -CONTEXT: SQL statement "CALL dbms_output.put_line('normal= ' || DBMS_RANDOM.normal())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: normal= -.120847761874286 -CONTEXT: SQL statement "CALL dbms_output.put_line('normal= ' || DBMS_RANDOM.normal())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: normal= .360125112757284 -CONTEXT: SQL statement "CALL dbms_output.put_line('normal= ' || DBMS_RANDOM.normal())" -PL/pgSQL function inline_code_block line 4 at PERFORM -ANONYMOUS BLOCK EXECUTE -mogdb=# -``` - -
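To obtain a distribution other than the standard one, scale and shift the output. This sketch assumes `NORMAL` returns a standard normal variate (mean 0, standard deviation 1), following the convention of the Oracle package it emulates:

```sql
-- Normally distributed value with mean 100 and standard deviation 15
SELECT 100 + 15 * DBMS_RANDOM.normal() AS scaled_normal;
```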
- -## RANDOM - -```sql -declare -BEGIN - FOR i IN 1 .. 5 LOOP - DBMS_OUTPUT.put_line('random= ' || DBMS_RANDOM.random()); - END LOOP; -END; -/ -NOTICE: This function is deprecated with Release 11gR1, although currently supported, it should not be used. -CONTEXT: SQL statement "CALL dbms_output.put_line('random= ' || DBMS_RANDOM.random())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: random= -1023930867 -CONTEXT: SQL statement "CALL dbms_output.put_line('random= ' || DBMS_RANDOM.random())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: This function is deprecated with Release 11gR1, although currently supported, it should not be used. -CONTEXT: SQL statement "CALL dbms_output.put_line('random= ' || DBMS_RANDOM.random())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: random= 1068572119 -CONTEXT: SQL statement "CALL dbms_output.put_line('random= ' || DBMS_RANDOM.random())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: This function is deprecated with Release 11gR1, although currently supported, it should not be used. -CONTEXT: SQL statement "CALL dbms_output.put_line('random= ' || DBMS_RANDOM.random())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: random= 95361253 -CONTEXT: SQL statement "CALL dbms_output.put_line('random= ' || DBMS_RANDOM.random())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: This function is deprecated with Release 11gR1, although currently supported, it should not be used. -CONTEXT: SQL statement "CALL dbms_output.put_line('random= ' || DBMS_RANDOM.random())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: random= -712638729 -CONTEXT: SQL statement "CALL dbms_output.put_line('random= ' || DBMS_RANDOM.random())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: This function is deprecated with Release 11gR1, although currently supported, it should not be used. -CONTEXT: SQL statement "CALL dbms_output.put_line('random= ' || DBMS_RANDOM.random())" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: random= -1251059926 -CONTEXT: SQL statement "CALL dbms_output.put_line('random= ' || DBMS_RANDOM.random())" -PL/pgSQL function inline_code_block line 4 at PERFORM -ANONYMOUS BLOCK EXECUTE -mogdb=# -``` - -
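Because `RANDOM` returns a signed integer, mapping its output into a smaller range requires an explicit transformation. The sketch below uses the standard `ABS` and `MOD` functions; for new code, `VALUE` combined with `TRUNC` (shown earlier) is the cleaner choice, since `RANDOM` is deprecated:

```sql
-- Map the signed integer output of RANDOM into [0, 100)
SELECT ABS(MOD(DBMS_RANDOM.random(), 100)) AS bounded_random;
```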
- -## Generating Random Dates - -There are no specific functions for generating random dates currently, but we can add random numbers to an existing date to make it random. The following example generates random dates over the next year. - -```sql -declare -BEGIN - FOR i IN 1 .. 5 LOOP - DBMS_OUTPUT.put_line('date= ' || TRUNC(SYSDATE + DBMS_RANDOM.value(0,366))); - END LOOP; -END; -/ - -NOTICE: date= 2021-10-06 00:00:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('date= ' || TRUNC(SYSDATE + DBMS_RANDOM.value(0,366)))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: date= 2022-05-09 00:00:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('date= ' || TRUNC(SYSDATE + DBMS_RANDOM.value(0,366)))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: date= 2022-04-07 00:00:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('date= ' || TRUNC(SYSDATE + DBMS_RANDOM.value(0,366)))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: date= 2021-11-29 00:00:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('date= ' || TRUNC(SYSDATE + DBMS_RANDOM.value(0,366)))" -PL/pgSQL function inline_code_block line 4 at PERFORM -NOTICE: date= 2022-06-04 00:00:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('date= ' || TRUNC(SYSDATE + DBMS_RANDOM.value(0,366)))" -PL/pgSQL function inline_code_block line 4 at PERFORM -ANONYMOUS BLOCK EXECUTE -mogdb=# -``` - -By doing the correct divisions, we can add random numbers of hours, seconds or minutes to a date. - -```sql -DECLARE - l_hours_in_day NUMBER := 24; - l_mins_in_day NUMBER := 24*60; - l_secs_in_day NUMBER := 24*60*60; -BEGIN - FOR i IN 1 .. 5 LOOP - DBMS_OUTPUT.put_line('hours= ' || (TRUNC(SYSDATE) + (TRUNC(DBMS_RANDOM.value(0,1000))/l_hours_in_day))); - END LOOP; - FOR i IN 1 .. 5 LOOP - DBMS_OUTPUT.put_line('mins = ' || (TRUNC(SYSDATE) + (TRUNC(DBMS_RANDOM.value(0,1000))/l_mins_in_day))); - END LOOP; - FOR i IN 1 .. 
5 LOOP - DBMS_OUTPUT.put_line('secs = ' || (TRUNC(SYSDATE) + (TRUNC(DBMS_RANDOM.value(0,1000))/l_secs_in_day))); - END LOOP; -END; -/ -NOTICE: hours= 2021-10-13 22:00:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('hours= ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_hours_in_day)))" -PL/pgSQL function inline_code_block line 6 at PERFORM -NOTICE: hours= 2021-10-10 00:00:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('hours= ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_hours_in_day)))" -PL/pgSQL function inline_code_block line 6 at PERFORM -NOTICE: hours= 2021-09-07 02:00:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('hours= ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_hours_in_day)))" -PL/pgSQL function inline_code_block line 6 at PERFORM -NOTICE: hours= 2021-09-26 11:00:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('hours= ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_hours_in_day)))" -PL/pgSQL function inline_code_block line 6 at PERFORM -NOTICE: hours= 2021-09-19 22:00:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('hours= ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_hours_in_day)))" -PL/pgSQL function inline_code_block line 6 at PERFORM -NOTICE: mins = 2021-09-04 00:01:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('mins = ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_mins_in_day)))" -PL/pgSQL function inline_code_block line 9 at PERFORM -NOTICE: mins = 2021-09-04 11:56:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('mins = ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_mins_in_day)))" -PL/pgSQL function inline_code_block line 9 at PERFORM -NOTICE: mins = 2021-09-04 00:53:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('mins = ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_mins_in_day)))" -PL/pgSQL function inline_code_block line 9 at PERFORM -NOTICE: mins = 2021-09-04 00:21:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('mins = ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_mins_in_day)))" -PL/pgSQL function inline_code_block line 9 at PERFORM -NOTICE: mins = 2021-09-04 12:38:00 -CONTEXT: SQL statement "CALL dbms_output.put_line('mins = ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_mins_in_day)))" -PL/pgSQL function inline_code_block line 9 at PERFORM -NOTICE: secs = 2021-09-04 00:10:28 -CONTEXT: SQL statement "CALL dbms_output.put_line('secs = ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_secs_in_day)))" -PL/pgSQL function inline_code_block line 12 at PERFORM -NOTICE: secs = 2021-09-04 00:15:31 -CONTEXT: SQL statement "CALL dbms_output.put_line('secs = ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_secs_in_day)))" -PL/pgSQL function inline_code_block line 12 at PERFORM -NOTICE: secs = 2021-09-04 00:09:07 -CONTEXT: SQL statement "CALL dbms_output.put_line('secs = ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_secs_in_day)))" -PL/pgSQL function inline_code_block line 12 at PERFORM -NOTICE: secs = 2021-09-04 00:06:54 -CONTEXT: SQL statement "CALL dbms_output.put_line('secs = ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_secs_in_day)))" -PL/pgSQL function inline_code_block line 12 at PERFORM -NOTICE: secs = 2021-09-04 00:06:32 -CONTEXT: SQL statement "CALL dbms_output.put_line('secs = ' || (TRUNC(SYSDATE)+ (TRUNC(DBMS_RANDOM.value(0,1000))/l_secs_in_day)))" -PL/pgSQL function inline_code_block line 12 at PERFORM -ANONYMOUS BLOCK EXECUTE -mogdb=# -``` - -
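The same technique works for a window between two fixed dates rather than one anchored at SYSDATE. This sketch assumes `TO_DATE` and the date-plus-number arithmetic used in the examples above, and picks a random day in 2021:

```sql
-- Random date between 2021-01-01 (inclusive) and 2022-01-01 (exclusive)
SELECT TRUNC(TO_DATE('2021-01-01', 'YYYY-MM-DD') + DBMS_RANDOM.value(0, 365)) AS random_day_in_2021;
```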
## Generating Random Data

The `DBMS_RANDOM` package is useful for generating random test data. You can generate large amounts of data quickly by combining it with a query.

```sql
mogdb=# CREATE TABLE random_data (
  id           NUMBER,
  small_number NUMBER(5),
  big_number   NUMBER,
  short_string VARCHAR2(50),
  long_string  VARCHAR2(400),
  created_date DATE,
  CONSTRAINT random_data_pk PRIMARY KEY (id)
);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "random_data_pk" for table "random_data"
CREATE TABLE
mogdb=#
```

```sql
mogdb=# INSERT INTO random_data
SELECT generate_series(1,29999),
       TRUNC(DBMS_RANDOM.value(1,5)) AS small_number,
       TRUNC(DBMS_RANDOM.value(100,10000)) AS big_number,
       DBMS_RANDOM.string('L',TRUNC(DBMS_RANDOM.value(10,50))) AS short_string,
       DBMS_RANDOM.string('L',TRUNC(DBMS_RANDOM.value(100,400))) AS long_string,
       TRUNC(SYSDATE + DBMS_RANDOM.value(0,366)) AS created_date;
INSERT 0 29999
mogdb=#
mogdb=# select count(*) from random_data;
 count
-------
 29999
(1 row)

mogdb=#
```
diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/compat-tools.md b/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/compat-tools.md
deleted file mode 100644
index cf4fa7b6..00000000
--- a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/compat-tools.md
+++ /dev/null
@@ -1,18 +0,0 @@
---
title: compat-tools
summary: compat-tools
author: Zhang Cuiping
date: 2021-07-14
---

# compat-tools

This project is a set of compatibility tools. It aims to provide the necessary compatibility functions and system views for systems migrated to MogDB from other heterogeneous databases, thereby facilitating follow-up system maintenance and application modification.

The script runs based on version information. When you execute it, one of the following three situations applies:

1. If the object to be created does not exist in the target database, it is created directly.
2. If the version of the object to be created is later than that of the object in the target database, the object in the target database is upgraded and re-created.
3. If the version of the object to be created is earlier than that of the object in the target database, the creation operation is skipped.

Please refer to the [compat-tools repository page](https://gitee.com/enmotech/compat-tools) for details on how to obtain and use the component.
diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mog_filedump.md b/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mog_filedump.md
deleted file mode 100644
index d7a26a02..00000000
--- a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mog_filedump.md
+++ /dev/null
@@ -1,106 +0,0 @@
---
title: mog_filedump User Guide
summary: mog_filedump User Guide
author: Guo Huan
date: 2021-11-15
---

# mog_filedump User Guide

## Introduction

mog_filedump is a data file parsing tool ported to MogDB from the pg_filedump tool, with improved compatibility. It converts MogDB heap/index/control files into human-readable content. The tool can parse selected fields of the data columns as needed, and can also dump the data content directly in binary format. It automatically determines the type of a file from the data in its blocks. The **-c** option must be used to format the pg_control file.
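For example, a minimal sketch of formatting the cluster's pg_control file with **-c** (the path is hypothetical and depends on your data directory, here assumed to be `db_p`):

```
./mog_filedump -c db_p/global/pg_control
```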
## Principle

The implementation consists of three main steps:

1. Read a data block from the data file.

2. Parse the block with the callback function of the corresponding data type.

3. Call the output function of the corresponding data type to print the data content.
## Enmo's Improvements

1. Ported the tool to MogDB for compatibility.

2. Fixed an upstream bug: incorrect parsing of the **char** data type.

3. Fixed an upstream bug: in multi-field scenarios, the **name** data type caused a data length mismatch when parsing data files.
- -## Installation - -Visit [MogDB official website download page](https://www.mogdb.io/en/downloads/mogdb/) to download the corresponding version of the toolkit, and put the tool in the **bin** directory of the MogDB installation path. As shown below, toolkits-xxxxxx.tar.gz is the toolkit that contains mog_filedump. - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/about-mogdb/open-source-components-4.png) - -
## How to Use

```
mog_filedump [-abcdfhikxy] [-R startblock [endblock]] [-D attrlist] [-S blocksize] [-s segsize] [-n segnumber] file
```

Valid options for **heap** and **index** files are as follows:

| Option | Function |
| ---- | ------------------------------------------------------------ |
| -a | Display absolute addresses when formatting |
| -b | Output binary block images within a range |
| -d | Output the file block content |
| -D | The data types of the table columns.<br>Currently supported data types are: bigint, bigserial, bool, charN, date, float, float4, float8, int, json, macaddr, name, oid, real, serial, smallint, smallserial, text, time, timestamp, timetz, uuid, varchar, varcharN, xid, xml, ~.<br>'~' means ignore all following data types; for example, if a tuple has 10 columns, `-D first three column data types, ~` means that only the first three columns of the table tuple are parsed. |
| -f | Output and parse the content of the data block |
| -h | Display usage and help information |
| -i | Output and parse item details (including XMIN, XMAX, Block Id, linp Index, Attributes, Size, infomask) |
| -k | Verify the checksum of the data block |
| -R | Parse and output the data file content for the specified block range, e.g. **-R startblock [endblock]**. If only **startblock** is given without **endblock**, only that single data block is output |
| -s | Set the segment size |
| -n | Set the number of segments |
| -S | Set the data block size |
| -x | Parse and output block items in index item format (included by default) |
| -y | Parse and output block items in heap item format (included by default) |

The options available for the control file are as follows:

| Option | Function |
| ------- | ---------------------------------------------- |
| -c | Format the specified file as a pg_control file |
| -f | Output and parse the content of the data block |
| -S | Set the block size used when parsing the control file |

Combining the **-i** and **-f** options yields more detailed output for operations personnel to analyze and reference.
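For example, a minimal sketch of such a combined invocation (the data file path is illustrative and matches the Examples section below):

```
./mog_filedump -i -f db_p/base/15098/32904
```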
## Examples

The test table below covers most of the data types supported by mog_filedump.

The following use case demonstrates the data parsing function; add other options according to actual needs.

```sql
-- Create table test:
create table test(serial serial, smallserial smallserial, bigserial bigserial, bigint bigint, bool bool, char char(3), date date, float float, float4 float4, float8 float8, int int, json json, macaddr macaddr, name name, oid oid, real real, smallint smallint, text text, time time, timestamp timestamp, timetz timetz, uuid uuid, varchar varchar(20), xid xid, xml xml);

-- Insert data:
insert into test(bigint, bool, char, date, float, float4, float8, int, json, macaddr, name, oid, real, smallint, text, time, timestamp, timetz, uuid, varchar, xid, xml) values(123456789, true, 'abc', '2021-4-02 16:45:00', 3.1415926, 3.1415926, 3.14159269828412, 123456789, '{"a":1, "b":2, "c":3}'::json, '04-6C-59-99-AF-07', 'lvhui', 828243, 3.1415926, 12345, 'text', '2021-04-02 16:48:23', '2021-04-02 16:48:23', '2021-04-02 16:48:23', 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11', 'adsfghkjlzc', '9973::xid', 'Book0001');

-- Query the directory containing the data files of table test. The data directory specified by gs_initdb here is db_p, so the data file of table test is db_p/base/15098/32904:
mogdb=# select pg_relation_filepath('test');
base/15098/32904 (1 row)

-- Use the mog_filedump tool to parse the data file content:
./mog_filedump -D serial,smallserial,bigserial,bigint,bool,charN,date,float,float4,float8,int,json,macaddr,name,oid,real,smallint,text,time,timestamp,timetz,uuid,varchar,xid,xml db_p/base/15098/32904
```

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/mog_filedump.png)
diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mog_xlogdump.md b/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mog_xlogdump.md
deleted file mode 100644
index 50669811..00000000
--- a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mog_xlogdump.md
+++ /dev/null
@@ -1,319 +0,0 @@
---
title: mog_xlogdump User Guide
summary: mog_xlogdump User Guide
author: Guo Huan
date: 2021-11-15
---

# mog_xlogdump User Guide

## Introduction

mog_xlogdump is an offline WAL log parsing tool independently developed by Enmo. It is mainly used in primary/standby cluster scenarios: when the primary database is permanently down and cannot be recovered, it reversely analyzes the WAL of the instance that cannot be started, so that the data at the end of the WAL that was never synchronized to the rest of the cluster can be recovered.
## R&D Background

In a MogDB high-availability cluster with one primary and multiple standby databases using asynchronous logical replication, a transaction committed just before the primary shuts down has already been written to the primary's WAL log. Because the primary goes down before it can send those records to the standbys, the standbys are left with an incomplete data segment at the end of their logs. After the primary is down, the standbys are therefore not logically aligned with the primary, and the cluster formed from the standby databases risks losing data relative to the actual business.

While the primary database is being repaired, new business data is written to the cluster formed by the standby databases. If the primary is then brought back immediately, the segment number and the start and end positions of the incomplete data segment at the end of the primary's WAL log no longer match those on the standby. This inconsistency likewise means that the data lost when the primary shut down cannot be restored to the standby.
## Scenario

In a MogDB high-availability cluster, the WAL buffer is flushed to the WAL log when it fills to a certain percentage, or when a checkpoint or commit occurs. When the primary database goes down, the WalSender thread performing logical synchronization stops sending logs, and the standby receives an incomplete data segment of the WAL log. At this point, you need to use the mog_xlogdump tool to read the data blocks in the primary's WAL log and decode the SQL statements corresponding to the data operations, so that the DBA can judge whether the data is valuable and restore it to the standby database.
## Principle

The tool relies on two mechanisms: header parsing of the WAL log, and the logical replication mechanism.

The implementation consists of three main steps:

1. Read the WAL log file and parse its headers.

2. Read the data in turn and decode it.

3. Invoke the output callback corresponding to each data type.
- -## Supported Table Types for Parsing - -Partitioned and normal tables are currently supported. - -
## Supported Data Types for Parsing

bool, bytea, char, name, int8, int2, int, text, oid, tid, xid, cid, xid32, clob, float4, float8, money, inet, varchar, numeric, int4.

> Note: Since mog_xlogdump is an offline WAL parsing tool, it does not currently support large data types (clob, etc.) that require TOAST data. The next version will support offline parsing of TOAST table files.
- -## Installation - -Visit [MogDB official website download page](https://www.mogdb.io/en/downloads/mogdb/) to download the corresponding version of the toolkit, and put the tool in the **bin** directory of the MogDB installation path. As shown below, toolkits-xxxxxx.tar.gz is the toolkit that contains mog_xlogdump. - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/about-mogdb/open-source-components-4.png) - -
## Instructions for Use

mog_xlogdump parses and displays the WAL logs of MogDB 2.1 or later. It is an auxiliary tool designed to help DBAs analyze and debug database problems.

The mog_xlogdump parsing tool currently does not support column-store tables. (In cstore mode, a column-store table generates two auxiliary tables, CUDesc and delta: CUDesc holds the metadata of the column-store table, and delta is its temporary table, which is row-stored. Both tables are written to the WAL log. Although the delta table can be parsed from the WAL log, it is governed by the table attribute **deltarow_threshold**, which defaults to 100: only batches of fewer than 100 rows are written to the delta table, while larger batches are written directly to the CU files.)

> Note: To have column-store data written to the delta table, you need to enable the parameter **enable_delta_store = on** in postgresql.conf.
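The settings assumed by the notes above can be sketched as a postgresql.conf fragment (values are illustrative):

```
# postgresql.conf
wal_level = logical        # keep enough WAL information for logical decoding
enable_delta_store = on    # route small column-store inserts into the delta table
```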
- -## How to Use - -``` -mog_xlogdump [OPTION]... [STARTSEG [ENDSEG]] -``` - -
## Options

- -b, --bkp-details

  Output detailed information about file blocks. (By default the block's id, rel, fork, blk, and lastlsn are displayed; this option additionally displays the block image.)

- -B, --bytea_output

  Specify the display format for decoded bytea output; binary and character formats are available.

- -c, --connectinfo

  Specify a connection string URL, such as postgres://user:password@ip:port/dbname

- -e, --end=RECPTR

  Specify the end position (LSN) for parsing the WAL log.

- -f, --follow

  Continue parsing into the next file when the end of the specified WAL log is reached.

- -n, --limit=N

  Specify the number of data records to output.

- -o, --oid=OID

  Specify the OID of the table to reverse-decode.

- -p, --path=PATH

  Specify the WAL log storage directory.

- -R, --Rel=Relation

  Specify the column data types of the table to reverse-decode.

- -r, --rmgr=RMGR

  Show only the contents of records generated by the specified resource manager.

- -s, --start=RECPTR

  Specify the start position (LSN) for parsing the WAL log.

- -T, --CTimeZone_ENV

  Specify the time zone; the default is UTC.

- -t, --timeline=TLI

  Specify the timeline from which to start reading the WAL log.

- -V, --version

  Show the version number.

- -w, --write-FPW

  Display full-page write information; use together with -b.

- -x, --xid=XID

  Output only records with the specified transaction ID.

- -z, --stats

  Output statistics of inserted records.

- -v, --verbose

  Show verbose output.

- -?, --help

  Show help information and exit.
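A minimal invocation sketch combining several of these options (the LSN values are hypothetical placeholders; the WAL file path follows the examples below):

```
./mog_xlogdump -s 0/4000028 -e 0/4FFFFFF -n 10 db_p/pg_xlog/000000010000000000000004
```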
## Use Case 1

### Scenario

When the primary database is down and cannot be recovered but the standby database can still be connected, the WAL log produced by the primary may contain tens of thousands of table data operations. The mog_xlogdump tool parses out all data operations of the tables between the start and end LSN positions specified with -s and -e.

### Instruction

```
mog_xlogdump -c -s -e
```

### Parse Settings

Note: The purpose of these settings is to have old data recorded in the WAL log, that is, the data tuple as it was before an UPDATE and the tuple removed by a DELETE.

1. Set the **wal_level** parameter in the database configuration file postgresql.conf to **logical**.
2. Alter the table: `alter table table_name replica identity full;`

### Result

Outputs the WAL log parsing result in JSON format. Tuples are displayed as `'column name':'data'`.

```json
{'table_name':'xxx','schema_name':'yyy','action':'insert','tuple':{'name':'xx','id':'ss'}}
```

### Example

![fe1b12d080accfb9e54f857e79baebc](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/mog_xlogdump-1.png)

The red box marks the old data that is parsed thanks to the parse settings; without those settings, the old data of UPDATE and DELETE operations is not parsed.

The standby connection URL after -c is `postgres://test:Test123456@172.16.0.44:5003/postgres`:

- postgres://

  Connection string scheme prefix

- test

  Connection user name

- Test123456

  Password of the connection user

- 172.16.0.44

  IP address of the standby node

- 5003

  Connection port of the standby node

- postgres

  Name of the database on the standby node

- db_p/pg_xlog/000000010000000000000004

  WAL log file of the primary node
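Putting the pieces together, a concrete sketch of the instruction above, using the standby connection URL from this example and hypothetical start/end LSNs:

```
./mog_xlogdump -c postgres://test:Test123456@172.16.0.44:5003/postgres -s 0/4000028 -e 0/4FFFFFF db_p/pg_xlog/000000010000000000000004
```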
## Use Case 2

### Scenario

When the primary database is down and cannot be recovered and the standby database can be connected normally, the user may care about only a few individual tables in the database. The mog_xlogdump tool can parse the data of a table with a specified OID through the -o and -R options: -o specifies the OID of the table, and -R specifies the field types of the table.

### Instruction

Create a table, write and modify data, and use the mog_xlogdump tool to parse the WAL log.

```sql
create table t2(id int, money money,inet inet,bool bool,numeric numeric ,text text);
insert into t2 values(1, 24.241, '192.168.255.132', true, 3.1415926, 'ljfsodfo29892ifj');
insert into t2 values(2, 928.8271, '10.255.132.101', false, 3.1415926, 'vzvzcxwf2424@');
update t2 set id=111, money=982.371 where id =2;
delete from t2 where id=1;

postgres=# select * from t2;
 id | money | inet | bool | numeric | text
----+---------+-----------------+------+-----------+------------------
 1 | $24.24 | 192.168.255.132 | t | 3.1415926 | ljfsodfo29892ifj
 2 | $928.83 | 10.255.132.101 | f | 3.1415926 | vzvzcxwf2424@
(2 rows)

postgres=# update t2 set id=111, money=982.371 where id =2;
postgres=# delete from t2 where id=1;
postgres=# select * from t2;
 id | money | inet | bool | numeric | text
-----+-------------+----------------+------+-----------+---------------
 111 | $982,371.00 | 10.255.132.101 | f | 3.1415926 | vzvzcxwf2424@

(1 row)
```

### Parse Settings

Same as use case 1: set wal_level and alter the table.

### Result

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/mog_xlogdump-2.png)

```json
./mog_xlogdump -o 16394 -R int,money,inet,bool,numeric,text ./db_p/pg_xlog/000000010000000000000004
'insert','tuple':{'(null)':'1','(null)':'$24.24','(null)':'192.168.255.132','(null)':true,'(null)':'3.1415926','(null)':'ljfsodfo29892ifj'}}
'insert','tuple':{'(null)':'2','(null)':'$928.83','(null)':'10.255.132.101','(null)':false,'(null)':'3.1415926','(null)':'vzvzcxwf2424@'}}
'update','old_tuple':{'(null)':'2','(null)':'$928.83','(null)':'10.255.132.101','(null)':false,'(null)':'3.1415926','(null)':'vzvzcxwf2424@'},'new_tuple':{'(null)':'111','(null)':'$982,371.00','(null)':'10.255.132.101','(null)':false,'(null)':'3.1415926','(null)':'vzvzcxwf2424@'}}
'delete','tuple':{'(null)':'1','(null)':'$24.24','(null)':'192.168.255.132','(null)':true,'(null)':'3.1415926','(null)':'ljfsodfo29892ifj'}}
```

> Note: With the -c connection string, the table name, schema name, and column names are queried on the standby node. Parsing with -o and -R (specifying the table OID and field types) is completely offline, so this information cannot be obtained: the table name and schema name are not displayed, and column names are displayed as null.

```
mog_xlogdump -o -R -s -e WAL log file
```

The tool also retains the original functionality of pg_xlogdump.
## Use Case 3

### Scenario

To view the header contents of a WAL log, or to collect statistics about a WAL log, use the original pg_xlogdump functionality of mog_xlogdump.

### Instruction

1. Header information

   ```
   ./mog_xlogdump -n 10
   ```

   -n 10 indicates that only 10 rows of data are displayed.

2. Statistics

   ```
   ./mog_xlogdump -z
   ```

### Results

- Result 1

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/mog_xlogdump-3.png)

- Result 2

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/reference-guide/mog_xlogdump-4.png)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mogdb-monitor.md b/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mogdb-monitor.md
deleted file mode 100644
index b5ae7c9f..00000000
--- a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/mogdb-monitor.md
+++ /dev/null
@@ -1,27 +0,0 @@
---
title: mogdb-monitor
summary: mogdb-monitor
author: Guo Huan
date: 2022-04-14
---

# mogdb-monitor

mogdb-monitor is a monitoring and deployment tool for MogDB database clusters. Built on the popular open-source Prometheus monitoring framework and combined with the opengauss_exporter developed by the Enmo database team, it provides comprehensive monitoring of MogDB databases.

The core monitoring component opengauss_exporter has the following features:

- Supports all versions of MogDB/openGauss databases
- Supports monitoring database clusters
- Supports identifying primary and standby databases within a cluster
- Supports automatic database discovery
- Supports custom queries
- Supports online loading of configuration files
- Supports configuring the number of concurrent threads
- Supports caching of collected data

For Grafana display, Enmo also provides a complete set of dashboards: an instance-level dashboard showing detailed information for each instance, and an overview screen summarizing all instances. Combined with the Alertmanager component, alerts matching the configured rules can be delivered to the relevant personnel immediately.
Please refer to the [mogdb-monitor repository page](https://gitee.com/enmotech/mogdb-monitor) for details on how to obtain and use the component.
diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/open-source-components.md b/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/open-source-components.md
deleted file mode 100644
index f230ab15..00000000
--- a/product/en/docs-mogdb/v5.2/about-mogdb/open-source-components/open-source-components.md
+++ /dev/null
@@ -1,14 +0,0 @@
---
title: Open Source Components
summary: Open Source Components
author: Guo Huan
date: 2023-05-22
---

# Open Source Components

+ **[Container-based MogDB](2-docker-based-mogdb.md)**
+ **[compat-tools](compat-tools.md)**
+ **[mogdb-monitor](mogdb-monitor.md)**
+ **[mog_filedump](mog_filedump.md)**
+ **[mog_xlogdump](mog_xlogdump.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/terms-of-use.md b/product/en/docs-mogdb/v5.2/about-mogdb/terms-of-use.md
deleted file mode 100644
index a6c75a38..00000000
--- a/product/en/docs-mogdb/v5.2/about-mogdb/terms-of-use.md
+++ /dev/null
@@ -1,22 +0,0 @@
---
title: Terms of Use
summary: Terms of Use
author: Guo Huan
date: 2021-06-01
---

# Terms of Use

**Copyright © 2009-2022 Yunhe Enmo (Beijing) Information Technology Co., Ltd. All rights reserved.**

Your replication, use, modification, and distribution of this document are governed by the Creative Commons License Attribution-ShareAlike 4.0 International Public License (CC BY-SA 4.0). You can visit to view a human-readable summary of (and not a substitute for) CC BY-SA 4.0. For the complete CC BY-SA 4.0, visit .

Certain document contents on this website are from the official openGauss website ().

**Trademarks and Permissions**

MogDB is a trademark of Yunhe Enmo (Beijing) Information Technology Co., Ltd. All other trademarks and registered trademarks mentioned in this document are the property of their respective holders.

**Disclaimer**

This document is used only as a guide. Unless otherwise specified by applicable laws or agreed by both parties in written form, all statements, information, and recommendations in this document are provided "AS IS" without warranties, guarantees or representations of any kind, including but not limited to non-infringement, timeliness, and specific purposes.
diff --git a/product/en/docs-mogdb/v5.2/about-mogdb/usage-limitations.md b/product/en/docs-mogdb/v5.2/about-mogdb/usage-limitations.md
deleted file mode 100644
index f432224d..00000000
--- a/product/en/docs-mogdb/v5.2/about-mogdb/usage-limitations.md
+++ /dev/null
@@ -1,27 +0,0 @@
---
title: Usage Limitations
summary: Usage Limitations
author: Guo Huan
date: 2021-06-01
---

# Usage Limitations

This document describes the common usage limitations of MogDB.
| Item | Upper limit |
| ----------------------------------------- | ---------------------------------------------------- |
| Database capacity | Depends on the operating system and hardware |
| Size of a single table | 32 TB |
| Size of a single row | 1 GB |
| Size of a single field in a row | 1 GB |
| Number of rows in a single table | 281474976710656 (2^48) |
| Number of columns in a single table | 250-1600 (varies depending on field types) |
| Number of indexes in a single table | Unlimited |
| Number of columns in a compound index | 32 |
| Number of constraints in a single table | Unlimited |
| Number of concurrent connections | 262143 |
| Number of partitions in a partition table | 1048575 |
| Size of a single partition | 32 TB |
| Number of rows in a single partition | 2^55 |
| Maximum length of SQL text | 1048576 bytes (1 MB) |
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/administrator-guide.md b/product/en/docs-mogdb/v5.2/administrator-guide/administrator-guide.md
deleted file mode 100644
index 4eedb25b..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/administrator-guide.md
+++ /dev/null
@@ -1,17 +0,0 @@
---
title: Administrator Guide
summary: Administrator Guide
author: Guo Huan
date: 2023-05-22
---

# Administrator Guide

- **[Localization](localization/localization.md)**
- **[Routine Maintenance](routine-maintenance/routine-maintenance.md)**
- **[Primary and Standby Management](primary-and-standby-management.md)**
- **[MOT](mot-engine/mot-engine.md)**
- **[Column-store Tables Management](column-store-tables-management.md)**
- **[Backup and Restoration](backup-and-restoration/backup-and-restoration.md)**
- **[Database Deployment Solutions](database-deployment-scenario/database-deployment-scenario.md)**
- **[Importing And Exporting Data](importing-and-exporting-data/importing-and-exporting-data.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/backup-and-restoration-overview.md b/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/backup-and-restoration-overview.md
deleted file mode 100644
index 510e21f4..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/backup-and-restoration-overview.md
+++ /dev/null
@@ -1,93 +0,0 @@
---
title: Overview
summary: Overview
author: Guo Huan
date: 2021-04-27
---

# Overview

For database security purposes, MogDB provides three backup and restoration types, multiple backup and restoration solutions, and data reliability assurance mechanisms during backup and restoration.

Backup and restoration can be classified into logical backup and restoration, physical backup and restoration, and flashback.

- Logical backup and restoration: backs up data by logically exporting data. This method can dump data that is backed up at a certain time point, and restore data only to this backup point. A logical backup does not back up data processed between failure occurrence and the last backup. It applies to scenarios where data rarely changes. Such data damaged due to misoperation can be quickly restored using a logical backup. To restore all the data in a database through logical backup, rebuild a database and import the backup data. Logical backup is not recommended for databases requiring high data availability because it takes a long time for data restoration.
Logical backup is a major approach to migrating and transferring data because it can be performed on any platform.

- Physical backup and restoration: copies physical files in the unit of disk blocks from the primary node to the standby node to back up a database. A database can be restored using backup files such as data files and archive log files. A physical backup is useful when you need to back up and restore the complete database within a short period of time. Backup and restoration can be implemented at low costs through proper planning.

- Flashback: This function is used to restore dropped tables from the recycle bin. As in the Windows OS, dropped table information is stored in the recycle bin of the database. The MVCC mechanism is used to restore data to a specified point in time or change sequence number (CSN).

  The three data backup and restoration solutions supported by MogDB are as follows. Methods for restoring data in case of an exception differ for different backup and restoration solutions.

  **Table 1** Comparison of three backup and restoration types
| Backup Type | Application Scenario | Media | Tool Name | Recovery Time | Advantages and Disadvantages |
| :--- | :--- | :--- | :--- | :--- | :--- |
| Logical backup and restoration | Small volume of data needs to be processed.<br>You can back up a single table, multiple tables, a single database, or all databases. The backup data needs to be restored using gsql or gs_restore. When the data volume is large, the restoration takes a long time. | - Disk<br>- SSD | gs_dump | It takes a long time to restore data in plain-text format and a moderate time to restore data in archive format. | This tool is used to export database information. Users can export a database or its objects (such as schemas, tables, and views). The database can be the default postgres database or a user-specified database. The exported file can be in plain-text or archive format. Data in plain-text format can be restored only by using gsql, which takes a long time. Data in archive format can be restored only by using gs_restore, which takes less time than restoring the plain-text format. |
| | | | gs_dumpall | Long data recovery time | This tool is used to export all information of the openGauss database, including the data of the default postgres database, data of user-specified databases, and global objects of all openGauss databases.<br>Only data in plain-text format can be exported. The exported data can be restored only by using gsql, which takes a long time. |
| Physical backup and restoration | Huge volume of data needs to be processed. It is mainly used for full backup and restoration as well as the backup of all WAL archive and run logs in the database. | | gs_backup | Small data volume and fast data recovery | This OM tool exports database parameter files and binary files. It helps openGauss back up and restore important data, and displays help and version information. During the backup, you can select the type of content to back up. During the restoration, ensure that the backup file exists in the backup directory of each node. Database instances are restored based on the instance information in the static configuration file. Restoring only parameter files takes a short time. |
| | | | gs_basebackup | During the restoration, you can directly copy and replace the original files, or start the database directly on the backup. The restoration takes a short time. | This tool is used to fully copy the binary files of the server database. Only the database at a certain time point can be backed up. With PITR, you can restore data to a time point after the full backup time point. |
| | | | gs_probackup | Data can be restored directly to a backup point and the database can be started on the backup. The restoration takes a short time. | **gs_probackup** is a tool used to manage openGauss backup and restoration. It periodically backs up openGauss instances. It supports the physical backup of a standalone database or a primary database instance, and the backup of external directories such as script files, configuration files, log files, and dump files. It supports incremental, periodic, and remote backup. Incremental backup takes less time than full backup because only modified files are backed up. Currently, the data directory is backed up by default; if a tablespace is not in the data directory, you need to specify the tablespace directory to back up manually. Currently, data can be backed up only on the primary node. |
| Flashback | Applicable to:<br>1) A table is deleted by mistake.<br>2) Data in the tables needs to be restored to a specified time point or CSN. | None | | You can restore a table to its status at a specified time point or before the table structure was deleted. The restoration takes a short time. | Flashback can selectively and efficiently undo the impact of a committed transaction and recover from a human error. Before the flashback technology is used, a committed database modification can be retrieved only by restoring a backup or by PITR, which takes minutes or even hours. With flashback, it takes only seconds to restore the committed data to before the modification, regardless of the database size.<br>Flashback supports two recovery modes:<br>- Multi-version data restoration based on MVCC: applicable to the query and restoration of data that is deleted, updated, or inserted by mistake. You can configure the retention period of the old version and run the corresponding query or restoration command to query or restore data to a specified time point or CSN.<br>- Recovery based on the recycle bin (similar to that on Windows OS): applicable to the recovery of tables that are dropped or truncated by mistake. You can configure the recycle bin switch and run the corresponding restoration command to restore such tables. |
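For reference, minimal invocation sketches of two of the tools compared above (user names, paths, and ports are placeholders, not recommendations):

```
gs_dump -U omm -f /backup/db1.sql postgres      # logical backup of one database, plain-text format
gs_basebackup -D /backup/full -p 26000          # physical full backup of the instance
```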
While backing up and restoring data, take the following aspects into consideration:

- Whether the impact of data backup on services is acceptable

- Database restoration efficiency

  To minimize the impact of database faults, try to minimize the restoration duration, achieving the highest restoration efficiency.

- Data restorability

  Minimize data loss after the database is invalidated.

- Database restoration cost

  There are many factors to consider when selecting a backup policy on the live network, such as backup objects, data volume, and network configuration. Table 2 lists available backup policies and the applicable scenarios for each backup policy.

  **Table 2** Backup policies and scenarios

  | Backup Policy | Key Performance Factor | Typical Data Volume | Performance Specifications |
  | :----------------------- | :----------------------------------------------------------- | :--------------------------------------------------------- | :----------------------------------------------------------- |
  | Database instance backup | - Data amount<br>- Network configuration | Data volume: PB level<br>Object quantity: about 1 million | Backup:<br>- Data transfer rate on each host: 80 Mbit/s (NBU/EISOO+Disk)<br>- Disk I/O rate (SSD/HDD): about 90% |
  | Table backup | - Schema where the table to be backed up resides<br>- Network configuration (NBU) | Data volume: 10 TB level | Backup: depends on query performance rate and I/O rate<br>NOTE:<br>For multi-table backup, the backup time is calculated as follows:<br>`Total time = Number of tables x Starting time + Total data volume/Data backup speed`<br>In the preceding formula:<br>- The starting time of a disk is about 5s. The starting time of an NBU is longer than that of a disk (depending on the NBU deployment).<br>- The data backup speed is about 50 MB/s on a single node. (The speed is evaluated based on the backup of a 1 GB table from a physical host to a local disk.)<br>The smaller the table is, the lower the backup performance will be. |
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/backup-and-restoration.md b/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/backup-and-restoration.md
deleted file mode 100644
index 7408d8c6..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/backup-and-restoration.md
+++ /dev/null
@@ -1,13 +0,0 @@
---
title: Backup and Restoration
summary: Backup and Restoration
author: Guo Huan
date: 2023-05-22
---

# Backup and Restoration

+ **[Overview](backup-and-restoration-overview.md)**
+ **[Physical Backup and Restoration](physical-backup-and-restoration.md)**
+ **[Logical Backup and Restoration](logical-backup-and-restoration.md)**
+ **[Flashback Restoration](flashback-restoration.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/flashback-restoration.md b/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/flashback-restoration.md
deleted file mode 100644
index 8488aac9..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/flashback-restoration.md
+++ /dev/null
@@ -1,210 +0,0 @@
---
title: Flashback Restoration
summary: Flashback Restoration
author: Guo Huan
date: 2021-10-12
---

# Flashback Restoration

Flashback restoration is a part of database recovery technology. It can be used to selectively cancel the impact of a committed transaction and restore data from incorrect manual operations. Before the flashback technology is used, a committed database modification can be retrieved only by restoring a backup or by PITR, which takes minutes or even hours. With the flashback technology, it takes only seconds to restore the committed data to its state before the modification, and the restoration time is independent of the database size.

> Note: The Astore engine does not support the flashback function.

**Flashback supports two restoration modes:**

- MVCC-based multi-version data restoration (only Ustore is supported): suitable for querying and restoring data that was deleted, updated, or inserted by mistake. Users can configure the retention time of old versions and execute the corresponding query or restore command to query or restore data to a specified point in time or CSN.
- Restoration based on the database recycle bin (only Ustore is supported): suitable for restoring tables that were dropped or truncated by mistake. By configuring the recycle bin switch and executing the corresponding restore command, the user can retrieve tables that were erroneously dropped or truncated.

**Related parameters:**

- [enable_default_ustore_table](../../reference-guide/guc-parameters/miscellaneous-parameters.md#enable_default_ustore_table)=on

  Use the Ustore storage engine by default.

- [undo_retention_time](../../reference-guide/guc-parameters/flashback.md#undo_retention_time)

  Sets how long old undo versions are retained, which is effectively the time window within which flashback queries are allowed. Flashback queries beyond this window may report a `restore point not found` error.
- [enable_recyclebin](../../reference-guide/guc-parameters/flashback.md#enable_recyclebin)=on

  Enables the recycle bin.

- [recyclebin_retention_time](../../reference-guide/guc-parameters/flashback.md#recyclebin_retention_time)=15min

  Sets the retention time of recycle bin objects; objects older than this are cleaned up automatically.
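A postgresql.conf sketch of the flashback-related settings listed above (the retention values are illustrative examples, not recommendations):

```
# postgresql.conf
enable_default_ustore_table = on   # create Ustore tables by default
undo_retention_time = 900          # seconds of old undo versions kept for flashback query
enable_recyclebin = on             # enable the recycle bin
recyclebin_retention_time = 15min  # purge recycle bin objects after 15 minutes
```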
## Flashback Query

### Context

Flashback query enables you to query a snapshot of a table at a certain time point in the past. This feature can be used to view and logically rebuild damaged data that was accidentally deleted or modified. Flashback query is based on the MVCC mechanism: old versions are retained, so the data of a specified old version can be retrieved and queried.

### Prerequisites

- The **undo_retention_time** parameter has been set for specifying the retention period of undo logs.

### Syntax

```ebnf+diagram
FlashBack ::= {[ ONLY ] table_name [ * ] [ partition_clause ] [ [ AS ] alias [ ( column_alias [, ...] ) ] ]
[ TABLESAMPLE sampling_method ( argument [, ...] ) [ REPEATABLE ( seed ) ] ]
[TIMECAPSULE { TIMESTAMP | CSN } expression ]
|( select ) [ AS ] alias [ ( column_alias [, ...] ) ]
|with_query_name [ [ AS ] alias [ ( column_alias [, ...] ) ] ]
|function_name ( [ argument [, ...] ] ) [ AS ] alias [ ( column_alias [, ...] | column_definition [, ...] ) ]
|function_name ( [ argument [, ...] ] ) AS ( column_definition [, ...] )
|from_item [ NATURAL ] join_type from_item [ ON join_condition | USING ( join_column [, ...] ) ]}
```

In the syntax tree, **TIMECAPSULE {TIMESTAMP | CSN} expression** is a new expression for the flashback function. **TIMECAPSULE** indicates that the flashback function is used. **TIMESTAMP** and **CSN** indicate that the flashback function uses a specific time point or commit sequence number (CSN).

### Parameter Description

- TIMESTAMP

  Specifies a history time point of the table data to be queried.

- CSN

  Specifies a logical commit time point of the data in the entire database to be queried. Each CSN in the database represents a consistency point of the entire database. To query the data under a CSN means to query, through SQL statements, the data related to that consistency point.

### Examples

- Example 1:

  ```sql
  SELECT * FROM t1 TIMECAPSULE TIMESTAMP to_timestamp ('2020-02-11 10:13:22.724718', 'YYYY-MM-DD HH24:MI:SS.FF');
  ```

- Example 2:

  ```sql
  SELECT * FROM t1 TIMECAPSULE CSN 9617;
  ```

- Example 3:

  ```sql
  SELECT * FROM t1 AS t TIMECAPSULE TIMESTAMP to_timestamp ('2020-02-11 10:13:22.724718', 'YYYY-MM-DD HH24:MI:SS.FF');
  ```

- Example 4:

  ```sql
  SELECT * FROM t1 AS t TIMECAPSULE CSN 9617;
  ```

## Flashback Table

### Context

Flashback table enables you to restore a table to a specific point in time. When only one table or a group of tables is logically damaged rather than the entire database, this feature can be used to quickly restore the table data. Based on the MVCC mechanism, flashback table removes data inserted at or after the specified time point and restores data deleted between that time point and the present, thereby restoring table-level data.

### Prerequisites

- The **undo_retention_time** parameter has been set for specifying the retention period of undo logs.

### Syntax

```ebnf+diagram
FlashBack ::= TIMECAPSULE TABLE table_name TO { TIMESTAMP | CSN } expression
```

### Examples

```sql
TIMECAPSULE TABLE t1 TO TIMESTAMP to_timestamp ('2020-02-11 10:13:22.724718', 'YYYY-MM-DD HH24:MI:SS.FF');
TIMECAPSULE TABLE t1 TO CSN 9617;
```

## Flashback DROP/TRUNCATE

### Context

Flashback drop enables you to restore tables that are dropped by mistake, together with their auxiliary structures such as indexes and table constraints, from the recycle bin.
Flashback drop is based on the recycle bin mechanism: the physical table files recorded in the recycle bin are restored to recover dropped tables.

Flashback truncate enables you to restore tables that are truncated by mistake, restoring the physical data of the truncated tables and indexes from the recycle bin. Flashback truncate is likewise based on the recycle bin mechanism.

### Prerequisites

- The **enable_recyclebin** parameter has been set for enabling the recycle bin.
- The **recyclebin_retention_time** parameter has been set for specifying the retention period of objects in the recycle bin. The objects are automatically deleted after the retention period expires.

### Syntax

- Drop a table.

  ```ebnf+diagram
  DropTable ::= DROP TABLE table_name [PURGE]
  ```

- Purge objects in the recycle bin.

  ```ebnf+diagram
  PurgeRecyclebin ::= PURGE { TABLE { table_name }
  | INDEX { index_name }
  | RECYCLEBIN
  }
  ```

- Flash back a dropped table.

  ```ebnf+diagram
  TimecapsuleTable ::= TIMECAPSULE TABLE { table_name } TO BEFORE DROP [RENAME TO new_tablename]
  ```

- Truncate a table.

  ```ebnf+diagram
  TruncateTable ::= TRUNCATE TABLE { table_name } [ PURGE ]
  ```

- Flash back a truncated table.

  ```ebnf+diagram
  TimecapsuleTable ::= TIMECAPSULE TABLE { table_name } TO BEFORE TRUNCATE
  ```

### Parameter Description

- DROP/TRUNCATE TABLE table_name PURGE

  Purges the table data in the recycle bin by default.

- PURGE RECYCLEBIN

  Purges all objects in the recycle bin.

- **TO BEFORE DROP**

Retrieves dropped tables and their subobjects from the recycle bin.

You can specify either the original user-specified name of the table or the system-generated name assigned to the object when it was dropped.

- System-generated recycle bin object names are unique. Therefore, if you specify the system-generated name, the database retrieves that specified object. To see the contents of your recycle bin, run **select \* from pg_recyclebin;**.
- If you specify the user-specified name and the recycle bin contains more than one object of that name, the database retrieves the object that was moved to the recycle bin most recently. If you want to retrieve an older version of the table, then do one of these things:
  - Specify the system-generated recycle bin name of the table you want to retrieve.
  - Run **TIMECAPSULE TABLE ... TO BEFORE DROP** statements until you retrieve the table you want.
  - When a dropped table is restored, only the base table name is restored; the names of other subobjects remain the same as those in the recycle bin. You can run DDL commands to manually rename the subobjects as required.
  - The recycle bin does not support write operations such as DML, DCL, and DDL, and does not support DQL query operations (to be supported in later versions).
  - If, between the flashback point and the current point, a statement has modified the table structure or affected its physical structure, the flashback fails. The error message "ERROR: The table definition of %s has been changed." is displayed when flashback is performed on a table where DDL operations have been performed. The error message "ERROR: recycle object %s desired does not exist" is displayed when flashback is performed after DDL operations such as changing namespaces or table names.
- **RENAME TO**

Specifies a new name for the table retrieved from the recycle bin.

- **TO BEFORE TRUNCATE**

Flashes back to the point in time before the TRUNCATE operation.

### Syntax Example

```sql
DROP TABLE t1 PURGE;

PURGE TABLE t1;
PURGE TABLE "BIN$04LhcpndanfgMAAAAAANPw==$0";
PURGE INDEX i1;
PURGE INDEX "BIN$04LhcpndanfgMAAAAAANPw==$0";
PURGE RECYCLEBIN;

TIMECAPSULE TABLE t1 TO BEFORE DROP;
TIMECAPSULE TABLE t1 TO BEFORE DROP RENAME TO new_t1;
TIMECAPSULE TABLE "BIN$04LhcpndanfgMAAAAAANPw==$0" TO BEFORE DROP;
TIMECAPSULE TABLE "BIN$04LhcpndanfgMAAAAAANPw==$0" TO BEFORE DROP RENAME TO new_t1;
```
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/logical-backup-and-restoration.md b/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/logical-backup-and-restoration.md
deleted file mode 100644
index f811ba2e..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/logical-backup-and-restoration.md
+++ /dev/null
@@ -1,24 +0,0 @@
---
title: Logical Backup and Restoration
summary: Logical Backup and Restoration
author: Guo Huan
date: 2021-04-27
---

# Logical Backup and Restoration

## gs_dump

For details, see [gs_dump](../../reference-guide/tool-reference/server-tools/gs_dump.md).

## gs_dumpall

For details, see [gs_dumpall](../../reference-guide/tool-reference/server-tools/gs_dumpall.md).

## gs_restore

For details, see [gs_restore](../../reference-guide/tool-reference/server-tools/gs_restore.md).

## gs_backup

For details, see [gs_backup](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_backup.md).
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/physical-backup-and-restoration.md b/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/physical-backup-and-restoration.md
deleted file mode 100644
index a5583272..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/backup-and-restoration/physical-backup-and-restoration.md
+++ /dev/null
@@ -1,129 +0,0 @@
---
title: Physical Backup and Restoration
summary: Physical Backup and Restoration
author: Guo Huan
date: 2021-04-27
---

# Physical Backup and Restoration

## gs_basebackup

For details, see [gs_basebackup](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_basebackup.md).

## PITR Recovery

### Background

When a database breaks down or needs to be rolled back to a previous state, the point-in-time recovery (PITR) function of MogDB can be used to restore the database to any point in time after the backup and archive data was generated.

**NOTE:**

- PITR can only restore to a point in time after the physical backup data was generated.
- Only the primary node can be restored using PITR. The standby node needs to be fully rebuilt to synchronize data with the primary node.

### Prerequisites

- Full data files have been physically backed up.
- WAL log files have been archived.

### PITR Recovery Process

1. Replace the target database directory with the physical backup files.
2. Delete all files in the database directory **pg_xlog/**.
3. Copy the archived WAL log files to the **pg_xlog** directory. (Or you can configure **restore_command** in the **recovery.conf** file to skip this step.)
4. Create the recovery command file **recovery.conf** in the database directory and specify the degree of database recovery.
5. Start the database.
6. Connect to the database and check whether the database has recovered to the expected status.
7. If the expected status is reached, run the **pg_xlog_replay_resume()** command so that the primary node can provide services externally.

### Configuring the recovery.conf File

**Archive Recovery Configuration**

- restore_command = string

  A **shell** command used to obtain archived WAL files from the WAL file series. Any %f in the string is replaced by the name of the file to retrieve from the archive, and any %p is replaced by the path name to copy it to on the server. Any %r is replaced by the name of the file containing the last valid restart point.

  For example:

  ```bash
  restore_command = 'cp /mnt/server/archivedir/%f %p'
  ```

- archive_cleanup_command = string

  This optional parameter declares a **shell** command that is executed at every restart point. **archive_cleanup_command** provides a mechanism for deleting unnecessary archived WAL files from the standby database. Any %r is replaced by the name of the file containing the last valid restart point. That is the earliest file that must be kept to allow recovery to be restartable, so all files older than %r can be safely removed.

  For example:

  ```bash
  archive_cleanup_command = 'pg_archivecleanup /mnt/server/archivedir %r'
  ```

  If multiple standby servers need to be recovered from the same archive path, ensure that WAL files are not deleted from any standby server before the recovery.

- recovery_end_command = string

  This parameter is optional and declares a **shell** command that is executed only when the recovery is complete. **recovery_end_command** provides a cleanup mechanism for future replication and recovery.

**Recovery Target Configuration**

- recovery_target_name = string

  Declares that recovery proceeds to the named restore point created with pg_create_restore_point().

  For example:

  ```bash
  recovery_target_name = 'restore_point_1'
  ```

- recovery_target_time = timestamp

  Declares that recovery proceeds to the specified timestamp.

  For example:

  ```bash
  recovery_target_time = '2020-01-01 12:00:00'
  ```

- recovery_target_xid = string

  Declares that recovery proceeds to the specified transaction ID.

  For example:

  ```bash
  recovery_target_xid = '3000'
  ```

- recovery_target_lsn = string

  Declares that recovery proceeds to the specified LSN in the log.

  For example:

  ```bash
  recovery_target_lsn = '0/0FFFFFF'
  ```

- recovery_target_inclusive = boolean

  Declares whether recovery stops just after the specified recovery target (**true**) or just before it (**false**). This declaration supports only the recovery targets **recovery_target_time**, **recovery_target_xid**, and **recovery_target_lsn**.

  For example:

  ```bash
  recovery_target_inclusive = true
  ```

**NOTE:**

- Only one of the four configuration items **recovery_target_name**, **recovery_target_time**, **recovery_target_xid**, and **recovery_target_lsn** can be used at a time.
- If no recovery target is configured, or the configured target does not exist, data is recovered to the latest WAL log point by default.

## gs_probackup

For details, see [gs_probackup](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_probackup.md).
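As a consolidated reference for the PITR parameters described above, a minimal recovery.conf sketch (the archive path and target time are taken from the examples and are placeholders):

```
restore_command = 'cp /mnt/server/archivedir/%f %p'
recovery_target_time = '2020-01-01 12:00:00'
recovery_target_inclusive = true
```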
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/column-store-tables-management.md b/product/en/docs-mogdb/v5.2/administrator-guide/column-store-tables-management.md
deleted file mode 100644
index 8e210216..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/column-store-tables-management.md
+++ /dev/null
@@ -1,386 +0,0 @@
---
title: Column-store Tables Management
summary: Column-store Tables Management
author: Guo Huan
date: 2021-04-09
---

# Column-store Tables Management

## What is Column-store

Row-store stores tables to disk partitions by row, and column-store stores tables to disk partitions by column. By default, a row-store table is created. For details about the differences between row storage and column storage, see Figure 1.

**Figure 1** Differences between row storage and column storage

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/column-store-tables-management.png)

In the preceding figure, the upper left part is a row-store table, and the upper right part shows how the row-store table is stored on a disk; the lower left part is a column-store table, and the lower right part shows how the column-store table is stored on a disk. You can clearly see that the data of a record is kept together in a row-store table but split across columns in a column-store table.

## Advantages and Disadvantages of Row-store and Column-store Tables and Their Usage Scenarios

Both storage models have benefits and drawbacks.

| Storage Model | Benefit | Drawback |
| :------------- | :----------------------------------------------------------- | :----------------------------------------------------------- |
| Row storage | Record data is stored together. Data can be easily inserted and updated. | All the columns of a record are read after the **SELECT** statement is executed, even if only certain columns are required. |
| Column storage | Only the columns involved in a query are read. Projections are efficient. Any column can serve as an index. | The selected columns need to be reconstructed after the **SELECT** statement is executed. Data cannot be easily inserted or updated. |

Generally, if a table contains many columns (called a wide table) and its queries involve only a few columns, column storage is recommended. Row storage is recommended if a table contains only a few columns and a query involves most of the fields.

| Storage Model | Application Scenarios |
| :------------- | :----------------------------------------------------------- |
| Row storage | Point queries (simple index-based queries that return only a few records)<br>Scenarios requiring frequent addition, deletion, and modification |
| Column storage | Statistical analysis queries (requiring a large number of association and grouping operations)<br>Ad hoc queries (using uncertain query conditions and unable to utilize indexes to scan row-store tables) |

MogDB supports hybrid row storage and column storage. Each storage model applies to specific scenarios; select an appropriate one when creating a table. Generally, MogDB is used for transactional processing databases, so row storage is used by default. Column storage is used only when complex queries over large data volumes are performed.

## Selecting a Storage Model

- Update frequency

  If data is frequently updated, use a row-store table.

- Data insertion frequency

  If a small amount of data is frequently inserted each time, use a row-store table.
-
-- Number of columns
-
-  If a table is to contain many columns, use a column-store table.
-
-- Number of columns to be queried
-
-  If only a small number of columns (less than 50% of the total) is queried each time, use a column-store table.
-
-- Compression ratio
-
-  A column-store table achieves a higher compression ratio than a row-store table, but a high compression ratio consumes more CPU resources.
-
-## Constraints of Column-store Tables
-
-- Column-store tables do not support arrays.
-- It is recommended that the number of column-store tables not exceed 1000.
-- For table-level constraints, column-store tables support only **PARTIAL CLUSTER KEY**; table-level constraints such as primary keys and foreign keys are not supported.
-- For field constraints, column-store tables support only **NULL**, **NOT NULL**, and **DEFAULT** with constant values.
-- Column-store tables do not support modifying field constraints with the **ALTER** command.
-- Column-store tables support delta tables. The **enable_delta_store** parameter controls whether the delta store is enabled, and the **deltarow_threshold** parameter controls the threshold below which data enters the delta table.
-
-## Related Parameters
-
-- cstore_buffers
-
-  Specifies the size of the shared buffer used by column store. Default value: 32768 KB.
-
-- partition_mem_batch
-
-  Specifies the number of caches. To optimize batch insertion into column-store partitioned tables, data is cached during the batch insertion process and then written to disk in batches. Default value: 256.
-
-- partition_max_cache_size
-
-  Specifies the size of the data buffer area. To optimize batch insertion into column-store partitioned tables, data is cached during the batch insertion process and then written to disk in batches. Default value: 2 GB.
-
-- enable_delta_store
-
-  Specifies whether to enable the column-store delta table, which improves the performance of importing single rows of data into column store and reduces disk redundancy. It is used together with the **DELTAROW_THRESHOLD** parameter. Default value: off.
-
-## Create Table Commands
-
-MogDB creates normal tables as uncompressed row-store tables by default.
-
-```
-mogdb=# \dt
-No relations found.
-mogdb=# create table test_t(id serial primary key ,col1 varchar(8),col2 decimal(6,2),create_time timestamptz not null default now());
-NOTICE: CREATE TABLE will create implicit sequence "test_t_id_seq" for serial column "test_t.id"
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_t_pkey" for table "test_t"
-CREATE TABLE
-mogdb=# \dt+
-                                List of relations
- Schema |  Name  | Type  | Owner |  Size   |             Storage              | Description
---------+--------+-------+-------+---------+----------------------------------+-------------
- public | test_t | table | omm   | 0 bytes | {orientation=row,compression=no} |
-(1 row)
-
-mogdb=#
-```
-
-To create a column-store table, specify **orientation=column**; the default compression level is **low**.
- -``` -mogdb=# create table column_t(id serial,col1 varchar(8),col2 decimal(6,2),create_time timestamptz not null default now()) with (orientation=column ); -NOTICE: CREATE TABLE will create implicit sequence "column_t_id_seq" for serial column "column_t.id" -CREATE TABLE -mogdb=# \dt+ - List of relations - Schema | Name | Type | Owner | Size | Storage | Description ---------+----------+-------+-------+---------+--------------------------------------+------------- - public | column_t | table | omm | 16 kB | {orientation=column,compression=low} | - public | test_t | table | omm | 0 bytes | {orientation=row,compression=no} | -(2 rows) - -mogdb=# \d+ column_t - Table "public.column_t" - Column | Type | Modifiers | Storage | Stats target | Description --------------+--------------------------+-------------------------------------------------------+----------+--------------+------------- - id | integer | not null default nextval('column_t_id_seq'::regclass) | plain | | - col1 | character varying(8) | | extended | | - col2 | numeric(6,2) | | main | | - create_time | timestamp with time zone | not null default now() | plain | | -Has OIDs: no -Options: orientation=column, compression=low -``` - -Add partial clustered storage columns to the column-store table. - -``` -mogdb=# \d+ column_t - Table "public.column_t" - Column | Type | Modifiers | Storage | Stats target | Description --------------+--------------------------+-------------------------------------------------------+----------+--------------+------------- - id | integer | not null default nextval('column_t_id_seq'::regclass) | plain | | - col1 | character varying(8) | | extended | | - col2 | numeric(6,2) | | main | | - create_time | timestamp with time zone | not null default now() | plain | | -Has OIDs: no -Options: orientation=column, compression=low - -mogdb=# alter table column_t add PARTIAL CLUSTER KEY(id); -ALTER TABLE -mogdb=# \d+ column_t - Table "public.column_t" - Column | Type | Modifiers | Storage | Stats target | Description --------------+--------------------------+-------------------------------------------------------+----------+--------------+------------- - id | integer | not null default nextval('column_t_id_seq'::regclass) | plain | | - col1 | character varying(8) | | extended | | - col2 | numeric(6,2) | | main | | - create_time | timestamp with time zone | not null default now() | plain | | -Partial Cluster : - "column_t_cluster" PARTIAL CLUSTER KEY (id) -Has OIDs: no -Options: orientation=column, compression=low - -mogdb=# -``` - -Create column-store tables with partial clustered storage directly. 
- -``` -mogdb=# create table column_c(id serial,col1 varchar(8),col2 decimal(6,2),create_time timestamptz not null default now(),PARTIAL CLUSTER KEY(id)) with (orientation=column ); -NOTICE: CREATE TABLE will create implicit sequence "column_c_id_seq" for serial column "column_c.id" -CREATE TABLE -mogdb=# \d+ column_c - Table "public.column_c" - Column | Type | Modifiers | Storage | Stats target | Description --------------+--------------------------+-------------------------------------------------------+----------+--------------+------------- - id | integer | not null default nextval('column_c_id_seq'::regclass) | plain | | - col1 | character varying(8) | | extended | | - col2 | numeric(6,2) | | main | | - create_time | timestamp with time zone | not null default now() | plain | | -Partial Cluster : - "column_c_cluster" PARTIAL CLUSTER KEY (id) -Has OIDs: no -Options: orientation=column, compression=low - -mogdb=# -``` - -Please refer to **Supported Data Types** > **Data Types Supported by Column-store Tables** under the **Reference Guide** for the data types supported by column-store tables. - -## Column-store versus Row-store - -**Used disk space** - -- The default size of the column-store table is 16K, the compression level is **low**. - -- The default size of the row-store table is 0bytes, the compression level is **no**. - -- Insert 1 million pieces of data into the two tables separately , and compare the occupied disk size. - - ``` - mogdb=# \dt+ - List of relations - Schema | Name | Type | Owner | Size | Storage | Description - --------+-----------+-------+-------+---------+-----------------------------------------+------------- - public | column_t | table | omm | 16 kB | {orientation=column,compression=low} | - public | column_th | table | omm | 16 kB | {orientation=column,compression=high} | - public | column_tm | table | omm | 16 kB | {orientation=column,compression=middle} | - public | row_tc | table | omm | 0 bytes | {orientation=row,compression=yes} | - public | test_t | table | omm | 0 bytes | {orientation=row,compression=no} | - (5 rows) - - mogdb=# insert into column_t select generate_series(1,1000000),left(md5(random()::text),8),random()::numeric(6,2); - INSERT 0 1000000 - Time: 11328.880 ms - mogdb=# insert into column_th select generate_series(1,1000000),left(md5(random()::text),8),random()::numeric(6,2); - INSERT 0 1000000 - Time: 10188.634 ms - mogdb=# insert into column_tm select generate_series(1,1000000),left(md5(random()::text),8),random()::numeric(6,2); - INSERT 0 1000000 - Time: 9802.739 ms - mogdb=# insert into test_t select generate_series(1,1000000),left(md5(random()::text),8),random()::numeric(6,2); - INSERT 0 1000000 - Time: 17404.945 ms - mogdb=# insert into row_tc select generate_series(1,1000000),left(md5(random()::text),8),random()::numeric(6,2); - INSERT 0 1000000 - Time: 12394.866 ms - mogdb=# \dt+ - List of relations - Schema | Name | Type | Owner | Size | Storage | Description - --------+-----------+-------+-------+----------+-----------------------------------------+------------- - public | column_t | table | omm | 12 MB | {orientation=column,compression=low} | - public | column_th | table | omm | 8304 kB | {orientation=column,compression=high} | - public | column_tm | table | omm | 10168 kB | {orientation=column,compression=middle} | - public | row_tc | table | omm | 58 MB | {orientation=row,compression=yes} | - public | test_t | table | omm | 58 MB | {orientation=row,compression=no} | - (5 rows) - - mogdb=# - ``` - -- The higher the compression 
level of a column-store table is, the less disk space it uses.
-
-- After the row-store table is compressed, its disk usage does not decrease significantly.
-
-- The column-store tables take up roughly one sixth of the disk space of the row-store tables.
-
-**DML Comparison**
-
-Search for a single column:
-
-```
----
----Search by range, column-store is nearly 20 times faster than row-store
----
-mogdb=# select col1 from test_t where id>=100010 and id<100020;
-   col1
-----------
- 4257a3f3
- 3d397284
- 64343438
- 6eb7bdb7
- d1c9073d
- 6aeb037c
- 1d424974
- 223235ab
- 329de235
- 2f02adc1
-(10 rows)
-
-Time: 77.341 ms
-mogdb=# select col1 from column_t where id>=100010 and id<100020;
-   col1
-----------
- d4837c30
- 87a46f7a
- 2f42a9c9
- 4481c793
- 68800204
- 613b9205
- 9d8f4a0a
- 5cc4ff9e
- f948cd10
- f2775cee
-(10 rows)
-
-Time: 3.884 ms
-
----
----Search Randomly, column-store is nearly 35 times faster than row-store
----
-
-mogdb=# select col1 from test_t limit 10;
-   col1
-----------
- c2780d93
- 294be14d
- 4e53b761
- 2c10f8a2
- ae776743
- 7d683c66
- b3b40054
- 7e56edf9
- a7b7336e
- ea3d47d9
-(10 rows)
-
-Time: 249.887 ms
-mogdb=# select col1 from column_t limit 10;
-   col1
-----------
- a745d77b
- 4b6df494
- 76fed9c1
- 70c9664d
- 3384de8a
- 4158f3bf
- 5d1c3b9f
- 341876bb
- f396f4ed
- abfd78bb
-(10 rows)
-
-Time: 7.738 ms
-```
-
-Search for all the data:
-
-```
----
----Row-store is 30% faster than column-store search
----
-mogdb=# select * from test_t limit 10;
- id |   col1   | col2 |          create_time
-----+----------+------+-------------------------------
-  1 | c2780d93 | .37  | 2020-10-26 14:27:33.304108+08
-  2 | 294be14d | .57  | 2020-10-26 14:27:33.304108+08
-  3 | 4e53b761 | .98  | 2020-10-26 14:27:33.304108+08
-  4 | 2c10f8a2 | .27  | 2020-10-26 14:27:33.304108+08
-  5 | ae776743 | .97  | 2020-10-26 14:27:33.304108+08
-  6 | 7d683c66 | .58  | 2020-10-26 14:27:33.304108+08
-  7 | b3b40054 | .44  | 2020-10-26 14:27:33.304108+08
-  8 | 7e56edf9 | .43  | 2020-10-26 14:27:33.304108+08
-  9 | a7b7336e | .31  | 2020-10-26 14:27:33.304108+08
- 10 | ea3d47d9 | .42  | 2020-10-26 14:27:33.304108+08
-(10 rows)
-
-Time: 6.822 ms
-
-mogdb=# select * from column_t limit 10;
- id |   col1   | col2 |          create_time
-----+----------+------+-------------------------------
-  1 | a745d77b | .33  | 2020-10-26 14:28:20.633253+08
-  2 | 4b6df494 | .42  | 2020-10-26 14:28:20.633253+08
-  3 | 76fed9c1 | .73  | 2020-10-26 14:28:20.633253+08
-  4 | 70c9664d | .74  | 2020-10-26 14:28:20.633253+08
-  5 | 3384de8a | .48  | 2020-10-26 14:28:20.633253+08
-  6 | 4158f3bf | .59  | 2020-10-26 14:28:20.633253+08
-  7 | 5d1c3b9f | .63  | 2020-10-26 14:28:20.633253+08
-  8 | 341876bb | .97  | 2020-10-26 14:28:20.633253+08
-  9 | f396f4ed | .73  | 2020-10-26 14:28:20.633253+08
- 10 | abfd78bb | .30  | 2020-10-26 14:28:20.633253+08
-(10 rows)
-
-Time: 9.982 ms
-```
-
-Update data:
-
-```
----
----Update a field directly, column-store is nearly 7 times faster than row-store
----
-mogdb=# update test_t set col1=col1;
-UPDATE 1000000
-Time: 19779.978 ms
-mogdb=# update column_t set col1=col1;
-UPDATE 1000000
-Time: 2702.339 ms
-```
-
-## Conclusion
-
-1. The column-store table uses roughly one sixth of the disk space of the row-store table.
2. When querying specific columns, the column-store table is about 20-35 times faster than the row-store table.
-3. When querying all the data, the column-store table is about 30% slower than the row-store table.
-4. 
When importing data in batches in the default compression mode, the column-store table is about 40% faster than the row-store table.
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/common-primary-backup-deployment-scenarios.md b/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/common-primary-backup-deployment-scenarios.md
deleted file mode 100644
index 003d3811..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/common-primary-backup-deployment-scenarios.md
+++ /dev/null
@@ -1,85 +0,0 @@
----
-title: Common Primary/Standby Deployment Solutions
-summary: Common Primary/Standby Deployment Solutions
-author: Guo Huan
-date: 2023-04-07
----
-
-# Common Primary/Standby Deployment Solutions
-
-## Single-Center Deployment
-
-**Figure 1** Single-center deployment
-![Single-center deployment](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/single-center-deployment.png)
-
-Networking features: If a single AZ is deployed, one synchronous standby node and one asynchronous standby node can be configured.
-
-Advantages:
-
-1. The three nodes are equivalent. If any node is faulty, the other nodes can still provide services.
-2. The cost is low.
-
-Disadvantages: High availability (HA) is limited. If an AZ-level fault occurs, services can be restored only by restoring the entire instance.
-
-Applicability: Applicable to service systems that have low requirements on HA.
-
-## Intra-City Dual-Center Deployment
-
-**Figure 2** Intra-city dual-center deployment
-![Intra-city dual-center deployment](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/intra-city-dual-center-deployment.png)
-
-Networking features: Two intra-city AZs are more reliable than a single AZ. A synchronous standby node can be configured for the primary center and the intra-city center respectively.
-
-Advantages:
-
-1. Intra-city synchronous replication. If one data center is faulty, the other data center can still provide services without data loss. RPO = 0.
-2. The cost is reasonable.
-
-Disadvantages:
-
-1. The intra-city distance should not be too long. It is recommended that the distance be within 70 km. The total latency caused by excessive read/write round trips should be considered during service design.
-2. Remote DR is not supported.
-
-Applicability: Applicable to common service systems.
-
-## Two-City Three-DC Deployment
-
-**Figure 3** Two-city three-DC deployment
-![Two-city three-DC deployment](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/two-city-three-dc-deployment.png)
-
-Networking features: In the two-city three-DC deployment, each AZ must have at least one synchronous standby node. Because more cities and data centers are involved, cluster reliability reaches the highest level.
-
-Advantages: It supports zero data loss in remote DR and has the highest reliability. RPO = 0.
-
-Disadvantages:
-
-1. If the remote DR distance is long and a synchronous standby node is configured in the remote center, performance may be affected.
-2. The cost is relatively high.
-
-Applicability: Applicable to core and important service systems.
-
-## Two-City Three-DC Streaming DR Solution
-
-**Figure 4** Two-city three-DC streaming DR solution
-![Two-city three-DC streaming DR solution](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/two-city-three-dc-streaming-dr-solution.png)
-
-Networking features: Two independent clusters are deployed in the dual-cluster DR solution. The primary and DR cluster networking modes can be selected as required.
The DR cluster selects the first standby DN to connect to the primary DN of the primary cluster. Within the DR cluster, the remaining standby DNs connect to the first standby DN in cascading standby mode.
-
-Advantages:
-
-1. The primary cluster retains the advantages of single-cluster networking. You need to manually switch over to the standby cluster only when the primary cluster is unavailable.
-2. There is only one cross-cluster (remote) replication link regardless of whether a DR switchover occurs. Therefore, less network bandwidth is occupied.
-3. The networking is more flexible. The primary cluster and DR cluster can use different networking modes.
-
-Disadvantages:
-
-1. A DR cluster needs to be added, increasing costs.
-2. Remote DR RPO > 0.
-
-Applicability: Applicable to core and important service systems.
-
-For more information, see [Two-City Three-DC DR](./two-city-three-dc-dr.md).
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
->
-> The preceding deployments are typical solutions. You can adjust them based on actual service scenarios, for example, by adding or deleting standby nodes, adjusting the number of centers, properly deploying synchronous and asynchronous standby nodes, and properly using cascaded standby nodes.
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/database-deployment-scenario.md b/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/database-deployment-scenario.md
deleted file mode 100644
index 028df8a4..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/database-deployment-scenario.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Database Deployment Solutions
-summary: Database Deployment Solutions
-author: Guo Huan
-date: 2023-05-22
----
-
-# Database Deployment Solutions
-
-+ **[Common Primary/Standby Deployment Solutions](common-primary-backup-deployment-scenarios.md)**
-+ **[Two-City Three-DC DR](two-city-three-dc-dr.md)**
-+ **[Resource Pooling Architecture](resource-pooling-architecture/resource-pooling-architecture.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-architecture.md b/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-architecture.md
deleted file mode 100644
index ebc5cfc4..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-architecture.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Resource Pooling Architecture
-summary: Resource Pooling Architecture
-author: Guo Huan
-date: 2023-04-07
----
-
-# Resource Pooling Architecture
-
-This document describes best practices and precautions for the resource pooling architecture so that developers who are interested in related features can quickly deploy it, experiment with it, or perform customized development. It is recommended that developers be proficient in compiling MogDB source code and have at least a basic command of storage concepts and Linux commands.
-
-The following figure shows the resource pooling architecture.
-
-**Figure 1: MogDB resource pooling architecture**
-
-![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/shared_storage.jpg)
-
-- The read/write node and the read-only node share the same underlying storage.
-- The read/write node and the read-only node use the DMS components to share hot data pages in the shared buffer pool through the TCP or RDMA protocol.
-- The read/write node and the read-only node access persistent data in the underlying shared storage through DSS APIs and DSS servers.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-current-architectural-feature-constraints.md b/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-current-architectural-feature-constraints.md
deleted file mode 100644
index bcd3f8ee..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-current-architectural-feature-constraints.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-title: Constraints on the Resource Pooling Architecture
-summary: Constraints on the Resource Pooling Architecture
-author: Guo Huan
-date: 2023-04-07
----
-
-# Constraints on the Resource Pooling Architecture
-
-## Description
-
-Resource pooling is a new cluster architecture launched by MogDB. The DMS and DSS components are used to implement underlying shared storage among multiple nodes in a cluster and real-time memory sharing among nodes. In this way, underlying storage resources are saved, write-once-read-many is supported in a cluster, and real-time consistent read is supported. This document describes the constraints on the resource pooling architecture.
-
-## Current Constraints
-
-- **Note that these are temporary constraints and the features may be supported in the future.**
-
-| No. | Constraint | Remarks |
-| :--: | :----------------------------------------------------------- | :----------------------------------------------------------- |
-| 1 | Only segment-page storage is supported. Page-based storage is not supported. | The table creation statement must contain **with (segment = on, xxx)**. |
-| 2 | Row-store tables are not supported. | None |
-| 3 | FDW is not supported. | None |
-| 4 | Unlogged tables are not supported. | None |
-| 5 | Local temporary tables and global temporary tables are not supported. | None |
-| 6 | Features with compression are not supported. | None |
-| 7 | Materialized views are not supported. | None |
-| 8 | Transactions cannot be started on the standby node. | None |
-| 9 | XA transactions are not supported. | None |
-| 10 | When the OM is used for installation, only disk array deployment is supported. Ceph and virtual storage pools are not supported. | None |
-| 11 | Publication and subscription are not supported. | None |
-| 12 | The traditional primary/standby architecture cannot be deployed at the same time. | A cluster cannot use both the resource pooling primary/standby mode and the traditional primary/standby mode; accordingly, the **replconninfo** and **hot_standby** parameters are not supported. |
-| 13 | Ustore is not supported. | Flashback is not supported because flashback supports only Ustore. |
-| 14 | The size of a single Xlog file is changed from 16 MB to 1 GB. | The recycling mechanism and the pg_xlogdump tool are adapted to the 1 GB size accordingly. |
-| 15 | You are not advised to disable Global SysCache. | This function is enabled by default and can be disabled through a configuration item. After this function is disabled, connections may slow down in the case of high concurrency. |
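-
-As a quick illustration of constraint 1, every table created in a resource pooling cluster must use segment-page storage. The following is a minimal sketch, assuming a resource pooling node set up as in the deployment guide below is listening on port 12210 with user tester; these connection values are hypothetical examples, not requirements:
-
-```bash
-# Hypothetical example: in resource pooling mode, table creation
-# must enable segment-page storage via with (segment = on).
-gsql -d postgres -p 12210 -U tester -c "create table seg_t(id int, val text) with (segment = on);"
-```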
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-developer-environment-deployment-guide.md b/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-developer-environment-deployment-guide.md
deleted file mode 100644
index 9e639451..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/resource-pooling-architecture/resource-pooling-developer-environment-deployment-guide.md
+++ /dev/null
@@ -1,272 +0,0 @@
----
-title: Resource Pooling Environment Deployment Guide
-summary: Resource Pooling Environment Deployment Guide
-author: Guo Huan
-date: 2023-04-04
----
-
-# Resource Pooling Environment Deployment Guide
-
-## Description
-
-Resource pooling is a new cluster architecture launched by MogDB. The DMS and DSS components are used to implement underlying shared storage among multiple nodes in a cluster and real-time memory sharing among nodes. In this way, underlying storage resources are saved, write-once-read-many is supported in a cluster, and real-time consistent read is supported. This document describes how to set up a resource pooling environment for self-learning or development.
-
-## Background Knowledge
-
-Developers are advised to:
-
-- Understand basic Linux commands, such as **dd** and **iscsi** tools.
-- Understand disk arrays.
-- Be familiar with the traditional MogDB compilation mode.
-
-## Precautions
-
-- The following figure shows the resource pooling deployment.
-
-**Figure 1: MogDB resource pooling deployment**
-
-![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/dms1.jpg)
-
-- To set up the resource pooling architecture with the installation package of the enterprise edition officially released in the community, disk arrays, servers, and optical switches need to be prepared, and the CM and OM components need to be deployed.
-- This document describes how to set up a compilation environment without disk arrays or the CM and OM components. Only a common physical machine is required to set up a resource pooling environment.
-- Note that an environment set up in this mode cannot be used to debug primary/standby switchover or failover because no real CM is used. It can only be used to verify the normal running of the cluster.
-
-## Preparing for the Environment
-
-- An independent physical machine with at least one disk partition whose free space is greater than 200 GB.
-- The MogDB installation package of the debug version with resource pooling code has been compiled. You can check whether dssserver and dsscmd exist in the generated **bin** directory, and whether **libdms.so**, **libdssapi.so**, and **libdssaio.so** exist in the **lib** directory. Ensure that the DSS and DMS components of the test version are used during MogDB compilation. For details, see the following steps.
-
-## Independent Compilation and Installation
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION**:
->
-> Do not perform the following deployment operations in the production environment.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE**:
->
-> - MogDB must be compiled in debug mode instead of release mode. The DMS and DSS components of the test version are used.
-> - The manual compilation and installation mode does not contain the CM and OM components.
In the formal environment, the DSS and DMS components depend on the CM. Therefore, you need to compile the DSS and DMS components in test mode before compiling MogDB. For details, see the following compilation procedure.
-
-1. Compile the DSS component of the test version.
-
-   a. Download the CBB code of the latest version. Then, compile, install, and use it to replace CBB in the third-party library.
-
-   b. Download the DSS code of the latest version and roll back the DSS to the specified version based on the version number in **src/gausskernel/ddes/ddes_commit_id**.
-
-   c. Compile, install, and use the downloaded DSS component to replace the DSS component in the third-party library. (**DSS_CODE_PATH** indicates the directory of the decompressed DSS source code, and **ThirdParty_Binarylibs_Path** indicates the directory of the decompressed third-party library.)
-
-   ```shell
-   # -3rd is followed by the absolute path of the third-party library.
-   cd [DSS_CODE_PATH]/build/linux/MogDB
-   sh build.sh -3rd [ThirdParty_Binarylibs_Path] -t cmake -m DebugDsstest
-   ```
-
-2. Compile the DMS component of the test version.
-
-   a. Download the CBB code of the latest version. Then, compile, install, and use it to replace CBB in the third-party library.
-
-   b. Download the DMS code of the latest version and roll back the DMS to the specified version based on the version number in **src/gausskernel/ddes/ddes_commit_id**.
-
-   c. Compile, install, and use the downloaded DMS component to replace the DMS component in the third-party library. (**DMS_CODE_PATH** indicates the directory of the decompressed DMS source code, and **ThirdParty_Binarylibs_Path** indicates the directory of the decompressed third-party library.)
-
-   ```shell
-   cd [DMS_CODE_PATH]/build/linux/MogDB
-   sh build.sh -3rd [ThirdParty_Binarylibs_Path] -t cmake -m Release
-   cd [DMS_CODE_PATH]
-   mkdir -p tmp
-   export BUILD_MODE=Debug
-   cmake . -DCMAKE_BUILD_TYPE=Debug -D DMS_TEST=ON -DOPENGAUSS=yes -B ./tmp
-   cd tmp/
-   make -sj
-   ```
-
-   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION**:
-   >
-   > After the DSS, DMS, and CBB are compiled, they are automatically updated to the third-party library. You do not need to manually copy them. You only need to compile the database according to the standard procedure.
-
-3. Configure environment variables.
-
-   The following uses two nodes as an example. **DSS_HOME** indicates the directory required for running the dssserver of DN 1 and needs to be manually created. Write the following content to **/home/test/envfile** as environment variables.
-
-   ```shell
-   export GAUSSHOME=/home/test/openGauss-server/dest/
-   export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH
-   export PATH=$GAUSSHOME/bin:$PATH
-   export DSS_HOME=/home/test/dss/dss0/dssdba
-   ```
-
-4. Create directories for **dssserver**.
-
-   ```shell
-   cd /home/test
-   mkdir -p dss/dss0/dssdba/cfg
-   mkdir -p dss/dss0/dssdba/log
-   mkdir -p dss/dss1/dssdba/cfg
-   mkdir -p dss/dss1/dssdba/log
-   mkdir -p dss/dev
-   ```
-
-5. Run the **dd** command to create a simulated block device file.
-
-   The following command creates a 2 TB disk. Adjust the values of **bs** and **count** as required. The execution time depends on the disk performance.
-
-   ```shell
-   dd if=/dev/zero of=/home/test/dss/dev/dss-dba bs=2M count=1024000 >/dev/null 2>&1
-   ```
-
-6. Create the configuration files of DSS 0 and DSS 1 required by the two DNs.
-
-   Create the configuration file of DSS 0.
- - ```shell - vim /home/test/dss/dss0/dssdba/cfg/dss_inst.ini - ``` - - The file content is as follows: - - ```shell - INST_ID=0 - _LOG_LEVEL=255 - DSS_NODES_LIST=0:127.0.0.1:17102,1:127.0.0.1:18102 - DISK_LOCK_FILE_PATH=/home/test/dss/dss0 - LSNR_PATH=/home/test/dss/dss0 - _LOG_MAX_FILE_SIZE=20M - _LOG_BACKUP_FILE_COUNT=128 - ``` - - Create the volume configuration file of DSS 0. - - ```shell - vim /home/test/dss/dss0/dssdba/cfg/dss_vg_conf.ini - ``` - - The content in the file is as follows, which is the volume name plus the device name simulated by **dd**: - - ```shell - data:/home/test/dss/dev/dss-dba - ``` - - Create the configuration file of DSS 1. - - ```shell - vim /home/test/dss/dss1/dssdba/cfg/dss_inst.ini - ``` - - The content in the file is as follows. Note that the value of **DISK_LOCK_FILE_PATH** is the same as that in DSS 0. - - ```shell - INST_ID=1 - _LOG_LEVEL=255 - DSS_NODES_LIST=0:127.0.0.1:17102,1:127.0.0.1:18102 - DISK_LOCK_FILE_PATH=/home/test/dss/dss0 - LSNR_PATH=/home/test/dss/dss1 - _LOG_MAX_FILE_SIZE=20M - _LOG_BACKUP_FILE_COUNT=128 - ``` - - Create the volume configuration file of DSS 1. - - ```shell - vim /home/test/dss/dss1/dssdba/cfg/dss_vg_conf.ini - ``` - - The content in the file is as follows, which is the volume name plus the device name simulated by **dd**: - - ```shell - data:/home/test/dss/dev/dss-dba - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif)**CAUTION**: - > - > Multiple DNs (databases) are created on a server. The IP addresses are the same, but the port numbers used by services are different. - -7. Create a data volume for storing database data and start the dssserver service. - - ```shell - ##This is the environment variable configured in step 3. - source /home/test/envfile - dsscmd cv -g data -v /home/test/dss/dev/dss-dba - dssserver -D /home/test/dss/dss0/dssdba & - #If **DSS SERVER STARTED** is displayed in the previous command output, the operation is successful. - dssserver -D /home/test/dss/dss1/dssdba & - #If **DSS SERVER STARTED** is displayed in the previous command output, the operation is successful. - - #Run the following command to check whether the volume is successfully created. - dsscmd lsvg -U UDS:/home/test/dss/dss0/.dss_unix_d_socket - dsscmd ls -m M -p +data -U UDS:/home/test/dss/dss0/.dss_unix_d_socket - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** - > - > The DSS does not support volume group configuration modification after startup. If the volume group configuration needs to be modified, perform the preceding steps again. - -8. Perform the gs_initdb operation on each node in sequence. 
-
-   ```shell
-   mkdir -p /home/test/data
-   rm -rf node1 node2
-
-   gs_initdb -D /home/test/data/node1 --nodename=node1 -U tester -w Password --vgname=+data --enable-dss --dms_url="0:127.0.0.1:1613,1:127.0.0.1:1614" -I 0 --socketpath='UDS:/home/test/dss/dss0/.dss_unix_d_socket'
-
-   echo "ss_enable_ssl = off
-   listen_addresses = '*'
-   port=12210
-   ss_enable_reform = off
-   ss_work_thread_count = 32
-   enable_segment = on
-   ss_log_level = 255
-   ss_log_backup_file_count = 100
-   ss_log_max_file_size = 1GB
-   " >> /home/test/data/node1/postgresql.conf
-
-   sed '91 ahost all all 0.0.0.0/0 sha256' -i /home/test/data/node1/pg_hba.conf
-
-   gs_initdb -D /home/test/data/node2 --nodename=node2 -U tester -w Password --vgname=+data --enable-dss --dms_url="0:127.0.0.1:1613,1:127.0.0.1:1614" -I 1 --socketpath='UDS:/home/test/dss/dss1/.dss_unix_d_socket'
-
-   echo "ss_enable_ssl = off
-   listen_addresses = '*'
-   port=13210
-   ss_enable_reform = off
-   ss_work_thread_count = 32
-   enable_segment = on
-   ss_log_level = 255
-   ss_log_backup_file_count = 100
-   ss_log_max_file_size = 1GB
-   " >> /home/test/data/node2/postgresql.conf
-
-   sed '91 ahost all all 0.0.0.0/0 sha256' -i /home/test/data/node2/pg_hba.conf
-   ```
-
-9. Create a file that simulates the CM function and add it to the environment variable file created in step 3.
-
-   ```shell
-   echo "REFORMER_ID = 0" > /home/test/cm_config.ini
-   echo "BITMAP_ONLINE = 3" >> /home/test/cm_config.ini
-   echo "export CM_CONFIG_PATH=/home/test/cm_config.ini" >> /home/test/envfile
-   ```
-
-10. Start node 1 and node 2 in sequence.
-
-    ```shell
-    source /home/test/envfile
-    gs_ctl start -D /home/test/data/node1
-    gs_ctl start -D /home/test/data/node2
-    ```
-
-## Supplementary Information
-
-The **ss_log_level** parameter determines whether DMS and DSS logs are printed. The log directory is **pg_log/DMS**.
-
-- Ports 17102 and 18102 are used by the dssserver.
-- Ports 1613 and 1614 are used for DMS communication.
-- Ports 12210 and 13210 are used by the MogDB database to provide services.
-- The values of **INST_ID** in the dssserver configurations cannot conflict with each other; that is, multiple dssservers cannot use the same **INST_ID**.
-- The environment set up in this mode does not support HA, and switchover and failover cannot be tested.
-- If an error message such as "dms library version is not matched" is displayed during the startup, the DMS or DSS component version is incorrect. In this case, recompile the DMS or DSS component by referring to the compilation procedure.
-- In a non-CM environment, node 0 is restricted to be the primary node. Therefore, ensure that node 0 is successfully created in the initdb phase.
-- If an error is reported during the installation, the system may display a message indicating that the directory is not empty when you perform the initdb operation again. In this case, you need to clear the residual files in the file system and DSS. You can run the **rm** command to delete the node folder from the file system. In DSS, you can write zeros to the header of the simulated block device file (DSS records its metadata in the header). After the clearing is complete, start from step 7.
The clearing command is as follows:
-
-```shell
-rm -rf /home/test/data/node1 /home/test/data/node2
-dd if=/dev/zero of=/home/test/dss/dev/dss-dba bs=2M count=10 conv=notrunc >/dev/null 2>&1
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/two-city-three-dc-dr.md b/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/two-city-three-dc-dr.md
deleted file mode 100644
index 6559f1ec..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/database-deployment-scenario/two-city-three-dc-dr.md
+++ /dev/null
@@ -1,192 +0,0 @@
----
-title: Two-City Three-DC DR
-summary: Two-City Three-DC DR
-author: GUO HUAN
-date: 2022-10-13
----
-
-# Two-City Three-DC DR
-
-To implement cross-region DR, you need to deploy two database instances: one primary database instance and one DR database instance, usually deployed in two cities far away from each other. Full and incremental data synchronization is implemented between the database instances, with or without storage media. If a regional fault occurs in the primary (production) database instance and its data cannot be restored, you can promote the DR database instance to primary so that it takes over services.
-
-MogDB provides the streaming replication-based remote DR solution.
-
-## Streaming Replication-based Remote DR Solution
-
-### Overview
-
-In MogDB 3.1.0 and later versions, the two-city three-DC DR solution is provided.
-
-### Specifications and Restrictions
-
-This section describes the feature specifications and restrictions of this solution. Management personnel must pay special attention to this section.
-
-#### Feature Specifications
-
-- The network latency within the primary or DR database instance must be less than or equal to 10 ms, and the network latency between the primary and standby database instances must be less than or equal to 100 ms. DR runs normally within the required network latency range. Otherwise, the primary and standby database instances will be disconnected.
-
-- The following table lists the log generation speeds in the primary database instance supported by different hardware specifications when the network bandwidth is not a bottleneck and the parallel playback function is enabled in the DR database instance. The RPO and RTO can be ensured only when the log generation speed stays within these limits.
-
-  **Table 1** Log generation speed in typical configurations
-
-  | Typical Configuration | Log Generation Speed of the Primary Database Instance |
-  | :-------------------- | :---------------------------------------------------- |
-  | 96U/768G/SATA SSD | <=10MB/s |
-  | 128U/2T/NVMe SSD | <=40MB/s |
-
-- If hybrid disk deployment is used, use the specifications of the lower configuration. For example, if the database instance contains both NVMe and SATA disks, the specifications for SATA disks are used.
-
-- A certain amount of data can be lost when the DR database instance is promoted to primary, and the RPO is less than or equal to 10 seconds. When the DR database instance is normal, the RTO for promoting the DR database instance to primary is less than or equal to 10 minutes. When the DR database instance is degraded, the RTO for promoting the DR database instance to primary is within 20 minutes.
-
-- Practice: Planned primary/standby database instance switchover, no data loss, RPO = 0, RTO ≤ 20 minutes (including the processes of demoting the primary database instance to the DR instance and promoting the DR database instance to the primary database instance).
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** Tests show that the maximum write rate of SATA SSDs is about 240 MB/s, that of SAS SSDs is over 500 MB/s, and that of NVMe SSDs is even better. If the hardware conditions do not meet the preceding specifications, the single-shard log generation speed in the primary database instance must be reduced to ensure the RPO and RTO.
->
-> If resources such as file handles and memory are exhausted in the primary or standby database instance, the RPO and RTO cannot be ensured.
-
-#### Feature Constraints
-
-- Before the DR relationship is established, you need to create a DR user with the streaming replication permission on the primary cluster for DR authentication. The primary and standby clusters must use the same DR username and password. After a DR relationship is established, the user password cannot be changed. To change it, remove the DR relationship, modify the username and password, and establish the DR relationship again. The DR user password cannot contain blank characters or the following characters: |;&$<>`'"{}()[]~*?!\\n
-- The versions of the primary and standby clusters for which a DR relationship is to be established must be the same.
-- The first standby and cascaded standby nodes cannot exist before streaming DR is established.
-- When the DR relationship is established, if the number of cluster copies is less than or equal to 2, **most_available_sync** is set to **on**. After the DR relationship is removed or a failover occurs, **most_available_sync** is not restored to the initial value, ensuring that the cluster remains in maximum availability mode.
-- When the DR relationship is established, **synchronous_commit** is set to **on**. After the DR relationship is removed or a failover occurs, **synchronous_commit** is restored to the initial value.
-- The DR cluster can be read but cannot be written.
-- After the DR cluster is promoted to primary by running the failover command, the DR relationship between the DR cluster and the original primary cluster becomes invalid. You need to re-establish the DR relationship.
-- The DR relationship can be set up only when both the primary and DR database instances are normal. The DR relationship can be canceled on the primary database instance only when the primary database instance is normal and the DR database instance has been promoted to primary. When both the primary and DR database instances are normal, you can execute a planned switchover between them. If the DR database instance is neither normal nor degraded, it cannot be promoted to primary and cannot provide DR services. In this case, you need to manually repair or rebuild the DR database instance.
-- If the majority of DNs in the DR cluster are faulty, or all CMS and DNs are faulty, the DR relationship cannot be established and the DR cluster cannot be promoted to primary or used. In this case, you need to rebuild the DR cluster.
-- If a forcible switchover is performed on the primary cluster, you need to rebuild the DR cluster.
-- Both the primary and DR clusters support full backup and incremental backup using gs_probackup.
In the DR state, neither the primary cluster nor the DR cluster can be restored. If the primary database instance needs to be restored, remove the DR relationship first. After the backup and restoration are complete, re-establish the DR relationship. -- After the DR relationship is established, the DN port cannot be changed. -- GUC parameters cannot be synchronized between the primary database instance and the DR database instance in a DR relationship. -- The primary and standby clusters do not support node replacement and repair, copy addition and reduction, or DCF mode. -- If the DR database instance has two copies and one copy is damaged, the DR database instance can still be promoted to primary to provide services. If the remaining copies are also damaged, data loss is inevitable. -- In the DR state, only gray upgrade is supported and the original upgrade constraints are inherited. In the DR state, the upgrade must comply with the following sequence: upgrade the primary cluster, upgrade the standby cluster, submit the standby cluster, and then submit the primary cluster. -- When selecting the IP address for streaming replication-based remote DR, you are advised to separate the network plane in the cluster from the cross-cluster network plane to balance pressure and improve security. - -### GUC Parameter Settings That Affect DR Performance - -#### Impact of Checkpoint-related Parameter Settings - -- The DR performance metric described in “Feature Specifications” is measured when the parameters related to checkpoints are set to default values. -- For details about checkpoint parameters, see “GUC Parameters > Write Ahead Log > Checkpoints” in *Developer Guide*. When **enable_incremental_checkpoint** is set to **on**, the maximum interval between automatic WAL checkpoints is determined by the value of **incremental_checkpoint_timeout**. If the default value is not used and you set it to a larger value, a large number of logs need to be replayed when the instance is restarted. As a result, the specified RTO cannot be ensured. - -#### Impact of Ultimate RTO-related Parameter Settings - -For details about the parameters related to ultimate RTO, see the description of the **recovery_parse_workers** and **recovery_redo_workers** parameters in “GUC Parameters > Write Ahead Log > Log Replay” in *Developer Guide*. To enable ultimate RTO, ensure that the number of logical CPUs on each host is greater than the number of extra threads started after ultimate RTO is enabled. Otherwise, threads may contend for CPU resources. As a result, some operations in the DR process take a long time and cannot meet the specified DR performance metric. The formula of calculating the number of extra threads started after ultimate RTO is enabled is as follows: (**recovery_parse_workers** x (**recovery_redo_workers** + 2) + 5) x Number of DN instances on each host. - -### Basic Operations - -#### Establishing a DR Relationship - -##### Evaluating the Service Load of the Primary Database Instance Before Establishing the DR Relationship - -**Data Volume** - -- The amount of data stored in the primary database instance directly affects the amount of data to be transmitted during the establishment of the DR relationship. In addition, the remote network bandwidth also affects the duration of the DR relationship establishment. You can set the timeout interval by changing the value of **time-out** of the DR relationship establishment interface as required. The default value is 20 minutes. 
The timeout interval is determined by the data volume of the primary database instance before the DR relationship is established and the available remote network bandwidth. The formula is as follows: Data volume/Transmission rate = Time required. - - For example, if the primary database instance has 100 TB data and the available bandwidth between remote database instances is 512 Mbps (transmission rate: 64 MB/s), it takes 1638400s (100 x 1024 x 1024/64, about 19 days) to transmit the data during establishment of the DR relationship. - -**Log Generation Rate** - -- The log generation rate affects the amount of logs that need to be retained in the primary database instance during establishment of the DR relationship. After full data restoration is complete, the DR database instance establishes a streaming replication relationship with the primary database instance. If the primary database instance does not retain the logs, the streaming replication relationship may fail to be established. - - For example, if the establishment process lasts for two days, logs generated within the two days must be retained in the local disk of the primary database instance before the DR relationship is established. - -- If the log generation rate of the primary database instance is greater than the remote transmission bandwidth, or if the bandwidth is sufficient but the log generation rate of the primary database instance is greater than the log replay rate of the DR database instance, the RPO/RTO as specified in “Feature Specifications” cannot be ensured. - -##### Interfaces for Establishing the DR Relationship - -During the establishment of the DR relationship, you must send setup requests to the primary and standby database instances. For details, see the gs_sdr tool in *Tool Reference*. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - During establishment of the DR relationship, the DR username and password need to be delivered to the primary and DR database instances for inter-database instance authentication. The user permission is **Replication**, which is specific for replication. -> - Before setting up a DR relationship, you must create a DR user in the primary cluster. -> - After the DR relationship is established, the user password cannot be modified. It is used throughout the DR lifecycle. You can remove the DR relationship, modify the username and password, and establish the DR relationship again. -> - You can set the timeout interval by changing the value of **time_out** as required. The default value is 20 minutes. The timeout interval is determined by the data volume of the primary database instance before the DR relationship is established and the available remote network bandwidth. The formula is as follows: Data volume/Transmission rate = Time required. For example, if the primary database instance has 100 TB data and the available bandwidth between remote database instances is 512 Mbps (transmission rate: 64 MB/s), it takes 1638400s (100 x 1024 x 1024/64, about 19 days) to transmit the data during establishment of the DR relationship. - -#### DR Database Instance Failover - -Send a request to the DR database instance to promote the DR database instance to primary. For details, see the gs_sdr tool in *Tool Reference*. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - After the DR database instance is promoted to primary, the DR information is cleared. 
-> - If the primary database instance is normal and is processing services, you can run this command to remove the DR relationship from the DR database instance. After this command is executed, the DR database instance does not receive logs from the primary database instance anymore. As a result, the RPO value keeps increasing until the primary and standby database instances are disconnected. Then, the RPO value is null. For details about how to query the RPO value, see "Querying the DR Status of the Primary and Standby Database Instances."
-
-#### Removing DR Information from the Primary Database Instance
-
-Send a request for clearing DR information to the primary database instance. For details, see the gs_sdr tool in *Tool Reference*.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
->
-> - This operation will remove the DR information from the primary database instance.
-> - This operation can be performed on the primary database instance only after the DR database instance is promoted to primary. If you perform this operation on the DR database instance before it is promoted to primary, the DR relationship will be damaged.
-
-#### Planned Switchover
-
-Send a planned switchover request to the primary and standby database instances. For details, see the gs_sdr tool in *Tool Reference*.
-
-#### Querying the DR Status of the Primary and Standby Database Instances
-
-Send a DR status query request to the primary and standby database instances. For details, see the gs_sdr tool in *Tool Reference*.
-
-#### Upgrading the Primary and Standby Database Instances in a DR Relationship
-
-##### Major Version Upgrade
-
-1. Upgrade the primary database instance first. After the upgrade of the primary database instance is complete, upgrade the DR database instance.
-2. After the upgrade of the DR database instance is complete, commit the DR database instance first and then the primary database instance.
-
-##### Minor Version Upgrade
-
-1. Upgrade the primary and standby database instances at the same time.
-2. After the upgrade is complete, commit the DR database instance first and then the primary database instance.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
->
-> - Before committing the standby database instance, ensure that the upgrade of the primary database instance is complete.
-> - Commit the standby database instance first and then the primary database instance.
-> - After the standby database instance is committed, the primary database instance cannot be rolled back.
-> - During the upgrade, do not perform a switchover between the primary and standby database instances.
-
-### Troubleshooting
-
-This section lists the frequently asked questions (FAQs) about the streaming replication-based remote DR solution and provides troubleshooting procedures.
-
-The following tables list the symptoms, causes, and solutions of different operations.
-
-#### Exception in Establishing the DR Relationship
-
-**Table** Errors that may occur during establishment of the DR relationship
-
-| **Symptom** | **Cause and Solution** |
-| :----------------------------------------------------------- | :----------------------------------------------------------- |
-| The following error is returned when the primary database instance establishes the DR relationship:<br>Result exception error : Failed to do check main standby connection. Because Waiting timeout: XXs. | **Cause:** When the data volume of the primary database instance is large or the remote network bandwidth is small, the primary database instance may exit the DR relationship establishment process due to timeout before data replication is complete in the DR database instance.<br>**Solution:**<br>- If the DR database instance is establishing or has established the DR relationship, you can reenter the DR relationship establishment process of the primary database instance. The primary database instance will enter the state of waiting for DR connection again. Before reentering the process, you can estimate the timeout interval again and set the timeout interval parameter based on the data volume of the primary database instance and the remote network bandwidth.<br>- If the DR database instance also fails to establish the DR relationship, rectify the fault in the DR database instance and then establish the DR relationship again. |
-| During the DR relationship establishment, the DR relationship fails to be established because the primary DN in the primary cluster is switched over. | **Cause:** The primary DN of the primary cluster is switched over. The DR cluster is disconnected from the primary cluster for data build. As a result, the DR relationship fails to be established.<br>**Solution:**<br>- Check whether the primary/standby switchover in the primary cluster is manually performed. If yes, stop the switchover. If no, ignore it. Issue the establishment command again. |
-
-#### Exception in Promoting the DR Database Instance to Primary (Failover)
-
-**Table** Errors that may occur when the DR database instance is promoted to primary (failover)
-
-| **Symptom** | **Cause and Solution** |
-| :----------------------------------------------------------- | :----------------------------------------------------------- |
-| A faulty node in the DR database instance is not promoted to primary. | **Cause:** A node is disconnected from the DR database instance due to server breakdown or network interruption and fails to be promoted to primary.<br>**Solution:**<br>1. After the faulty node is recovered, add the node to the database instance again.<br>2. Modify the parameters related to the database instance DR mode in the CMS and CMA to switch back to the primary database instance configuration:<br>`gs_guc set -Z cmserver -N all -I all -c "backup_open = 0"`<br>`gs_guc set -Z cmagent -N all -I all -c "agent_backup_open=0"`<br>`gs_guc set -Z cmagent -N all -I all -c "disaster_recovery_type= 0"`<br>3. Connect to the faulty node, query the CMS and CMA process IDs, and run the **kill -9** command to kill the processes. The processes will be restarted by the OM Monitor so that the CMS and CMA parameter settings take effect.<br>4. Run **cm_ctl start -n NODEID -D DATADIR** to manually restore the node. |
-
-#### Planned Switchover Exception
-
-**Table** Errors that may occur during planned switchover
-
-| **Symptom** | **Cause and Solution** |
-| :----------------------------------------------------------- | :----------------------------------------------------------- |
-| The following error is returned during a planned switchover in the primary database instance:<br>Result exception error : Failed to generate switchover barrier before switchover | **Cause:** When the primary database instance receives a planned switchover command, a switchover barrier is generated before the primary database instance is demoted to standby. This barrier ensures that the replication of logs of all CNs and DN shards in the primary and standby database instances stops at a specified checkpoint. If a switchover barrier fails to be generated in the primary database instance due to reasons such as network jitter in the primary database instance, the planned switchover will be canceled.<br>**Solution:**<br>- If a planned switchover command times out in the DR database instance, the planned switchover can be reentered between the primary and DR database instances.<br>- If logs fail to be truncated after multiple switchover operations are performed, you need to further analyze the log files related to streaming DR. |
-| Result exception error : Failed to do check switchover_barrier on all main standby dn and cn. Because check timeout: XXs | **Cause:** After the DR database instance receives a planned switchover command, it checks whether a switchover barrier has been received on all CNs and the first standby DN before being promoted to primary. This ensures that the replication of logs of all CNs and DN shards in the primary and standby database instances stops at a specified checkpoint. If the DR database instance fails to obtain the switchover barrier within the specified timeout interval due to reasons such as an abnormal remote network, the planned switchover will be canceled.<br>**Solution:**<br>- If a planned switchover command times out in the primary database instance, the planned switchover can be reentered between the primary and DR database instances.<br>- If the DR database instance fails to obtain the switchover barrier after multiple switchover operations are performed, you need to further analyze the log files related to streaming DR. |
-
-#### Database Instance Fault in the DR Cluster
-
-**Table** Error information about database instances in the DR cluster
-
-| **Symptom** | **Cause and Solution** |
-| :----------------------------------------------------------- | :----------------------------------------------------------- |
-| A CM Agent fault occurs in the DR cluster. The DN status on the node is **Unknown**. The status of some first standby DNs is **Main Standby Need repair(Connecting)**. | **Cause:** The CM Agent on the node is faulty.<br>- The DN status on the node cannot be reported to the CM Server, and the DN instance status is displayed as **Unknown**.<br>- If the first standby instance (Main Standby) exists on the node, a first standby switchover is triggered. The original first standby instance is normal and has a normal streaming replication relationship with the primary DN in the primary database instance. However, the primary DN in the primary database instance allows only one first standby connection. As a result, the new first standby instance cannot be connected to the primary DN in the primary database instance, and the instance status is displayed as **Main Standby Need repair(Connecting)**.<br>**Solution:**<br>- Check the CM Agent alarm ALM_AI_AbnormalCMSProcess in the DR cluster and try to recover the faulty CM Agent. After the fault is rectified, the connection to the new first standby instance is restored.
- If the faulty CM Agent cannot be recovered within a short period of time, run the **gs_ctl stop -D DATADIR or kill** command to manually stop the DN process on the node and recover the node. | diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/1-using-gs_dump-and-gs_dumpall-to-export-data-overview.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/1-using-gs_dump-and-gs_dumpall-to-export-data-overview.md deleted file mode 100644 index e963ac79..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/1-using-gs_dump-and-gs_dumpall-to-export-data-overview.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Using gs_dump and gs_dumpall to Export Data Overview -summary: Using gs_dump and gs_dumpall to Export Data Overview -author: Guo Huan -date: 2021-03-04 ---- - -# Using gs_dump and gs_dumpall to Export Data Overview - -MogDB provides **gs_dump** and **gs_dumpall** to export required database objects and related information. You can use a tool to import the exported data to a target database for database migration. **gs_dump** exports a single database or its objects. **gs_dumpall** exports all databases or global objects in MogDB. For details, see [Table 1](#Scenarios). - -**Table 1** Scenarios - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Application Scenario | Export Granularity | Export Format | Import Method |
| :------------------- | :----------------- | :------------ | :------------ |
| Exporting a single database | Database-level export<br/>- Export full information of a database.<br/>You can use the exported information to create a database containing the same data as the current one.<br/>- Export all object definitions of a database, including the definitions of the database, functions, schemas, tables, indexes, and stored procedures.<br/>You can use the exported object definitions to quickly create a database that is the same as the current one, except that the new database does not have data.<br/>- Export data of a database. | - Plaintext<br/>- Custom<br/>- Directory<br/>- .tar | - For details about how to import data files in text format, see Using a gsql Meta-Command to Import Data.<br/>- For details about how to import data files in .tar, directory, or custom format, see Using gs_restore to Import Data. |
| | Schema-level export<br/>- Export full information of a schema.<br/>- Export data of a schema.<br/>- Export all object definitions of a schema, including the definitions of tables, stored procedures, and indexes. | Same as above | Same as above |
| | Table-level export<br/>- Export full information of a table.<br/>- Export data of a table.<br/>- Export the definition of a table. | Same as above | Same as above |
| Exporting all databases | Database-level export<br/>- Export full information of a cluster.<br/>You can use the exported information to create a host environment containing the same databases, global objects, and data as the current one.<br/>- Export all object definitions of a database, including the definitions of tablespaces, databases, functions, schemas, tables, indexes, and stored procedures.<br/>You can use the exported object definitions to quickly create a host environment that is the same as the current one, containing the same databases and tablespaces but no data.<br/>- Export data only. | Plaintext | For details about how to import data files, see Using a gsql Meta-Command to Import Data. |
| | Global object export<br/>- Export tablespaces.<br/>- Export roles.<br/>- Export tablespaces and roles. | Same as above | Same as above |
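To make the scenarios above concrete, here is a minimal sketch with one typical invocation per export granularity. The port, paths, and object names are placeholders; the sections that follow give full, verified examples.

```bash
# Database-level: full export of the postgres database in custom format
gs_dump -f /home/omm/backup/postgres.dmp -p 8000 postgres -F c
# Schema-level: export the hr schema of human_resource in directory format
gs_dump -f /home/omm/backup/hr_schema -p 8000 human_resource -n hr -F d
# Table-level: export the hr.staffs table in plaintext format
gs_dump -f /home/omm/backup/staffs.sql -p 8000 human_resource -t hr.staffs -F p
# All databases in plaintext
gs_dumpall -f /home/omm/backup/all_databases.sql -p 8000
# Global objects only (tablespaces and roles)
gs_dumpall -f /home/omm/backup/globals.sql -p 8000 -g
```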
- -**gs_dump** and **gs_dumpall** use **-U** to specify the user that performs the export. If the specified user does not have the required permissions, data cannot be exported. In this case, you can set **-role** in the **gs_dump** or **gs_dumpall** command to the role that has the permissions. Then, **gs_dump** or **gs_dumpall** uses the specified role to export data. See Table 1 for application scenarios and [Data Export By a User Without Required Permissions](4-data-export-by-a-user-without-required-permissions.md) for operation details. - -**gs_dump** and **gs_dumpall** encrypt the exported data files. These files are decrypted before being imported. In this way, data disclosure is prevented, protecting database security. - -When **gs_dump** or **gs_dumpall** is used to export data from a cluster, other users can still access (read and write) databases in MogDB. - -**gs_dump** and **gs_dumpall** can export complete, consistent data. For example, if **gs_dump** is executed to export database A or **gs_dumpall** is executed to export all databases from MogDB at T1, data of database A or all databases in MogDB at that time point will be exported, and modifications on the databases after that time point will not be exported. - -**Precautions** - -- Do not modify an exported file or its content. Otherwise, restoration may fail. - -- If there are more than 500,000 objects (data tables, views, and indexes) in a database, you are advised to use **gs_guc** to set the following parameters for database nodes. This operation is not required if the parameter values are greater than the recommended ones. - - ```bash - gs_guc set -N all -I all -c 'max_prepared_transactions = 1000' - gs_guc set -N all -I all -c 'max_locks_per_transaction = 512' - ``` - -- For data consistency and integrity, **gs_dump** and **gs_dumpall** set a share lock for a table to dump. If a share lock has been set for the table in other transactions, **gs_dump** and **gs_dumpall** lock the table after it is released. If the table cannot be locked within the specified time, the dump fails. You can customize the timeout duration to wait for lock release by specifying the **-lock-wait-timeout** parameter. - -- During an export, **gs_dumpall** reads all tables in a database. Therefore, you need to connect to the database as a MogDB administrator to export a complete file. When you use **gsql** to execute SQL scripts, cluster administrator permissions are also required to add users and user groups, and create databases. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/2-exporting-a-single-database.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/2-exporting-a-single-database.md deleted file mode 100644 index 67f73234..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/2-exporting-a-single-database.md +++ /dev/null @@ -1,288 +0,0 @@ ---- -title: Exporting a Single Database -summary: Exporting a Single Database -author: Guo Huan -date: 2021-03-04 ---- - -# Exporting a Single Database - -## Exporting a Database - -You can use **gs_dump** to export data and all object definitions of a database from MogDB. You can specify the information to export as follows: - -- Export full information of a database, including its data and all object definitions. - - You can use the exported information to create a database containing the same data as the current one. 
- -- Export all object definitions of a database, including the definitions of the database, functions, schemas, tables, indexes, and stored procedures. - - You can use the exported object definitions to quickly create a database that is the same as the current one, except that the new database does not have data. - -- Export data of a database. - -### Procedure - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Use **gs_dump** to export data of the **userdatabase** database. - - ```bash - gs_dump -U jack -f /home/omm/backup/userdatabase_backup.tar -p 8000 postgres -F t - Password: - ``` - - **Table 1** Common parameters - - | Parameter | Description | Example Value | - | :-------- | :----------------------------------------------------------- | :------------------------------------------ | - | -U | Username for database connection.
NOTE:
If the username is not specified, the initial system administrator created during installation is used for connection by default. | -U jack | - | -W | User password for database connection.
- This parameter is not required for database administrators if the trust policy is used for authentication.
- If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. | -W abcd@123 | - | -f | Folder to store exported files. If this parameter is not specified, the exported files are stored in the standard output. | -f /home/omm/backup/**postgres**_backup.tar | - | -p | TCP port or local Unix-domain socket file extension on which the server is listening for connections. | -p 8000 | - | dbname | Name of the database to export. | postgres | - | -F | Select the format of file to export. The values of **-F** are as follows:
- **p**: plaintext
- **c**: custom
- **d**: directory
- **t**: .tar | -F t | - - For details about other parameters, see "Tool Reference > Server Tools > [gs_dump](../../../reference-guide/tool-reference/server-tools/gs_dump.md)" in the **Reference Guide**. - -### Examples - -Example 1: Run **gs_dump** to export full information of the **postgres** database. The exported files are in .sql format. - -```bash -gs_dump -f /home/omm/backup/postgres_backup.sql -p 8000 postgres -F p -Password: -gs_dump[port='8000'][postgres][2017-07-21 15:36:13]: dump database postgres successfully -gs_dump[port='8000'][postgres][2017-07-21 15:36:13]: total time: 3793 ms -``` - -Example 2: Run **gs_dump** to export data of the **postgres** database, excluding object definitions. The exported files are in a custom format. - -```bash -gs_dump -f /home/omm/backup/postgres_data_backup.dmp -p 8000 postgres -a -F c -Password: -gs_dump[port='8000'][postgres][2017-07-21 15:36:13]: dump database postgres successfully -gs_dump[port='8000'][postgres][2017-07-21 15:36:13]: total time: 3793 ms -``` - -Example 3: Run **gs_dump** to export object definitions of the **postgres** database. The exported files are in .sql format. - -```bash -gs_dump -f /home/omm/backup/postgres_def_backup.sql -p 8000 postgres -s -F p -Password: -gs_dump[port='8000'][postgres][2017-07-20 15:04:14]: dump database postgres successfully -gs_dump[port='8000'][postgres][2017-07-20 15:04:14]: total time: 472 ms -``` - -Example 4: Run **gs_dump** to export object definitions of the **postgres** database. The exported files are in text format and are encrypted. - -```bash -gs_dump -f /home/omm/backup/postgres_def_backup.sql -p 8000 postgres --with-encryption AES128 --with-key 1234567812345678 -s -F p -Password: -gs_dump[port='8000'][postgres][2018-11-14 11:25:18]: dump database postgres successfully -gs_dump[port='8000'][postgres][2018-11-14 11:25:18]: total time: 1161 ms -``` - -## Exporting a Schema - -You can use **gs_dump** to export data and all object definitions of a schema from MogDB. You can export one or more specified schemas as needed. You can specify the information to export as follows: - -- Export full information of a schema, including its data and object definitions. -- Export data of a schema, excluding its object definitions. -- Export the object definitions of a schema, including the definitions of tables, stored procedures, and indexes. - -### Procedure - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run **gs_dump** to export the **hr** and **public** schemas. - - ```bash - gs_dump -W Bigdata@123 -U jack -f /home/omm/backup/MPPDB_schema_backup -p 8000 human_resource -n hr -n public -F d - ``` - - **Table 1** Common parameters - - | Parameter | Description | Example Value | - | :-------- | :----------------------------------------------------------- | :----------------------------------------------------------- | - | -U | Username for database connection. | -U jack | - | -W | User password for database connection.
- This parameter is not required for database administrators if the trust policy is used for authentication.
- If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. | -W Bigdata@123 | - | -f | Folder to store exported files. If this parameter is not specified, the exported files are stored in the standard output. | -f /home/omm/backup/MPPDB*_*schema_backup | - | -p | TCP port or local Unix-domain socket file extension on which the server is listening for connections. | -p 8000 | - | dbname | Name of the database to export. | human_resource | - | -n | Names of schemas to export. Data of the specified schemas will also be exported.
- Single schema: Enter **-n** **schemaname**.
- Multiple schemas: Enter **-n** **schemaname** for each schema. | - Single schema: **-n hr**
- Multiple schemas: **-n hr -n public** |
- **p**: plaintext
- **c**: custom
- **d**: directory
- **t**: .tar | -F d | - - For details about other parameters, see "Tool Reference > Server Tools > [gs_dump](../../../reference-guide/tool-reference/server-tools/gs_dump.md)" in the **Reference Guide**. - -### Examples - -Example 1: Run **gs_dump** to export full information of the **hr** schema. The exported files stored in text format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_schema_backup.sql -p 8000 human_resource -n hr -F p -gs_dump[port='8000'][human_resource][2017-07-21 16:05:55]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 16:05:55]: total time: 2425 ms -``` - -Example 2: Run **gs_dump** to export data of the **hr** schema. The exported files are in .tar format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_schema_data_backup.tar -p 8000 human_resource -n hr -a -F t -gs_dump[port='8000'][human_resource][2018-11-14 15:07:16]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2018-11-14 15:07:16]: total time: 1865 ms -``` - -Example 3: Run **gs_dump** to export the object definitions of the **hr** schema. The exported files are stored in a directory. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_schema_def_backup -p 8000 human_resource -n hr -s -F d -gs_dump[port='8000'][human_resource][2018-11-14 15:11:34]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2018-11-14 15:11:34]: total time: 1652 ms -``` - -Example 4: Run **gs_dump** to export the **human_resource** database excluding the **hr** schema. The exported files are in a custom format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_schema_backup.dmp -p 8000 human_resource -N hr -F c -gs_dump[port='8000'][human_resource][2017-07-21 16:06:31]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 16:06:31]: total time: 2522 ms -``` - -Example 5: Run **gs_dump** to export the object definitions of the **hr** and **public** schemas. The exported files are in .tar format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_schema_backup1.tar -p 8000 human_resource -n hr -n public -s -F t -gs_dump[port='8000'][human_resource][2017-07-21 16:07:16]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 16:07:16]: total time: 2132 ms -``` - -Example 6: Run **gs_dump** to export the **human_resource** database excluding the **hr** and **public** schemas. The exported files are in a custom format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_schema_backup2.dmp -p 8000 human_resource -N hr -N public -F c -gs_dump[port='8000'][human_resource][2017-07-21 16:07:55]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 16:07:55]: total time: 2296 ms -``` - -Example 7: Run **gs_dump** to export all tables (views, sequences, and foreign tables are also included) in the **public** schema and the **staffs** table in the **hr** schema, including data and table definition. The exported files are in a custom format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_backup3.dmp -p 8000 human_resource -t public.* -t hr.staffs -F c -gs_dump[port='8000'][human_resource][2018-12-13 09:40:24]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2018-12-13 09:40:24]: total time: 896 ms -``` - -## Exporting a Table - -You can use **gs_dump** to export data and definition of a table-level object from MogDB. 
Views, sequences, and foreign tables are special tables. You can export one or more specified tables as needed. You can specify the information to export as follows: - -- Export full information of a table, including its data and definition. -- Export data of a table. -- Export the definition of a table. - -### Procedure - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run **gs_dump** to export the **hr.staffs** and **hr.employments** tables. - - ```bash - gs_dump -W Bigdata@123 -U jack -f /home/omm/backup/MPPDB_table_backup -p 8000 human_resource -t hr.staffs -t hr.employments -F d - ``` - - **Table 1** Common parameters - - | Parameter | Description | Example Value | - | :-------- | :----------------------------------------------------------- | :----------------------------------------------------------- | - | -U | Username for database connection. | -U jack | - | -W | User password for database connection.
- This parameter is not required for database administrators if the trust policy is used for authentication.
- If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. | -W Bigdata@123 | | -f | Folder to store exported files. If this parameter is not specified, the exported files are stored in the standard output. | -f /home/omm/backup/MPPDB_table_backup | | -p | TCP port or local Unix-domain socket file extension on which the server is listening for connections. | -p 8000 | | dbname | Name of the database to export. | human_resource | | -t | Table (or view, sequence, foreign table) to export. You can specify multiple tables by listing them or using wildcard characters. When you use wildcard characters, enclose wildcard patterns in single quotation marks (') to prevent the shell from expanding the wildcard characters.
- Single table: Enter **-t** **schema.table**.
- Multiple tables: Enter **-t** **schema.table** for each table. | - Single table: **-t hr.staffs**
- Multiple tables: **-t hr.staffs -t hr.employments** |
- **p**: plaintext
- **c**: custom
- **d**: directory
- **t**: .tar | -F d | - - For details about other parameters, see "Tool Reference > Server Tools > [gs_dump](../../../reference-guide/tool-reference/server-tools/gs_dump.md)" in the **Reference Guide**. - -### Examples - -Example 1: Run **gs_dump** to export full information of the **hr.staffs** table. The exported files are in text format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_table_backup.sql -p 8000 human_resource -t hr.staffs -F p -gs_dump[port='8000'][human_resource][2017-07-21 17:05:10]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 17:05:10]: total time: 3116 ms -``` - -Example 2: Run **gs_dump** to export data of the **hr.staffs** table. The exported files are in .tar format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_table_data_backup.tar -p 8000 human_resource -t hr.staffs -a -F t -gs_dump[port='8000'][human_resource][2017-07-21 17:04:26]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 17:04:26]: total time: 2570 ms -``` - -Example 3: Run **gs_dump** to export the definition of the **hr.staffs** table. The exported files are stored in a directory. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_table_def_backup -p 8000 human_resource -t hr.staffs -s -F d -gs_dump[port='8000'][human_resource][2017-07-21 17:03:09]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 17:03:09]: total time: 2297 ms -``` - -Example 4: Run **gs_dump** to export the **human_resource** database excluding the **hr.staffs** table. The exported files are in a custom format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_table_backup4.dmp -p 8000 human_resource -T hr.staffs -F c -gs_dump[port='8000'][human_resource][2017-07-21 17:14:11]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 17:14:11]: total time: 2450 ms -``` - -Example 5: Run **gs_dump** to export the **hr.staffs** and **hr.employments** tables. The exported files are in text format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_table_backup1.sql -p 8000 human_resource -t hr.staffs -t hr.employments -F p -gs_dump[port='8000'][human_resource][2017-07-21 17:19:42]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 17:19:42]: total time: 2414 ms -``` - -Example 6: Run **gs_dump** to export the **human_resource** database excluding the **hr.staffs** and **hr.employments** tables. The exported files are in text format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_table_backup2.sql -p 8000 human_resource -T hr.staffs -T hr.employments -F p -gs_dump[port='8000'][human_resource][2017-07-21 17:21:02]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 17:21:02]: total time: 3165 ms -``` - -Example 7: Run **gs_dump** to export data and definition of the **hr.staffs** table, and the definition of the **hr.employments** table. The exported files are in .tar format. 
- -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_table_backup3.tar -p 8000 human_resource -t hr.staffs -t hr.employments --exclude-table-data hr.employments -F t -gs_dump[port='8000'][human_resource][2018-11-14 11:32:02]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2018-11-14 11:32:02]: total time: 1645 ms -``` - -Example 8: Run **gs_dump** to export data and definition of the **hr.staffs** table, encrypt the exported files, and store them in text format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_table_backup4.sql -p 8000 human_resource -t hr.staffs --with-encryption AES128 --with-key 1212121212121212 -F p -gs_dump[port='8000'][human_resource][2018-11-14 11:35:30]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2018-11-14 11:35:30]: total time: 6708 ms -``` - -Example 9: Run **gs_dump** to export all tables (views, sequences, and foreign tables are also included) in the **public** schema and the **staffs** table in the **hr** schema, including data and table definition. The exported files are in a custom format. - -```bash -gs_dump -W Bigdata@123 -f /home/omm/backup/MPPDB_table_backup5.dmp -p 8000 human_resource -t public.* -t hr.staffs -F c -gs_dump[port='8000'][human_resource][2018-12-13 09:40:24]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2018-12-13 09:40:24]: total time: 896 ms -``` - -Example 10: Run **gs_dump** to export the definition of the view referencing to the **test1** table in the **t1** schema. The exported files are in a custom format. - -```bash -gs_dump -W Bigdata@123 -U jack -f /home/omm/backup/MPPDB_view_backup6 -p 8000 human_resource -t t1.test1 --include-depend-objs --exclude-self -F d -gs_dump[port='8000'][jack][2018-11-14 17:21:18]: dump database human_resource successfully -gs_dump[port='8000'][jack][2018-11-14 17:21:23]: total time: 4239 ms -``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/3-exporting-all-databases.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/3-exporting-all-databases.md deleted file mode 100644 index e5c2d574..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/3-exporting-all-databases.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: Exporting All Databases -summary: Exporting All Databases -author: Guo Huan -date: 2021-03-04 ---- - -# Exporting All Databases - -## Exporting All Databases - -You can use **gs_dumpall** to export full information of all databases in MogDB, including information about each database and global objects in MogDB. You can specify the information to export as follows: - -- Export full information of all databases, including information about each database and global objects (such as roles and tablespaces) in MogDB. - - You can use the exported information to create a host environment containing the same databases, global objects, and data as the current one. - -- Export data of all databases, excluding all object definitions and global objects. - -- Export all object definitions of all databases, including the definitions of tablespaces, databases, functions, schemas, tables, indexes, and stored procedures. - - You can use the exported object definitions to quickly create a host environment that is the same as the current one, containing the same databases and tablespaces but no data. - -### Procedure - -1. 
Log in as the OS user **omm** to the primary node of the database. - -2. Run **gs_dumpall** to export full information of all databases. - - ```bash - gs_dumpall -W Bigdata@123 -U omm -f /home/omm/backup/MPPDB_backup.sql -p 8000 - ``` - - **Table 1** Common parameters - - | Parameter | Description | Example Value | - | :-------- | :----------------------------------------------------------- | :----------------------------------- | - | -U | Username for database connection. The user must be an MogDB administrator. | -U omm | - | -W | User password for database connection.
- This parameter is not required for database administrators if the trust policy is used for authentication.
- If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. | -W Bigdata@123 | - | -f | Folder to store exported files. If this parameter is not specified, the exported files are stored in the standard output. | -f /home/omm/backup/MPPDB_backup.sql | - | -p | TCP port or local Unix-domain socket file extension on which the server is listening for connections. | -p 8000 | - - For details about other parameters, see "Tool Reference > Server Tools > [gs_dumpall](../../../reference-guide/tool-reference/server-tools/gs_dumpall.md)" in the **Reference Guide**. - -### Examples - -Example 1: Run **gs_dumpall** as the cluster administrator **omm** to export full information of all databases in a cluster. After the command is executed, a large amount of output information will be displayed. **total time** will be displayed at the end of the information, indicating that the backup is successful. In this example, only relative output information is included. - -```bash -gs_dumpall -W Bigdata@123 -U omm -f /home/omm/backup/MPPDB_backup.sql -p 8000 -gs_dumpall[port='8000'][2017-07-21 15:57:31]: dumpall operation successful -gs_dumpall[port='8000'][2017-07-21 15:57:31]: total time: 9627 ms -``` - -Example 2: Run **gs_dumpall** as the cluster administrator **omm** to export object definitions of all databases in a cluster. The exported files are in text format. After the command is executed, a large amount of output information will be displayed. **total time** will be displayed at the end of the information, indicating that the backup is successful. In this example, only relative output information is included. - -```bash -gs_dumpall -W Bigdata@123 -U omm -f /home/omm/backup/MPPDB_backup.sql -p 8000 -s -gs_dumpall[port='8000'][2018-11-14 11:28:14]: dumpall operation successful -gs_dumpall[port='8000'][2018-11-14 11:28:14]: total time: 4147 ms -``` - -Example 3: Run **gs_dumpall** to export data of all databases in a cluster, encrypt the exported files, and store them in text format. After the command is executed, a large amount of output information will be displayed. **total time** will be displayed at the end of the information, indicating that the backup is successful. In this example, only relative output information is included. - -```bash -gs_dumpall -f /home/omm/backup/MPPDB_backup.sql -p 8000 -a --with-encryption AES128 --with-key 1234567812345678 -gs_dumpall[port='8000'][2018-11-14 11:32:26]: dumpall operation successful -gs_dumpall[port='8000'][2018-11-14 11:23:26]: total time: 4147 ms -``` - -## Exporting Global Objects - -You can use **gs_dumpall** to export global objects, including database users, user groups, tablespaces, and attributes (for example, global access permissions), from MogDB. - -### Procedure - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run **gs_dumpall** to export global tablespaces. - - ```bash - gs_dumpall -W Bigdata@123 -U omm -f /home/omm/backup/MPPDB_tablespace.sql -p 8000 -t - ``` - - **Table 1** Common parameters - - | Parameter | Description | Example Value | - | :-------- | :----------------------------------------------------------- | :------------------------------------------- | - | -U | Username for database connection. The user must be an MogDB administrator. | -U omm | - | -W | User password for database connection.
- This parameter is not required for database administrators if the trust policy is used for authentication.
- If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. | -W Bigdata@123 | - | -f | Folder to store exported files. If this parameter is not specified, the exported files are stored in the standard output. | -f /home/omm/backup/**MPPDB_tablespace**.sql | - | -p | TCP port or local Unix-domain socket file extension on which the server is listening for connections. | -p 8000 | - | -t | Dumps only tablespaces. You can also use **-tablespaces-only** alternatively. | - | - - For details about other parameters, see "Tool Reference > Server Tools > [gs_dumpall](../../../reference-guide/tool-reference/server-tools/gs_dumpall.md)" in the **Reference Guide**. - -### Examples - -Example 1: Run **gs_dumpall** as the cluster administrator **omm** to export global tablespaces and users of all databases. The exported files are in text format. - -```bash -gs_dumpall -W Bigdata@123 -U omm -f /home/omm/backup/MPPDB_globals.sql -p 8000 -g -gs_dumpall[port='8000'][2018-11-14 19:06:24]: dumpall operation successful -gs_dumpall[port='8000'][2018-11-14 19:06:24]: total time: 1150 ms -``` - -Example 2: Run **gs_dumpall** as the cluster administrator **omm** to export global tablespaces of all databases, encrypt the exported files, and store them in text format. - -```bash -gs_dumpall -W Bigdata@123 -U omm -f /home/omm/backup/MPPDB_tablespace.sql -p 8000 -t --with-encryption AES128 --with-key 1212121212121212 -gs_dumpall[port='8000'][2018-11-14 19:00:58]: dumpall operation successful -gs_dumpall[port='8000'][2018-11-14 19:00:58]: total time: 186 ms -``` - -Example 3: Run **gs_dumpall** as the cluster administrator **omm** to export global users of all databases. The exported files are in text format. - -```bash -gs_dumpall -W Bigdata@123 -U omm -f /home/omm/backup/MPPDB_user.sql -p 8000 -r -gs_dumpall[port='8000'][2018-11-14 19:03:18]: dumpall operation successful -gs_dumpall[port='8000'][2018-11-14 19:03:18]: total time: 162 ms -``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/4-data-export-by-a-user-without-required-permissions.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/4-data-export-by-a-user-without-required-permissions.md deleted file mode 100644 index 9127f312..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/4-data-export-by-a-user-without-required-permissions.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Data Export By a User Without Required Permissions -summary: Data Export By a User Without Required Permissions -author: Guo Huan -date: 2021-03-04 ---- - -# Data Export By a User Without Required Permissions - -**gs_dump** and **gs_dumpall** use **-U** to specify the user that performs the export. If the specified user does not have the required permissions, data cannot be exported. In this case, you need to assign the permission to a user who does not have the permission, and then set the **-role** parameter in the export command to specify the role with the permission. Then, **gs_dump** or **gs_dumpall** uses the **-role** parameter to specify a role to export data. - -## Procedure - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Use **gs_dump** to export data of the **human_resource** database. 
- - User **jack** does not have the permissions to export data of the **human_resource** database and the role **role1** has this permission. To export data of the **human_resource** database, you need to assign the permission of **role1** to **jack** and set **-role** to **role1** in the export command. The exported files are in .tar format. - - ```bash - gs_dump -U jack -f /home/omm/backup/MPPDB_backup.tar -p 8000 human_resource --role role1 --rolepassword abc@1234 -F t - Password: - ``` - - **Table 1** Common parameters - - | Parameter | Description | Example Value | - | :------------ | :----------------------------------------------------------- | :----------------------------------- | - | -U | Username for database connection. | -U jack | - | -W | User password for database connection.
- This parameter is not required for database administrators if the trust policy is used for authentication.
- If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. | -W Bigdata@123 | - | -f | Folder to store exported files. If this parameter is not specified, the exported files are stored in the standard output. | -f /home/omm/backup/MPPDB_backup.tar | - | -p | TCP port or local Unix-domain socket file extension on which the server is listening for connections. | -p 8000 | - | dbname | Name of the database to export. | human_resource | - | -role | Role name for the export operation. After this parameter is set, the **SET ROLE** command will be issued after **gs_dump** or **gs_dumpall** connects to the database. It is useful when the user specified by **-U** does not have the permissions required by **gs_dump** or **gs_dumpall**. This parameter allows you to switch to a role with the required permissions. | -r role1 | - | -rolepassword | Role password. | -rolepassword abc@1234 | - | -F | Select the format of file to export. The values of **-F** are as follows:
- **p**: plaintext
- **c**: custom
- **d**: directory
- **t**: .tar | -F t | - - For details about other parameters, see "Tool Reference > Server Tools > [gs_dump](../../../reference-guide/tool-reference/server-tools/gs_dump.md)" or "[gs_dumpall](../../../reference-guide/tool-reference/server-tools/gs_dumpall.md)" in the **Reference Guide**. - -## Examples - -Example 1: User **jack** does not have the permissions required to export data of the **human_resource** database using **gs_dump** and the role **role1** has the permissions. To export data of the **human_resource** database, you can set **-role** to **role1** in the **gs_dump** command. The exported files are in .tar format. - -```bash -$ human_resource=# CREATE USER jack IDENTIFIED BY "1234@abc"; -CREATE ROLE -human_resource=# GRANT role1 TO jack; -GRANT ROLE - -$ gs_dump -U jack -f /home/omm/backup/MPPDB_backup11.tar -p 8000 human_resource --role role1 --rolepassword abc@1234 -F t -Password: -gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: total time: 4239 ms -``` - -Example 2: User **jack** does not have the permissions required to export the **public** schema using **gs_dump** and the role **role1** has the permissions. To export the **public** schema, you can set **-role** to **role1** in the **gs_dump** command. The exported files are in .tar format. - -```bash -$ human_resource=# CREATE USER jack IDENTIFIED BY "1234@abc"; -CREATE ROLE -human_resource=# GRANT role1 TO jack; -GRANT ROLE - -$ gs_dump -U jack -f /home/omm/backup/MPPDB_backup12.tar -p 8000 human_resource -n public --role role1 --rolepassword abc@1234 -F t -Password: -gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: dump database human_resource successfully -gs_dump[port='8000'][human_resource][2017-07-21 16:21:10]: total time: 3278 ms -``` - -Example 3: User **jack** does not have the permissions required to export all databases in a cluster using **gs_dumpall** and the role **role1** (cluster administrator) has the permissions. To export all the databases, you can set **-role** to **role1** in the **gs_dumpall** command. The exported files are in text format. 
- -```bash -$ human_resource=# CREATE USER jack IDENTIFIED BY "1234@abc"; -CREATE ROLE -human_resource=# GRANT role1 TO jack; -GRANT ROLE - -$ gs_dumpall -U jack -f /home/omm/backup/MPPDB_backup.sql -p 8000 --role role1 --rolepassword abc@1234 -Password: -gs_dumpall[port='8000'][human_resource][2018-11-14 17:26:18]: dumpall operation successful -gs_dumpall[port='8000'][human_resource][2018-11-14 17:26:18]: total time: 6437 ms -``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/exporting-data.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/exporting-data.md deleted file mode 100644 index 4e84f494..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/exporting-data/exporting-data.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Exporting Data -summary: Exporting Data -author: Guo Huan -date: 2023-05-22 ---- - -# Exporting Data - -+ **[Using gs_dump and gs_dumpall to Export Data Overview](1-using-gs_dump-and-gs_dumpall-to-export-data-overview.md)** -+ **[Exporting a Single Database](2-exporting-a-single-database.md)** -+ **[Exporting All Databases](3-exporting-all-databases.md)** -+ **[Data Export By a User Without Required Permissions](4-data-export-by-a-user-without-required-permissions.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-and-exporting-data.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-and-exporting-data.md deleted file mode 100644 index f2960ca5..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-and-exporting-data.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Importing And Exporting Data -summary: Importing And Exporting Data -author: Guo Huan -date: 2023-05-22 ---- - -# Importing And Exporting Data - -- **[Importing Data](importing-data/importing-data.md)** -- **[Exporting Data](exporting-data/exporting-data.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/1-import-modes.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/1-import-modes.md deleted file mode 100644 index db486ed3..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/1-import-modes.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Import Modes -summary: Import Modes -author: Guo Huan -date: 2021-03-04 ---- - -# Import Modes - -You can use **INSERT**, **COPY**, or **\copy** (a **gsql** meta-command) to import data to the MogDB database. The methods have different characteristics. For details, see Table 1. - -**Table 1** Import modes - -| Mode | Characteristics | -| :--------------------------------- | :----------------------------------------------------------- | -| INSERT | Insert one or more rows of data, or insert data from a specified table. | -| COPY | Run the **COPY FROM STDIN** statement to write data into the MogDB database.
Service data written to MogDB from other databases through the CopyManager interface of the JDBC driver does not need to be stored in intermediate files. | -| **\copy**, a **gsql** meta-command | Different from the SQL **COPY** statement, the **\copy** command can read data from or write data into only local files on a **gsql** client.
NOTE:
**\copy** applies only to small-scale data import in good format. It does not preprocess invalid characters or provide error tolerance. Therefore, **\copy** cannot be used in scenarios where abnormal data exists. **COPY** is preferred for data import. | diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/10-managing-concurrent-write-operations.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/10-managing-concurrent-write-operations.md deleted file mode 100644 index 5cd138eb..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/10-managing-concurrent-write-operations.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: Managing Concurrent Write Operations -summary: Managing Concurrent Write Operations -author: Guo Huan -date: 2021-03-04 ---- - -# Managing Concurrent Write Operations - -## Transaction Isolation - -MogDB manages transactions based on MVCC and two-phase locks, avoiding conflicts between read and write operations. SELECT is a read-only operation, whereas UPDATE and DELETE are read/write operations. - -- There is no conflict between read/write and read-only operations, or between read/write operations. Each concurrent transaction creates a snapshot when it starts. Concurrent transactions cannot detect updates made by each other. - - At the **READ COMMITTED** level, if transaction T1 is committed, transaction T2 can see changes made by T1. - - At the **REPEATABLE READ** level, if T2 starts before T1 is committed, T2 will not see changes made by T1 even after T1 is committed. The query results in a transaction are consistent and unaffected by other transactions. -- Read/Write operations use row-level locks. Different transactions can concurrently update the same table but not the same row. A row update transaction will start only after the previous one is committed. - - **READ COMMITTED**: At this level, a transaction can access only committed data. This is the default level. - - **REPEATABLE READ**: Only data committed before transaction start is read. Uncommitted data or data committed in other concurrent transactions cannot be read. - -## Write and Read/Write Operations - -Statements for write-only and read/write operations are as follows: - -- **INSERT**, used to insert one or more rows of data into a table -- **UPDATE**, used to modify existing data in a table -- **DELETE**, used to delete existing data from a table -- **COPY**, used to import data - -INSERT and COPY are write-only operations. Only one of them can be performed at a time. If INSERT or COPY of transaction T1 locks a table, INSERT or COPY of transaction T2 needs to wait until T1 unlocks the table. - -UPDATE and DELETE operations are read/write operations. They need to query for the target rows before modifying data. Concurrent transactions cannot see changes made by each other, and UPDATE and DELETE operations read snapshots of data committed before their transactions start. Write operations use row-level locks. If T2 starts after T1 and is to update the same row as T1 does, T2 waits for T1 to finish update. If T1 is not complete within the specified timeout duration, T2 will time out. If T1 and T2 update different rows in a table, they can be concurrently executed. 
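As a minimal sketch of the row-level locking behavior described above (it assumes the **test** table created later in Concurrent Write Examples and two concurrent sessions; the exact error text depends on the configured lock wait timeout):

```sql
-- Session 1 (T1): takes a row-level lock on the updated row.
START TRANSACTION;
UPDATE test SET address = 'addr_a' WHERE id = 1;

-- Session 2 (T2): updating a different row is not blocked.
START TRANSACTION;
UPDATE test SET address = 'addr_b' WHERE id = 2;
-- Updating the same row blocks until T1 commits or rolls back;
-- if T1 holds the lock past the lock timeout, this statement fails.
UPDATE test SET address = 'addr_b' WHERE id = 1;

-- Session 1 (T1): COMMIT releases the lock so T2's blocked UPDATE can proceed.
COMMIT;
```

The Concurrent Write Examples section below walks through the corresponding visibility rules at each isolation level.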
- -## Potential Deadlocks During Concurrent Write - -Whenever transactions involve updates of more than one table, there is always the possibility that concurrently running transactions become deadlocked when they both try to write to the same set of tables. A transaction releases all of its locks at once when it either commits or rolls back; it does not relinquish locks one at a time. For example, transactions T1 and T2 start at roughly the same time. - -- If T1 starts writing to table A and T2 starts writing to table B, both transactions can proceed without conflict. However, if T1 finishes writing to table A and needs to start writing to the same rows as T2 does in table B, it will not be able to proceed because T2 still holds the lock on B. Conversely, if T2 finishes writing to table B and needs to start writing to the same rows as T1 does in table A, it will not be able to proceed either because T1 still holds the lock on A. In this case, a deadlock occurs. If T1 is committed and releases the lock within the lock timeout duration, subsequent update can proceed. If a lock times out, an error is reported and the corresponding transaction exits. -- If T1 updates rows 1 to 5 and T2 updates rows 6 to 10 in the same table, the two transactions do not conflict. However, if T1 finishes the update and proceeds to update rows 6 to 10, and T2 proceeds to update rows 1 to 5, neither of them can continue. If either of the transactions is committed and releases the lock within the lock timeout duration, subsequent update can proceed. If a lock times out, an error is reported and the corresponding transaction exits. - -## Concurrent Write Examples - -This section uses the **test** table as an example to describe how to perform concurrent **INSERT** and **DELETE** in the same table, concurrent **INSERT** in the same table, concurrent **UPDATE** in the same table, and concurrent import and queries. - -```sql -CREATE TABLE test(id int, name char(50), address varchar(255)); -``` - -### Concurrent INSERT and DELETE in the Same Table - -Transaction T1: - -```sql -START TRANSACTION; -INSERT INTO test VALUES(1,'test1','test123'); -COMMIT; -``` - -Transaction T2: - -```sql -START TRANSACTION; -DELETE test WHERE NAME='test1'; -COMMIT; -``` - -Scenario 1: - -T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, **DELETE** of T2 is performed. In this case, **DELETE 0** is displayed, because T1 is not committed and T2 cannot see the data inserted by T1. - -Scenario 2: - -- **READ COMMITTED** level - - T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, T1 is committed and **DELETE** of T2 is executed. In this case, **DELETE 1** is displayed, because T2 can see the data inserted by T1. - -- **REPEATABLE READ** level - - T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, T1 is committed and **DELETE** of T2 is executed. In this case, **DELETE 0** is displayed, because the data obtained in queries is consistent in a transaction. - -### Concurrent INSERT in the Same table - -Transaction T1: - -```sql -START TRANSACTION; -INSERT INTO test VALUES(2,'test2','test123'); -COMMIT; -``` - -Transaction T2: - -```sql -START TRANSACTION; -INSERT INTO test VALUES(3,'test3','test123'); -COMMIT; -``` - -Scenario 1: - -T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, **INSERT** of T2 is executed and succeeds. 
At the **READ COMMITTED** and **REPEATABLE READ** levels, the **SELECT** statement of T1 cannot see data inserted by T2, and a query in T2 cannot see data inserted by T1. - -Scenario 2: - -- **READ COMMITTED** level - - T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, T1 is committed. In T2, a query executed after **INSERT** can see the data inserted by T1. - -- **REPEATABLE READ** level - - T1 is started but not committed. At this time, T2 is started. After **INSERT** of T1 is complete, T1 is committed. In T2, a query executed after **INSERT** cannot see the data inserted by T1. - -### Concurrent UPDATE in the Same Table - -Transaction T1: - -```sql -START TRANSACTION; -UPDATE test SET address='test1234' WHERE name='test1'; -COMMIT; -``` - -Transaction T2: - -```sql -START TRANSACTION; -UPDATE test SET address='test1234' WHERE name='test2'; -COMMIT; -``` - -Transaction T3: - -```sql -START TRANSACTION; -UPDATE test SET address='test1234' WHERE name='test1'; -COMMIT; -``` - -Scenario 1: - -T1 is started but not committed. At this time, T2 is started. **UPDATE** of T1 and then T2 starts, and both of them succeed. This is because the **UPDATE** operations use row-level locks and do not conflict when they update different rows. - -Scenario 2: - -T1 is started but not committed. At this time, T3 is started. **UPDATE** of T1 and then T3 starts, and **UPDATE** of T1 succeeds. **UPDATE** of T3 times out. This is because T1 and T3 update the same row and the lock is held by T1 at the time of the update. - -### Concurrent Data Import and Queries - -Transaction T1: - -```sql -START TRANSACTION; -COPY test FROM '...'; -COMMIT; -``` - -Transaction T2: - -```sql -START TRANSACTION; -SELECT * FROM test; -COMMIT; -``` - -Scenario 1: - -T1 is started but not committed. At this time, T2 is started. **COPY** of T1 and then **SELECT** of T2 starts, and both of them succeed. In this case, T2 cannot see the data added by **COPY** of T1. - -Scenario 2: - -- **READ COMMITTED** level - - T1 is started but not committed. At this time, T2 is started. **COPY** of T1 is complete and T1 is committed. In this case, T2 can see the data added by **COPY** of T1. - -- **REPEATABLE READ** level - - T1 is started but not committed. At this time, T2 is started. **COPY** of T1 is complete and T1 is committed. In this case, T2 cannot see the data added by **COPY** of T1. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/2-running-the-INSERT-statement-to-insert-data.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/2-running-the-INSERT-statement-to-insert-data.md deleted file mode 100644 index 9149247c..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/2-running-the-INSERT-statement-to-insert-data.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Running the INSERT Statement to Insert Data -summary: Running the INSERT Statement to Insert Data -author: Guo Huan -date: 2021-03-04 ---- - -# Running the INSERT Statement to Insert Data - -Run the **INSERT** statement to write data into the MogDB database in either of the following ways: - -- Use the client tool provided by the MogDB database to write data into MogDB. - - For details, see Inserting Data to Tables. - -- Connect to the database using the JDBC or ODBC driver and run the **INSERT** statement to write data into the MogDB database. 
- - For details, see Connecting to a Database. - -You can add, modify, and delete database transactions for the MogDB database. **INSERT** is the simplest way to write data and applies to scenarios with small data volume and low concurrency. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/3-running-the-COPY-FROM-STDIN-statement-to-import-data.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/3-running-the-COPY-FROM-STDIN-statement-to-import-data.md deleted file mode 100644 index 4cd4fbce..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/3-running-the-COPY-FROM-STDIN-statement-to-import-data.md +++ /dev/null @@ -1,318 +0,0 @@ ---- -title: Running the COPY FROM STDIN Statement to Import Data -summary: Running the COPY FROM STDIN Statement to Import Data -author: Guo Huan -date: 2021-03-04 ---- - -# Running the COPY FROM STDIN Statement to Import Data - -
- -## Data Import Using COPY FROM STDIN - -Run the **COPY FROM STDIN** statement to import data to MogDB in either of the following ways: - -- Write data into the MogDB database by typing. For details, see COPY. -- Import data from a file or database to MogDB through the CopyManager interface driven by JDBC. You can use any parameters in the **COPY** syntax. - -
- -## Introduction to the CopyManager Class - -CopyManager is an API class provided by the JDBC driver in MogDB. It is used to import data to the MogDB database in batches. - -
- -### Inheritance Relationship of CopyManager - -The CopyManager class is in the **org.opengauss.copy** package and inherits the java.lang.Object class. The declaration of the class is as follows: - -```java -public class CopyManager -extends Object -``` - -
- -### Construction Method - -```java -public CopyManager(BaseConnection connection) -throws SQLException -``` - -
- -### Common Methods - -**Table 1** Common methods of CopyManager - -| Return Value | Method | Description | throws | -| :----------- | :--------------------------------------------------- | :----------------------------------------------------------- | :----------------------- | -| CopyIn | copyIn(String sql) | - | SQLException | -| long | copyIn(String sql, InputStream from) | Uses **COPY FROM STDIN** to quickly import data to tables in a database from InputStream. | SQLException,IOException | -| long | copyIn(String sql, InputStream from, int bufferSize) | Uses **COPY FROM STDIN** to quickly import data to tables in a database from InputStream. | SQLException,IOException | -| long | copyIn(String sql, Reader from) | Uses **COPY FROM STDIN** to quickly import data to tables in a database from Reader. | SQLException,IOException | -| long | copyIn(String sql, Reader from, int bufferSize) | Uses **COPY FROM STDIN** to quickly import data to tables in a database from Reader. | SQLException,IOException | -| CopyOut | copyOut(String sql) | - | SQLException | -| long | copyOut(String sql, OutputStream to) | Sends the result set of **COPY TO STDOUT** from the database to the OutputStream class. | SQLException,IOException | -| long | copyOut(String sql, Writer to) | Sends the result set of **COPY TO STDOUT** from the database to the Writer class. | SQLException,IOException | - -
- -## Handling Import Errors - -### Scenarios - -Handle errors that occurred during data import. - -### Querying Error Information - -Errors that occur when data is imported are divided into data format errors and non-data format errors. - -- Data format errors - - When creating a foreign table, specify **LOG INTO error_table_name**. Data format errors during data import will be written into the specified table. You can run the following SQL statement to query error details: - - ```sql - mogdb=# SELECT * FROM error_table_name; - ``` - - Table 1 lists the columns of the *error_table_name* table. - - **Table 1** Columns in the error information table - - | Column Name | Type | Description | - | :---------- | :----------------------- | :----------------------------------------------------------- | - | nodeid | integer | ID of the node where an error is reported | - | begintime | timestamp with time zone | Time when a data format error was reported | - | filename | character varying | Name of the source data file where a data format error occurs | - | rownum | numeric | Number of the row where a data format error occurs in a source data file | - | rawrecord | text | Raw record of a data format error in the source data file | - | detail | text | Error details | - -- Non-data format errors - - A non-data format error leads to the failure of an entire data import task. You can locate and troubleshoot a non-data format error based on the error message displayed during data import. - -### Handling Data Import Errors - -Troubleshoot data import errors based on obtained error information and descriptions in the following table. - -**Table 2** Handling data import errors - -| Error Message | Cause | Solution | -| :----------------------------------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| missing data for column "r_reason_desc" | 1. The number of columns in the source data file is less than that in the foreign table.
2. In a TEXT-format source data file, an escape character (for example, \\) leads to delimiter or quote mislocation.<br>
**Example:** The target table contains three columns, and the following data is imported. The escape character (\\) converts the delimiter (\|) into the value of the second column, so the value of the third column is lost.<br>
`BE|Belgium\\|1` | 1. If an error is reported due to missing columns, perform the following operations:<br>
- Add the value of the **r_reason_desc** column to the source data file.
- When creating a foreign table, set the parameter **fill_missing_fields** to **on**. In this way, if the last column of a row in the source data file is missing, it will be set to **NULL** and no error will be reported.
2. Check whether the row where an error is reported contains the escape character (\\). If the row contains such a character, you are advised to set the parameter **noescaping** to **true** when creating a foreign table, indicating that the escape character (\\) and the characters following it are not escaped. |
- When creating a foreign table, set the parameter **ignore_extra_data** to **on**. In this way, if the number of columns in the source data file is greater than that in the foreign table, the extra columns at the end of rows will not be imported. | -| invalid input syntax for type numeric: "a" | The data type is incorrect. | In the source data file, change the data type of the columns to import. If this error information is displayed, change the data type to **numeric**. | -| null value in column "staff_id" violates not-null constraint | The not-null constraint is violated. | In the source data file, add values to the specified columns. If this error information is displayed, add values to the **staff_id** column. | -| duplicate key value violates unique constraint "reg_id_pk" | The unique constraint is violated. | - Delete duplicate rows from the source data file.
- Run the **SELECT** statement with the **DISTINCT** keyword to ensure that all imported rows are unique.
`mogdb=# INSERT INTO reasons SELECT DISTINCT * FROM foreign_tpcds_reasons;` | -| value too long for type character varying(16) | The column length exceeds the upper limit. | In the source data file, change the column length. If this error information is displayed, reduce the column length to no greater than 16 bytes (VARCHAR2). | - -
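For reference, the **LOG INTO** clause described above is specified when the GDS foreign table is created, together with error-tolerance options such as **fill_missing_fields** and **ignore_extra_data**. A sketch, in which the GDS address, file location, and column definitions are illustrative only:

```sql
CREATE FOREIGN TABLE foreign_tpcds_reasons
(
    r_reason_sk   integer,
    r_reason_id   char(16),
    r_reason_desc char(100)
)
SERVER gsmpp_server
OPTIONS (location 'gsfs://192.168.0.90:5000/*', format 'text', delimiter '|',
         fill_missing_fields 'on', ignore_extra_data 'on')
LOG INTO err_tpcds_reasons
PER NODE REJECT LIMIT 'unlimited';
```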
## Example 1: Importing and Exporting Data Through Local Files

When the Java language is used for secondary development based on MogDB, you can use the CopyManager interface to export data from the database to a local file or import a local file to the database by streaming. The file can be in CSV or TEXT format.

The sample program is as follows. Load the MogDB JDBC driver before executing it.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.io.IOException;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.sql.SQLException;
import org.opengauss.copy.CopyManager;
import org.opengauss.core.BaseConnection;

public class Copy{

    public static void main(String[] args)
    {
        String urls = new String("jdbc:opengauss://localhost:8000/postgres"); // URL of the database
        String username = new String("username"); // Username
        String password = new String("passwd"); // Password
        String tablename = new String("migration_table"); // Table information
        String tablename1 = new String("migration_table_1"); // Table information
        String driver = "org.opengauss.Driver";
        Connection conn = null;

        try {
            Class.forName(driver);
            conn = DriverManager.getConnection(urls, username, password);
        } catch (ClassNotFoundException e) {
            e.printStackTrace(System.out);
        } catch (SQLException e) {
            e.printStackTrace(System.out);
        }

        // Export data from the migration_table table to the d:/data.txt file.
        try {
            copyToFile(conn, "d:/data.txt", "(SELECT * FROM migration_table)");
        } catch (SQLException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
        // Import data from the d:/data.txt file to the migration_table_1 table.
        try {
            copyFromFile(conn, "d:/data.txt", tablename1);
        } catch (SQLException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }

        // Export data from the migration_table_1 table to the d:/data1.txt file.
        try {
            copyToFile(conn, "d:/data1.txt", tablename1);
        } catch (SQLException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void copyFromFile(Connection connection, String filePath, String tableName)
        throws SQLException, IOException {

        FileInputStream fileInputStream = null;

        try {
            CopyManager copyManager = new CopyManager((BaseConnection)connection);
            fileInputStream = new FileInputStream(filePath);
            // Use the default TEXT format so that the file written by copyToFile can be read back directly.
            copyManager.copyIn("COPY " + tableName + " FROM STDIN", fileInputStream);
        } finally {
            if (fileInputStream != null) {
                try {
                    fileInputStream.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }

    public static void copyToFile(Connection connection, String filePath, String tableOrQuery)
        throws SQLException, IOException {

        FileOutputStream fileOutputStream = null;

        try {
            CopyManager copyManager = new CopyManager((BaseConnection)connection);
            fileOutputStream = new FileOutputStream(filePath);
            copyManager.copyOut("COPY " + tableOrQuery + " TO STDOUT", fileOutputStream);
        } finally {
            if (fileOutputStream != null) {
                try {
                    fileOutputStream.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }
}
```
## Example 2: Migrating Data from a MySQL Database to the MogDB Database

The following example shows how to use CopyManager to migrate data from MySQL to the MogDB database.

```java
import java.io.StringReader;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

import org.opengauss.copy.CopyManager;
import org.opengauss.core.BaseConnection;

public class Migration{

    public static void main(String[] args) {
        String url = new String("jdbc:opengauss://localhost:8000/postgres"); // URL of the database
        String user = new String("username"); // MogDB database user name
        String pass = new String("passwd"); // MogDB database password
        String tablename = new String("migration_table_1"); // Table information
        String delimiter = new String("|"); // Delimiter
        String encoding = new String("UTF8"); // Character set
        String driver = "org.opengauss.Driver";
        StringBuffer buffer = new StringBuffer(); // Buffer to store formatted data

        try {
            // Obtain the query result set of the source database.
            ResultSet rs = getDataSet();

            // Traverse the result set and obtain records row by row.
            // The values of columns in each record are separated by the specified delimiter and end with a linefeed, forming strings.
            // Add the strings to the buffer.
            while (rs.next()) {
                buffer.append(rs.getString(1) + delimiter
                        + rs.getString(2) + delimiter
                        + rs.getString(3) + delimiter
                        + rs.getString(4)
                        + "\n");
            }
            rs.close();

            try {
                // Connect to the target database.
                Class.forName(driver);
                Connection conn = DriverManager.getConnection(url, user, pass);
                BaseConnection baseConn = (BaseConnection) conn;
                baseConn.setAutoCommit(false);

                // Construct the COPY statement.
                String sql = "COPY " + tablename + " FROM STDIN WITH (DELIMITER '" + delimiter + "', ENCODING '" + encoding + "')";

                // Commit data in the buffer.
                CopyManager cp = new CopyManager(baseConn);
                StringReader reader = new StringReader(buffer.toString());
                cp.copyIn(sql, reader);
                baseConn.commit();
                reader.close();
                baseConn.close();
            } catch (ClassNotFoundException e) {
                e.printStackTrace(System.out);
            } catch (SQLException e) {
                e.printStackTrace(System.out);
            }

        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    //********************************
    // Return the query result set from the source database.
- //********************************* - private static ResultSet getDataSet() { - ResultSet rs = null; - try { - Class.forName("com.MY.jdbc.Driver").newInstance(); - Connection conn = DriverManager.getConnection("jdbc:MY://10.119.179.227:3306/jack?useSSL=false&allowPublicKeyRetrieval=true", "jack", "Enmo@123"); - Statement stmt = conn.createStatement(); - rs = stmt.executeQuery("select * from migration_table"); - } catch (SQLException e) { - e.printStackTrace(); - } catch (Exception e) { - e.printStackTrace(); - } - return rs; - } -} -``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/4-using-a-gsql-meta-command-to-import-data.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/4-using-a-gsql-meta-command-to-import-data.md deleted file mode 100644 index 43ff9af1..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/4-using-a-gsql-meta-command-to-import-data.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: Using a gsql Meta-Command to Import Data -summary: Using a gsql Meta-Command to Import Data -author: Guo Huan -date: 2021-03-04 ---- - -# Using a gsql Meta-Command to Import Data - -The **gsql** tool provides the **\copy** meta-command to import data. - -**\copy Command** - -## Syntax - -``` -\copy { table [ ( column_list ) ] | - -( query ) } { from | to } { filename | - -stdin | stdout | pstdin | pstdout } - -[ with ] [ binary ] [ delimiter - -[ as ] 'character' ] [ null [ as ] 'string' ] - -[ csv [ header ] [ quote [ as ] - -'character' ] [ escape [ as ] 'character' ] - -[ force quote column_list | * ] [ force - -not null column_list ] ] -``` - -You can run this command to import or export data after logging in to a database on any gsql client. Different from the **COPY** statement in SQL, this command performs read/write operations on local files rather than files on database servers. The accessibility and permissions of the local files are restricted to local users. - -> **NOTE:** -> -> **\copy** applies only to small-scale data import in good format. It does not preprocess invalid characters or provide error tolerance. Therefore, **\copy** cannot be used in scenarios where abnormal data exists. **GDS** or **COPY** is preferred for data import. - -**Parameter Description** - -- table - - Specifies the name (possibly schema-qualified) of an existing table. - - Value range: an existing table name - -- column_list - - Specifies an optional list of columns to be copied. - - Value range: any field in the table. If no column list is specified, all columns of the table will be copied. - -- query - - Specifies that the results are to be copied. - - Value range: a **SELECT** or **VALUES** command in parentheses - -- filename - - Specifies the absolute path of a file. To run the **COPY** command, the user must have the write permission for this path. - -- stdin - - Specifies that input comes from the standard input. - -- stdout - - Specifies that output goes to the standard output. - -- pstdin - - Specifies that input comes from the gsql client. - -- pstout - -- Specifies that output goes to the gsql client. - -- binary - - Specifies that data is stored and read in binary mode instead of text mode. In binary mode, you cannot declare **DELIMITER**, **NULL**, or **CSV**. After **binary** is specified, CSV, FIXED, and TEXT cannot be specified through **option** or **copy_option**. 
- -- delimiter [ as ] 'character' - - Specifies the character that separates columns within each row (line) of the file. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - The value of **delimiter** cannot be **\r** or **\n**. - > - A delimiter cannot be the same as the null value. The delimiter for the CSV format cannot be same as the **quote** value. - > - The delimiter of TEXT data cannot contain any of the following characters: \\.abcdefghijklmnopqrstuvwxyz0123456789. - > - The data length of a single row should be less than 1 GB. A row that has many columns using long delimiters cannot contain much valid data. - > - You are advised to use multi-character delimiters or invisible delimiters. For example, you can use multi-characters (such as $^&) and invisible characters (such as 0x07, 0x08, and 0x1b). - - Value range: a multi-character delimiter within 10 bytes - - Default value: - - - A tab character in TEXT format - - A comma (,) in CSV format - - No delimiter in FIXED format - -- null [ as ] 'string' - - Specifies the string that represents a null value. - - Value range: - - - A null value cannot be **\\r** or **\\n**. The maximum length is 100 characters. - - A null value cannot be the same as the **delimiter** or **quote** value. - - Default value: - - - The default value for the CSV format is an empty string without quotation marks. - - The default value for the TEXT format is **\\N**. - -- header - - Specifies whether a file contains a header with the names of each column in the file. **header** is available only for CSV and FIXED files. - - When data is imported, if **header** is **on**, the first row of the data file will be identified as the header and ignored. If **header** is **off**, the first row will be identified as a data row. - - When data is exported, if header is **on**, **fileheader** must be specified. **fileheader** specifies the content in the header. If **header** is **off**, an exported file does not contain a header. - - Value range:**true/on** and **false/off** - - Default value: false - -- quote [ as ] 'character' - - Specifies a quoted character string for a CSV file. - - Default value: a double quotation mark (") - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - The value of **quote** cannot be the same as that of the **delimiter** or null parameter. - > - The value of **quote** must be a single-byte character. - > - Invisible characters are recommended, such as 0x07, 0x08, and 0x1b. - -- escape [ as ] 'character' - - Specifies an escape character for a CSV file. The value must be a single-byte character. - - Default value: a double quotation mark (") If the value is the same as that of **quote**, it will be replaced by **\0**. - -- force quote column_list | * - - In **CSV COPY TO** mode, forces quotation marks to be used for all non-null values in each specified column. Null values are not quoted. - - Value range: an existing column name - -- force not null column_list - - Assigns a value to a specified column in **CSV COPY FROM** mode. - - Value range: an existing column name - -- force null column\_list - - Assigns null to a specified column in **CSV COPY FROM** mode. - - Value range: an existing column name - -**Examples** - -1. Create a target table **a**. - - ```sql - mogdb=# CREATE TABLE a(a int); - ``` - -2. Import data. - - Copy data from **stdin** to table **a**. 
- - ```sql - mogdb=# \copy a from stdin; - ``` - - When the **>>** characters are displayed, enter data. To end your input, enter a backslash and a period (\.). - - ```sql - Enter data to be copied followed by a newline. - End with a backslash and a period on a line by itself. - >> 1 - >> 2 - >> \. - ``` - - Query data imported to table **a**. - - ```sql - mogdb=# SELECT * FROM a; - a - --- - 1 - 2 - (2 rows) - ``` - -3. Copy data from a local file to table **a**. The following assumes that the local file is **/home/omm/2.csv**. - - - Commas (,) are used as delimiters. - - - If the number of columns defined in a source data file is greater than that in a foreign table, extra columns will be ignored during import. - - ```sql - mogdb=# \copy a FROM '/home/omm/2.csv' WITH (delimiter',',IGNORE_EXTRA_DATA 'on'); - ``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/5-using-gs_restore-to-import-data.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/5-using-gs_restore-to-import-data.md deleted file mode 100644 index 9d15fe71..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/5-using-gs_restore-to-import-data.md +++ /dev/null @@ -1,263 +0,0 @@ ---- -title: Using gs_restore to Import Data -summary: Using gs_restore to Import Data -author: Guo Huan -date: 2021-03-04 ---- - -# Using gs_restore to Import Data - -## Scenarios - -**gs_restore** is an import tool provided by the MogDB database. You can use **gs_restore** to import the files exported by **gs_dump** to a database. **gs_restore** can import the files in .tar, custom, or directory format. - -**gs_restore** can: - -- Import data to a database. - - If a database is specified, data is imported to the database. If multiple databases are specified, the password for connecting to each database also needs to be specified. - -- Import data to a script. - - If no database is specified, a script containing the SQL statement to recreate the database is created and written to a file or standard output. This script output is equivalent to the plain text output of **gs_dump**. - -You can specify and sort the data to import. - -## Procedure - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> **gs_restore** incrementally imports data by default. To prevent data exception caused by consecutive imports, use the **-e** and **-c** parameters for each import. **-c** indicates that existing data is deleted from the target database before each import. **-e** indicates that the system ignores the import task with an error (error message is displayed after the import process is complete) and proceeds with the next by default. Therefore, you need to exit the system if an error occurs when you send the SQL statement to the database. - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Use **gs_restore** to import all object definitions from the exported file of the entire **mogdb** database to the **backupdb** database. - - ```bash - $ gs_restore -U jack /home/omm/backup/MPPDB_backup.tar -p 8000 -d backupdb -s -e -c - Password: - ``` - - **Table 1** Common parameters - - | Parameters | Description | Example Value | - | :--------- | :----------------------------------------------------------- | :------------ | - | -U | Username for database connection. | -U jack | - | -W | User password for database connection.
- This parameter is not required for database administrators if the trust policy is used for authentication.
- If you connect to the database without specifying this parameter and you are not a database administrator, you will be prompted to enter the password. | -W abcd@123 | - | -d | Database to which data will be imported. | -d backupdb | - | -p | TCP port or local Unix-domain socket file extension on which the server is listening for connections. | -p 8000 | - | -e | Exits if an error occurs when you send the SQL statement to the database. Error messages are displayed after the import process is complete. | - | - | -c | Cleans existing objects from the target database before the import. | - | - | -s | Imports only object definitions in schemas and does not import data. Sequence values will also not be imported. | - | - - For details about other parameters, see "Tool Reference > Server Tools > [gs_restore](../../../reference-guide/tool-reference/server-tools/gs_restore.md)" in the **Reference Guide**. - -## Examples - -Example 1: Run **gs_restore** to import data and all object definitions of the **mogdb** database from the **MPPDB_backup.dmp** file (custom format). - -```bash -$ gs_restore backup/MPPDB_backup.dmp -p 8000 -d backupdb -Password: -gs_restore[2017-07-21 19:16:26]: restore operation successful -gs_restore: total time: 13053 ms -``` - -Example 2: Run **gs_restore** to import data and all object definitions of the **mogdb** database from the **MPPDB_backup.tar** file. - -```bash -$ gs_restore backup/MPPDB_backup.tar -p 8000 -d backupdb -gs_restore[2017-07-21 19:21:32]: restore operation successful -gs_restore[2017-07-21 19:21:32]: total time: 21203 ms -``` - -Example 3: Run **gs_restore** to import data and all object definitions of the **mogdb** database from the **MPPDB_backup** directory. - -```bash -$ gs_restore backup/MPPDB_backup -p 8000 -d backupdb -gs_restore[2017-07-21 19:26:46]: restore operation successful -gs_restore[2017-07-21 19:26:46]: total time: 21003 ms -``` - -Example 4: Run **gs_restore** to import all object definitions of the database from the **MPPDB_backup.tar** file to the **backupdb** database. Table data is not imported. - -```bash -$ gs_restore /home/omm/backup/MPPDB_backup.tar -p 8000 -d backupdb -s -e -c -Password: -gs_restore[2017-07-21 19:46:27]: restore operation successful -gs_restore[2017-07-21 19:46:27]: total time: 32993 ms -``` - -Example 5: Run **gs_restore** to import data and all definitions in the **PUBLIC** schema from the **MPPDB_backup.dmp** file. Existing objects are deleted from the target database before the import. If an existing object references to an object in another schema, manually delete the referenced object first. - -```bash -$ gs_restore backup/MPPDB_backup.dmp -p 8000 -d backupdb -e -c -n PUBLIC -gs_restore: [archiver (db)] Error while PROCESSING TOC: -gs_restore: [archiver (db)] Error from TOC entry 313; 1259 337399 TABLE table1 gaussdba -gs_restore: [archiver (db)] could not execute query: ERROR: cannot drop table table1 because other objects depend on it -DETAIL: view t1.v1 depends on table table1 -HINT: Use DROP ... CASCADE to drop the dependent objects too. -Command was: DROP TABLE public.table1; -``` - -Manually delete the referenced object and create it again after the import is complete. 
- -```bash -$ gs_restore backup/MPPDB_backup.dmp -p 8000 -d backupdb -e -c -n PUBLIC -gs_restore[2017-07-21 19:52:26]: restore operation successful -gs_restore[2017-07-21 19:52:26]: total time: 2203 ms -``` - -Example 6: Run **gs_restore** to import the definition of the **hr.staffs** table in the **hr** schema from the **MPPDB_backup.dmp** file. Before the import, the **hr.staffs** table does not exist. - -```bash -$ gs_restore backup/MPPDB_backup.dmp -p 8000 -d backupdb -e -c -s -n hr -t hr.staffs -gs_restore[2017-07-21 19:56:29]: restore operation successful -gs_restore[2017-07-21 19:56:29]: total time: 21000 ms -``` - -Example 7: Run **gs_restore** to import data of the **hr.staffs** table in **hr** schema from the **MPPDB_backup.dmp** file. Before the import, the **hr.staffs** table is empty. - -```bash -$ gs_restore backup/MPPDB_backup.dmp -p 8000 -d backupdb -e -a -n hr -t hr.staffs -gs_restore[2017-07-21 20:12:32]: restore operation successful -gs_restore[2017-07-21 20:12:32]: total time: 20203 ms -``` - -Example 8: Run **gs_restore** to import the definition of the **hr.staffs** table. Before the import, the **hr.staffs** table already exists. - -```sql -human_resource=# select * from hr.staffs; - staff_id | first_name | last_name | email | phone_number | hire_date | employment_id | salary | commission_pct | manager_id | section_id -----------+-------------+-------------+----------+--------------------+---------------------+---------------+----------+----------------+------------+------------ - 200 | Jennifer | Whalen | JWHALEN | 515.123.4444 | 1987-09-17 00:00:00 | AD_ASST | 4400.00 | | 101 | 10 - 201 | Michael | Hartstein | MHARTSTE | 515.123.5555 | 1996-02-17 00:00:00 | MK_MAN | 13000.00 | | 100 | 20 - -$ gsql -d human_resource -p 8000 - -gsql ((MogDB x.x.x build 56189e20) compiled at 2022-01-07 18:47:53 commit 0 last mr ) -Non-SSL connection (SSL connection is recommended when requiring high-security) -Type "help" for help. - -human_resource=# drop table hr.staffs CASCADE; -NOTICE: drop cascades to view hr.staff_details_view -DROP TABLE - -$ gs_restore /home/omm/backup/MPPDB_backup.tar -p 8000 -d human_resource -n hr -t staffs -s -e -restore operation successful -total time: 904 ms - -human_resource=# select * from hr.staffs; - staff_id | first_name | last_name | email | phone_number | hire_date | employment_id | salary | commission_pct | manager_id | section_id -----------+------------+-----------+-------+--------------+-----------+---------------+--------+----------------+------------+------------ -(0 rows) -``` - -Example 9: Run **gs_restore** to import data and definitions of the **staffs** and **areas** tables. Before the import, the **staffs** and **areas** tables do not exist. 
- -```sql -human_resource=# \d - List of relations - Schema | Name | Type | Owner | Storage ---------+--------------------+-------+----------+---------------------------------- - hr | employment_history | table | omm | {orientation=row,compression=no} - hr | employments | table | omm | {orientation=row,compression=no} - hr | places | table | omm | {orientation=row,compression=no} - hr | sections | table | omm | {orientation=row,compression=no} - hr | states | table | omm | {orientation=row,compression=no} -(5 rows) - -$ gs_restore /home/mogdb/backup/MPPDB_backup.tar -p 8000 -d human_resource -n hr -t staffs -n hr -t areas -restore operation successful -total time: 724 ms - -human_resource=# \d - List of relations - Schema | Name | Type | Owner | Storage ---------+--------------------+-------+----------+---------------------------------- - hr | areas | table | omm | {orientation=row,compression=no} - hr | employment_history | table | omm | {orientation=row,compression=no} - hr | employments | table | omm | {orientation=row,compression=no} - hr | places | table | omm | {orientation=row,compression=no} - hr | sections | table | omm | {orientation=row,compression=no} - hr | staffs | table | omm | {orientation=row,compression=no} - hr | states | table | omm | {orientation=row,compression=no} -(7 rows) - -human_resource=# select * from hr.areas; - area_id | area_name ----------+------------------------ - 4 | Middle East and Africa - 1 | Europe - 2 | Americas - 3 | Asia -(4 rows) -``` - -Example 10: Run **gs_restore** to import data and all object definitions in the **hr** schema. - -```bash -$ gs_restore /home/omm/backup/MPPDB_backup1.dmp 8000 -d backupdb -n hr -e -c -restore operation successful -total time: 702 ms -``` - -Example 11: Run **gs_restore** to import all object definitions in the **hr** and **hr1** schemas to the **backupdb** database. - -```bash -$ gs_restore /home/omm/backup/MPPDB_backup2.dmp -p 8000 -d backupdb -n hr -n hr1 -s -restore operation successful -total time: 665 ms -``` - -Example 12: Run **gs_restore** to decrypt the files exported from the **human_resource** database and import them to the **backupdb** database. - -```sql -mogdb=# create database backupdb; -CREATE DATABASE - -$ gs_restore /home/omm/backup/MPPDB_backup.tar -p 8000 -d backupdb --with-key=1234567812345678 -restore operation successful -total time: 23472 ms - -$ gsql -d backupdb -p 8000 -r - -gsql ((MogDB x.x.x build 56189e20) compiled at 2022-01-07 18:47:53 commit 0 last mr ) -Non-SSL connection (SSL connection is recommended when requiring high-security) -Type "help" for help. - -backupdb=# select * from hr.areas; - area_id | area_name ----------+------------------------ - 4 | Middle East and Africa - 1 | Europe - 2 | Americas - 3 | Asia -(4 rows) -``` - -Example 13: **user 1** does not have the permission to import data from an exported file to the **backupdb** database and **role1** has this permission. To import the exported data to the **backupdb** database, you can set **-role** to **role1** in the **gs_restore** command. 
- -```sql -human_resource=# CREATE USER user1 IDENTIFIED BY "1234@abc"; -CREATE ROLE role1 with SYSADMIN IDENTIFIED BY "abc@1234"; - -$ gs_restore -U user1 /home/omm/backup/MPPDB_backup.tar -p 8000 -d backupdb --role role1 --rolepassword abc@1234 -Password: -restore operation successful -total time: 554 ms - -$ gsql -d backupdb -p 8000 -r - -gsql ((MogDB x.x.x build 56189e20) compiled at 2022-01-07 18:47:53 commit 0 last mr ) -Non-SSL connection (SSL connection is recommended when requiring high-security) -Type "help" for help. - -backupdb=# select * from hr.areas; - area_id | area_name ----------+------------------------ - 4 | Middle East and Africa - 1 | Europe - 2 | Americas - 3 | Asia -(4 rows) -``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/6-updating-data-in-a-table.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/6-updating-data-in-a-table.md deleted file mode 100644 index d1a64607..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/6-updating-data-in-a-table.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: Updating Data in a Table -summary: Updating Data in a Table -author: Guo Huan -date: 2021-03-04 ---- - -# Updating Data in a Table - -## Updating a Table by Using DML Statements - -In MogDB, you can update a table by running DML statements. - -### Procedure - -There is a table named **customer_t** and the table structure is as follows: - -```sql -CREATE TABLE customer_t -( c_customer_sk integer, - c_customer_id char(5), - c_first_name char(6), - c_last_name char(8) -) ; -``` - -You can run the following DML statements to update data in the table. - -- Run the **INSERT** statement to insert data into the table. - - - Insert a row to the **customer_t** table. - - ```sql - INSERT INTO customer_t (c_customer_sk, c_customer_id, c_first_name,c_last_name) VALUES (3769, 5, 'Grace','White'); - ``` - - - Insert multiple rows to the **customer_t** table. - - ```sql - INSERT INTO customer_t (c_customer_sk, c_customer_id, c_first_name,c_last_name) VALUES - (6885, 1, 'Joes', 'Hunter'), - (4321, 2, 'Lily','Carter'), - (9527, 3, 'James', 'Cook'), - (9500, 4, 'Lucy', 'Baker'); - ``` - - For details on how to use **INSERT**, see Inserting Data to Tables. - -- Run the **UPDATE** statement to update data in the table. Change the value of the **c_customer_id** column to **0**. - - ```sql - UPDATE customer_t SET c_customer_id = 0; - ``` - - For details on how to use **UPDATE**, see UPDATE. - -- Run the **DELETE** statement to delete rows from the table. - - You can use the **WHERE** clause to specify the rows whose data is to delete. If you do not specify it, all rows in the table are deleted and only the data structure is retained. - - ```sql - DELETE FROM customer_t WHERE c_last_name = 'Baker'; - ``` - - For details on how to use **DELETE**, see DELETE. - -- Run the **TRUNCATE** statement to delete all rows from the table. - - ```sql - TRUNCATE TABLE customer_t; - ``` - - For details on how to use **TRUNCATE**, see TRUNCATE. - - The **DELETE** statement deletes a row of data each time whereas the **TRUNCATE** statement deletes data by releasing the data page stored in the table. Therefore, data can be deleted more quickly by using **TRUNCATE** than using **DELETE**. - - **DELETE** deletes table data but does not release table storage space. **TRUNCATE** deletes table data and releases table storage space. 
- -## Updating and Inserting Data by Using the MERGE INTO Statement - -To add all or a large amount of data in a table to an existing table, you can run the **MERGE INTO** statement in MogDB to merge the two tables so that data can be quickly added to the existing table. - -The **MERGE INTO** statement matches data in a source table with that in a target table based on a join condition. If data matches, **UPDATE** will be executed on the target table. Otherwise, **INSERT** will be executed. This statement is a convenient way to combine multiple operations and avoids multiple **INSERT** or **UPDATE** statements. - -### Prerequisites - -You have the **INSERT** and **UPDATE** permissions for the target table and the **SELECT** permission for the source table. - -### Procedure - -1. Create a source table named **products** and insert data. - - ```sql - mogdb=# CREATE TABLE products - ( product_id INTEGER, - product_name VARCHAR2(60), - category VARCHAR2(60) - ); - - mogdb=# INSERT INTO products VALUES - (1502, 'olympus camera', 'electrncs'), - (1601, 'lamaze', 'toys'), - (1666, 'harry potter', 'toys'), - (1700, 'wait interface', 'books'); - ``` - -2. Create a target table named **newproducts** and insert data. - - ```sql - mogdb=# CREATE TABLE newproducts - ( product_id INTEGER, - product_name VARCHAR2(60), - category VARCHAR2(60) - ); - - mogdb=# INSERT INTO newproducts VALUES - (1501, 'vivitar 35mm', 'electrncs'), - (1502, 'olympus ', 'electrncs'), - (1600, 'play gym', 'toys'), - (1601, 'lamaze', 'toys'), - (1666, 'harry potter', 'dvd'); - ``` - -3. Run the **MERGE INTO** statement to merge data in the source table **products** into the target table **newproducts**. - - ```sql - MERGE INTO newproducts np - USING products p - ON (np.product_id = p.product_id ) - WHEN MATCHED THEN - UPDATE SET np.product_name = p.product_name, np.category = p.category - WHEN NOT MATCHED THEN - INSERT VALUES (p.product_id, p.product_name, p.category) ; - ``` - - For details on parameters in the statement, see [Table 1](#Parameters in the MERGE INTO statement). For more information, see MERGE INTO. - - **Table 1** Parameters in the MERGE INTO statement - - | Parameter | Description | Example Value | - | :-------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | - | **INTO** clause | Specifies a target table that is to be updated or has data to be inserted.
A table alias is supported. | Value: newproducts np
The table name is **newproducts** and the alias is **np**. | - | **USING** clause | Specifies a source table. A table alias is supported.
If the target table is a replication table, the source table must also be a replication table. | Value: products p
The table name is **products** and the alias is **p**. | - | **ON** clause | Specifies a join condition between a target table and a source table.
Columns in the join condition cannot be updated. | Value: np.product_id = p.product_id
The join condition is that the **product_id** column in the target table **newproducts** has equivalent values as the **product_id** column in the source table **products**. | - | **WHEN MATCHED** clause | Performs **UPDATE** if data in the source table matches that in the target table based on the condition.
- Only one **WHEN MATCHED** clause can be specified.
- The **WHEN MATCHED** clause can be omitted. If it is omitted, no operation will be performed on the rows that meet the condition in the **ON** clause.
- Columns involved in the distribution key of the target table cannot be updated. | Value: WHEN MATCHED THEN UPDATE SET np.product_name = p.product_name, np.category = p.category
When the condition in the **ON** clause is met, the values of the **product_name** and **category** columns in the target table **newproducts** are replaced with the values in the corresponding columns in the source table **products**. | - | **WHEN NOT MATCHED** clause | Performs **INSERT** if data in the source table does not match that in the target table based on the condition.
- Only one **WHEN NOT MATCHED** clause can be specified.
- The **WHEN NOT MATCHED** clause can be omitted.
- An **INSERT** clause can contain only one **VALUES**.
- The **WHEN MATCHED** and **WHEN NOT MATCHED** clauses can be exchanged in sequence. One of them can be omitted, but they cannot be omitted at the same time. | Value: WHEN NOT MATCHED THEN INSERT VALUES (p.product_id, p.product_name, p.category)
Insert rows in the source table **products** that do not meet the condition in the **ON** clause into the target table **newproducts**. | - -4. Query the target table **newproducts** after the merge. - - ```sql - SELECT * FROM newproducts; - ``` - - The command output is as follows: - - ```sql - product_id | product_name | category - ------------+----------------+----------- - 1501 | vivitar 35mm | electrncs - 1502 | olympus camera | electrncs - 1666 | harry potter | toys - 1600 | play gym | toys - 1601 | lamaze | toys - 1700 | wait interface | books - (6 rows) - ``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/7-deep-copy.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/7-deep-copy.md deleted file mode 100644 index 15c0a82b..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/7-deep-copy.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Deep Copy -summary: Deep Copy -author: Guo Huan -date: 2021-03-04 ---- - -# Deep Copy - -After data is imported, you can perform a deep copy to modify a partition key, change a row-store table to a column-store table, or add a partial cluster key. A deep copy re-creates a table and batch inserts data into the table. - -MogDB provides three deep copy methods. - -## Performing a Deep Copy by Using the CREATE TABLE Statement - -Run the **CREATE TABLE** statement to create a copy of the original table, batch insert data of the original table into the copy, and rename the copy to the name of the original table. - -When creating the copy, you can specify table and column attributes, such as the primary key. - -**Procedure** - -Perform the following operations to carry out a deep copy for the **customer_t** table: - -1. Run the **CREATE TABLE** statement to create the copy **customer_t_copy** of the **customer_t** table. - - ```sql - CREATE TABLE customer_t_copy - ( c_customer_sk integer, - c_customer_id char(5), - c_first_name char(6), - c_last_name char(8) - ) ; - ``` - -2. Run the **INSERT INTO…SELECT** statement to batch insert data of the original table into the copy. - - ```sql - INSERT INTO customer_t_copy (SELECT * FROM customer_t); - ``` - -3. Delete the original table. - - ```sql - DROP TABLE customer_t; - ``` - -4. Run the **ALTER TABLE** statement to rename the copy to the name of the original table. - - ```sql - ALTER TABLE customer_t_copy RENAME TO customer_t; - ``` - -## Performing a Deep Copy by Using the CREATE TABLE LIKE Statement - -Run the **CREATE TABLE LIKE** statement to create a copy of the original table, batch insert data of the original table into the copy, and rename the copy to the name of the original table. This method does not inherit the primary key attributes of the original table. You can use the **ALTER TABLE** statement to add them. - -**Procedure** - -1. Run the **CREATE TABLE LIKE** statement to create the copy **customer_t_copy** of the **customer_t** table. - - ```sql - CREATE TABLE customer_t_copy (LIKE customer_t); - ``` - -2. Run the **INSERT INTO…SELECT** statement to batch insert data of the original table into the copy. - - ```sql - INSERT INTO customer_t_copy (SELECT * FROM customer_t); - ``` - -3. Delete the original table. - - ```sql - DROP TABLE customer_t; - ``` - -4. Run the **ALTER TABLE** statement to rename the copy to the name of the original table. 
- - ```sql - ALTER TABLE customer_t_copy RENAME TO customer_t; - ``` - -## Performing a Deep Copy by Creating a Temporary Table and Truncating the Original Table - -Run the **CREATE TABLE ….** **AS** statement to create a temporary table for the original table, truncate the original table, and batch insert data of the temporary data into the original table. - -When creating the temporary table, retain the primary key attributes of the original table. This method is recommended if the original table has dependency items. - -**Procedure** - -1. Run the **CREATE TABLE AS** statement to create a temporary table **customer_t_temp** for the **customer_t** table. - - ```sql - CREATE TEMP TABLE customer_t_temp AS SELECT * FROM customer_t; - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > Compared with the use of permanent tables, the use of temporary tables can improve performance but may incur data loss. A temporary table is automatically deleted at the end of the session where it is located. If data loss is unacceptable, use a permanent table. - -2. Truncate the original table **customer_t**. - - ```sql - TRUNCATE customer_t; - ``` - -3. Run the **INSERT INTO…SELECT** statement to batch insert data of the temporary table into the original table. - - ```sql - INSERT INTO customer_t (SELECT * FROM customer_t_temp); - ``` - -4. Delete the temporary table **customer_t_temp**. - - ```sql - DROP TABLE customer_t_temp; - ``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/8-ANALYZE-table.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/8-ANALYZE-table.md deleted file mode 100644 index c0f9a5e4..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/8-ANALYZE-table.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: ANALYZE Table -summary: ANALYZE Table -author: Guo Huan -date: 2021-03-04 ---- - -# ANALYZE Table - -The execution plan generator needs to use table statistics to generate the most effective query execution plan to improve query performance. After data is imported, you are advised to run the **ANALYZE** statement to update table statistics. The statistics are stored in the system catalog **PG_STATISTIC**. - -## ANALYZE Table - -**ANALYZE** supports row-store and column-store tables. **ANALYZE** can also collect statistics about specified columns of a local table. For details on **ANALYZE**, see [ANALYZE | ANALYSE](../../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md). - -Update table statistics. - -Do **ANALYZE** to the **product_info** table. - -```sql -ANALYZE product_info; -``` - -```sql -ANALYZE -``` - -## autoanalyze - -MogDB provides the GUC parameter autovacuum to specify whether to enable the autovacuum function of the database. - -If **autovacuum** is set to **on**, the system will start the autovacuum thread to automatically analyze tables when the data volume in the table reaches the threshold. This is the autoanalyze function. - -- For an empty table, when the number of rows inserted to it is greater than 50, **ANALYZE** is automatically triggered. -- For a table containing data, the threshold is 50 + 10% x **reltuples**, where **reltuples** indicates the total number of rows in the table. 
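For example, for a table that already contains 10,000 rows (**reltuples** = 10,000), autoanalyze is triggered after 50 + 10% x 10,000 = 1,050 rows have been modified.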
- -The autovacuum function also depends on the following two GUC parameters in addition to **autovacuum**: - -- track_counts: This parameter must be set to **on** to enable statistics collection about the database. -- autovacuum_max_workers: This parameter must be set to a value greater than **0** to specify the maximum number of concurrent autovacuum threads. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - The autoanalyze function supports the default sampling mode but not percentage sampling. -> - The autoanalyze function does not collect multi-column statistics, which only supports percentage sampling. -> - The autoanalyze function supports row-store and column-store tables and does not support foreign tables, temporary tables, unlogged tables, and TOAST tables. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/9-doing-VACUUM-to-a-table.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/9-doing-VACUUM-to-a-table.md deleted file mode 100644 index 06de9a0d..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/9-doing-VACUUM-to-a-table.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Doing VACUUM to a Table -summary: Doing VACUUM to a Table -author: Guo Huan -date: 2021-03-04 ---- - -# Doing VACUUM to a Table - -If a large number of rows were updated or deleted during import, run **VACUUM FULL** before **ANALYZE**. A large number of UPDATE and DELETE operations generate huge disk page fragments, which reduces query efficiency. **VACUUM FULL** can restore disk page fragments and return them to the OS. - -Run the **VACUUM FULL** statement. - -Do **VACUUM FULL** to the **product_info** table. 
- -```sql -VACUUM FULL product_info -``` - -```sql -VACUUM -``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/importing-data.md b/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/importing-data.md deleted file mode 100644 index 49a11c00..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/importing-and-exporting-data/importing-data/importing-data.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Importing Data -summary: Importing Data -author: Guo Huan -date: 2023-05-22 ---- - -# Importing Data - -+ **[Import Modes](1-import-modes.md)** -+ **[Running the INSERT Statement to Insert Data](2-running-the-INSERT-statement-to-insert-data.md)** -+ **[Running the COPY FROM STDIN Statement to Import Data](3-running-the-COPY-FROM-STDIN-statement-to-import-data.md)** -+ **[Using a gsql Meta-Command to Import Data](4-using-a-gsql-meta-command-to-import-data.md)** -+ **[Using gs_restore to Import Data](5-using-gs_restore-to-import-data.md)** -+ **[Updating Data in a Table](6-updating-data-in-a-table.md)** -+ **[Deep Copy](7-deep-copy.md)** -+ **[ANALYZE Table](8-ANALYZE-table.md)** -+ **[Doing VACUUM to a Table](9-doing-VACUUM-to-a-table.md)** -+ **[Managing Concurrent Write Operations](10-managing-concurrent-write-operations.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/localization/character-set-support.md b/product/en/docs-mogdb/v5.2/administrator-guide/localization/character-set-support.md deleted file mode 100644 index 983d3ad5..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/localization/character-set-support.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -title: Character Set Support -summary: Character Set Support -author: Guo Huan -date: 2022-07-26 ---- - -# Character Set Support - -The character set support in MogDB allows you to store text in a variety of character sets (also called encodings), including single-byte character sets such as the ISO 8859 series and multiple-byte character sets such as EUC (Extended Unix Code), UTF-8, and Mule internal code. All supported character sets can be used transparently by clients, but a few are not supported for use within the server (that is, as a server-side encoding). The default character set is selected while initializing your MogDB database cluster using `gs_initdb`. It can be overridden when you create a database, so you can have multiple databases each with a different character set. - -An important restriction, however, is that each database's character set must be compatible with the database's `LC_CTYPE` (character classification) and `LC_COLLATE` (string sort order) locale settings. For `C` or `POSIX` locale, any character set is allowed, but for other locales there is only one character set that will work correctly. (On Windows, however, UTF-8 encoding can be used with any locale.) - -## Supported Character Sets - -Table1shows the character sets available for use in MogDB. - -**Table1 MogDB Character Sets** - -| Name | Description | Language | Server? 
| Bytes/Char | Aliases | -| ---------------- | --------------------------------- | ------------------------------ | ------- | ---------- | --------------------------------------------- | -| `BIG5` | Big Five | Traditional Chinese | No | 1-2 | `WIN950`, `Windows950` | -| `EUC_CN` | Extended UNIX Code-CN | Simplified Chinese | Yes | 1-3 | | -| `EUC_JP` | Extended UNIX Code-JP | Japanese | Yes | 1-3 | | -| `EUC_JIS_2004` | Extended UNIX Code-JP, JIS X 0213 | Japanese | Yes | 1-3 | | -| `EUC_KR` | Extended UNIX Code-KR | Korean | Yes | 1-3 | | -| `EUC_TW` | Extended UNIX Code-TW | Traditional Chinese, Taiwanese | Yes | 1-3 | | -| `GB18030` | National Standard | Chinese | Yes | 1-4 | | -| `GBK` | Extended National Standard | Simplified Chinese | Yes | 1-2 | `WIN936`, `Windows936` | -| `ISO_8859_5` | ISO 8859-5, ECMA 113 | Latin/Cyrillic | Yes | 1 | | -| `ISO_8859_6` | ISO 8859-6, ECMA 114 | Latin/Arabic | Yes | 1 | | -| `ISO_8859_7` | ISO 8859-7, ECMA 118 | Latin/Greek | Yes | 1 | | -| `ISO_8859_8` | ISO 8859-8, ECMA 121 | Latin/Hebrew | Yes | 1 | | -| `JOHAB` | JOHAB | Korean (Hangul) | No | 1-3 | | -| `KOI8R` | KOI8-R | Cyrillic (Russian) | Yes | 1 | `KOI8` | -| `KOI8U` | KOI8-U | Cyrillic (Ukrainian) | Yes | 1 | | -| `LATIN1` | ISO 8859-1, ECMA 94 | Western European | Yes | 1 | `ISO88591` | -| `LATIN2` | ISO 8859-2, ECMA 94 | Central European | Yes | 1 | `ISO88592` | -| `LATIN3` | ISO 8859-3, ECMA 94 | South European | Yes | 1 | `ISO88593` | -| `LATIN4` | ISO 8859-4, ECMA 94 | North European | Yes | 1 | `ISO88594` | -| `LATIN5` | ISO 8859-9, ECMA 128 | Turkish | Yes | 1 | `ISO88599` | -| `LATIN6` | ISO 8859-10, ECMA 144 | Nordic | Yes | 1 | `ISO885910` | -| `LATIN7` | ISO 8859-13 | Baltic | Yes | 1 | `ISO885913` | -| `LATIN8` | ISO 8859-14 | Celtic | Yes | 1 | `ISO885914` | -| `LATIN9` | ISO 8859-15 | LATIN1 with Euro and accents | Yes | 1 | `ISO885915` | -| `LATIN10` | ISO 8859-16, ASRO SR 14111 | Romanian | Yes | 1 | `ISO885916` | -| `MULE_INTERNAL` | Mule internal code | Multilingual Emacs | Yes | 1-4 | | -| `SJIS` | Shift JIS | Japanese | No | 1-2 | `Mskanji`, `ShiftJIS`, `WIN932`, `Windows932` | -| `SHIFT_JIS_2004` | Shift JIS, JIS X 0213 | Japanese | No | 1-2 | | -| `SQL_ASCII` | unspecified (see text) | any | Yes | 1 | | -| `UHC` | Unified Hangul Code | Korean | No | 1-2 | `WIN949`, `Windows949` | -| `UTF8` | Unicode, 8-bit | all | Yes | 1-4 | `Unicode` | -| `WIN866` | Windows CP866 | Cyrillic | Yes | 1 | `ALT` | -| `WIN874` | Windows CP874 | Thai | Yes | 1 | | -| `WIN1250` | Windows CP1250 | Central European | Yes | 1 | | -| `WIN1251` | Windows CP1251 | Cyrillic | Yes | 1 | `WIN` | -| `WIN1252` | Windows CP1252 | Western European | Yes | 1 | | -| `WIN1253` | Windows CP1253 | Greek | Yes | 1 | | -| `WIN1254` | Windows CP1254 | Turkish | Yes | 1 | | -| `WIN1255` | Windows CP1255 | Hebrew | Yes | 1 | | -| `WIN1256` | Windows CP1256 | Arabic | Yes | 1 | | -| `WIN1257` | Windows CP1257 | Baltic | Yes | 1 | | -| `WIN1258` | Windows CP1258 | Vietnamese | Yes | 1 | `ABC`, `TCVN`, `TCVN5712`, `VSCII` | - -Not all client APIs support all the listed character sets. The `SQL_ASCII` setting behaves considerably differently from the other settings. When the server character set is `SQL_ASCII`, the server interprets byte values 0-127 according to the ASCII standard, while byte values 128-255 are taken as uninterpreted characters. No encoding conversion will be done when the setting is `SQL_ASCII`. 
Thus, this setting is not so much a declaration that a specific encoding is in use, as a declaration of ignorance about the encoding. In most cases, if you are working with any non-ASCII data, it is unwise to use the `SQL_ASCII` setting because MogDB will be unable to help you by converting or validating non-ASCII characters. - -## Setting the Character Set - -`gs_initdb` defines the default character set (encoding) for a MogDB cluster. See [gs_initdb](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_initdb.md) for details. - -You can specify a non-default encoding at database creation time use this SQL command, provided that the encoding is compatible with the selected locale: - -```sql -CREATE DATABASE chinese WITH ENCODING 'UTF8' LC_COLLATE='en_US.UTF8' LC_CTYPE='en_US.UTF8' TEMPLATE=template0; -``` - -Notice that the above commands specify copying the `template0` database. When copying any other database, the encoding and locale settings cannot be changed from those of the source database, because that might result in corrupt data. For more information see [CREATE DATABASE](../../reference-guide/sql-syntax/CREATE-DATABASE.md). - -The encoding for a database is stored in the system catalog `pg_database`. You can see it by using the `gsql` `-l` option or the `\l` command. - -```bash -$ gsql -l - List of databases - Name | Owner | Encoding | Collate | Ctype | Access privileges ------------+-------+----------+-------------+-------------+------------------- - chinese | omm | UTF8 | en_US.UTF8 | en_US.UTF8 | - mogdb | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | - mogila | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | - postgres | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | - template0 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm + - | | | | | omm=CTc/omm - template1 | omm | UTF8 | en_US.UTF-8 | en_US.UTF-8 | =c/omm + - | | | | | omm=CTc/omm -(6 rows) -``` - -> **Important:** On most modern operating systems, MogDB can determine which character set is implied by the `LC_CTYPE` setting, and it will enforce that only the matching database encoding is used. On older systems it is your responsibility to ensure that you use the encoding expected by the locale you have selected. A mistake in this area is likely to lead to strange behavior of locale-dependent operations such as sorting. -> -> MogDB will allow superusers to create databases with `SQL_ASCII` encoding even when `LC_CTYPE` is not `C` or `POSIX`. As noted above, `SQL_ASCII` does not enforce that the data stored in the database has any particular encoding, and so this choice poses risks of locale-dependent misbehavior. Using this combination of settings is deprecated and may someday be forbidden altogether. - -## Automatic Character Set Conversion Between Server and Client - -MogDB supports automatic character set conversion between server and client for certain character set combinations. The conversion information is stored in the `pg_conversion` system catalog. MogDB comes with some predefined conversions, as shown in Table 2. 
- -**Table2 Client/Server Character Set Conversions** - -| Server Character Set | Available Client Character Sets | -| -------------------- | ------------------------------------------------------------ | -| `BIG5` | not supported as a server encoding | -| `EUC_CN` | EUC_CN, `MULE_INTERNAL`, `UTF8` | -| `EUC_JP` | EUC_JP, `MULE_INTERNAL`, `SJIS`, `UTF8` | -| `EUC_JIS_2004` | EUC_JIS_2004, `SHIFT_JIS_2004`, `UTF8` | -| `EUC_KR` | EUC_KR, `MULE_INTERNAL`, `UTF8` | -| `EUC_TW` | EUC_TW, `BIG5`, `MULE_INTERNAL`, `UTF8` | -| `GB18030` | not supported as a server encoding | -| `GBK` | not supported as a server encoding | -| `ISO_8859_5` | ISO_8859_5, `KOI8R`, `MULE_INTERNAL`, `UTF8`, `WIN866`, `WIN1251` | -| `ISO_8859_6` | ISO_8859_6, `UTF8` | -| `ISO_8859_7` | ISO_8859_7, `UTF8` | -| `ISO_8859_8` | ISO_8859_8, `UTF8` | -| `JOHAB` | not supported as a server encoding | -| `KOI8R` | KOI8R, `ISO_8859_5`, `MULE_INTERNAL`, `UTF8`, `WIN866`, `WIN1251` | -| `KOI8U` | KOI8U, `UTF8` | -| `LATIN1` | LATIN1, `MULE_INTERNAL`, `UTF8` | -| `LATIN2` | LATIN2, `MULE_INTERNAL`, `UTF8`, `WIN1250` | -| `LATIN3` | LATIN3, `MULE_INTERNAL`, `UTF8` | -| `LATIN4` | LATIN4, `MULE_INTERNAL`, `UTF8` | -| `LATIN5` | LATIN5, `UTF8` | -| `LATIN6` | LATIN6, `UTF8` | -| `LATIN7` | LATIN7, `UTF8` | -| `LATIN8` | LATIN8, `UTF8` | -| `LATIN9` | LATIN9, `UTF8` | -| `LATIN10` | LATIN10, `UTF8` | -| `MULE_INTERNAL` | MULE_INTERNAL, `BIG5`, `EUC_CN`, `EUC_JP`, `EUC_KR`, `EUC_TW`, `ISO_8859_5`, `KOI8R`, `LATIN1` to `LATIN4`, `SJIS`, `WIN866`, `WIN1250`, `WIN1251` | -| `SJIS` | not supported as a server encoding | -| `SHIFT_JIS_2004` | not supported as a server encoding | -| `SQL_ASCII` | any (no conversion will be performed) | -| `UHC` | not supported as a server encoding | -| `UTF8` | all supported encodings | -| `WIN866` | WIN866, `ISO_8859_5`, `KOI8R`, `MULE_INTERNAL`, `UTF8`, `WIN1251` | -| `WIN874` | WIN874, `UTF8` | -| `WIN1250` | WIN1250, `LATIN2`, `MULE_INTERNAL`, `UTF8` | -| `WIN1251` | WIN1251, `ISO_8859_5`, `KOI8R`, `MULE_INTERNAL`, `UTF8`, `WIN866` | -| `WIN1252` | WIN1252, `UTF8` | -| `WIN1253` | WIN1253, `UTF8` | -| `WIN1254` | WIN1254, `UTF8` | -| `WIN1255` | WIN1255, `UTF8` | -| `WIN1256` | WIN1256, `UTF8` | - -To enable automatic character set conversion, you have to tell MogDB the character set (encoding) you would like to use in the client. There are several ways to accomplish this: - -- Using the `\encoding` command in gsql. `\encoding` allows you to change client encoding on the fly. For example, to change the encoding to `SJIS`, type: - - ```sql - \encoding SJIS - ``` - -- [libpq](../../developer-guide/dev/4-development-based-on-libpq/2-libpq/libpq-api-reference.md) has functions to control the client encoding. - -- Using `SET client_encoding TO`. Setting the client encoding can be done with this SQL command: - - ```bash - SET CLIENT_ENCODING TO 'value'; - ``` - - Also you can use the standard SQL syntax `SET NAMES` for this purpose: - - ```bash - SET NAMES 'value'; - ``` - - To query the current client encoding: - - ```bash - SHOW client_encoding; - ``` - - To return to the default encoding: - - ```bash - RESET client_encoding; - ``` - -- Using `PGCLIENTENCODING`. If the environment variable `PGCLIENTENCODING` is defined in the client's environment, that client encoding is automatically selected when a connection to the server is made. (This can subsequently be overridden using any of the other methods mentioned above.) 
- -- Using the configuration variable [client_encoding](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md#client_encoding). If the `client_encoding` variable is set, that client encoding is automatically selected when a connection to the server is made. (This can subsequently be overridden using any of the other methods mentioned above.) - -If the conversion of a particular character is not possible — suppose you chose `EUC_JP` for the server and `LATIN1` for the client, and some Japanese characters are returned that do not have a representation in `LATIN1` — an error is reported. - -If the client character set is defined as `SQL_ASCII`, encoding conversion is disabled, regardless of the server's character set. Just as for the server, use of `SQL_ASCII` is unwise unless you are working with all-ASCII data. - -## Further Reading - -These are good sources to start learning about various kinds of encoding systems. - -- CJKV Information Processing: Chinese, Japanese, Korean & Vietnamese Computing - - Contains detailed explanations of `EUC_JP`, `EUC_CN`, `EUC_KR`, `EUC_TW`. - -- [http://www.unicode.org/](http://www.unicode.org/) - - The web site of the Unicode Consortium. - -- RFC 3629 - - UTF-8 (8-bit UCS/Unicode Transformation Format) is defined here. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/localization/collation-support.md b/product/en/docs-mogdb/v5.2/administrator-guide/localization/collation-support.md deleted file mode 100644 index 0c230349..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/localization/collation-support.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Collation Support -summary: Collation Support -author: Zhang Cuiping -date: 2022-07-26 ---- - -# Collation Support - -The collation feature allows specifying the sort order and character classification behavior of data per-column, or even per-operation. This alleviates the restriction that the `LC_COLLATE` and `LC_CTYPE` settings of a database cannot be changed after its creation. - -## Concepts - -Conceptually, every expression of a collatable data type has a collation. (The built-in collatable data types are `text`, `varchar`, and `char`. User-defined base types can also be marked collatable, and of course a domain over a collatable data type is collatable.) If the expression is a column reference, the collation of the expression is the defined collation of the column. If the expression is a constant, the collation is the default collation of the data type of the constant. The collation of a more complex expression is derived from the collations of its inputs, as described below. - -The collation of an expression can be the “default” collation, which means the locale settings defined for the database. It is also possible for an expression's collation to be indeterminate. In such cases, ordering operations and other operations that need to know the collation will fail. - -When the database system has to perform an ordering or a character classification, it uses the collation of the input expression. This happens, for example, with `ORDER BY` clauses and function or operator calls such as `<`. The collation to apply for an `ORDER BY` clause is simply the collation of the sort key. The collation to apply for a function or operator call is derived from the arguments, as described below. 
In addition to comparison operators, collations are taken into account by functions that convert between lower and upper case letters, such as `lower`, `upper`, and `initcap`; by pattern matching operators; and by `to_char` and related functions. - -For a function or operator call, the collation that is derived by examining the argument collations is used at run time for performing the specified operation. If the result of the function or operator call is of a collatable data type, the collation is also used at parse time as the defined collation of the function or operator expression, in case there is a surrounding expression that requires knowledge of its collation. - -The *collation derivation* of an expression can be implicit or explicit. This distinction affects how collations are combined when multiple different collations appear in an expression. An explicit collation derivation occurs when a `COLLATE` clause is used; all other collation derivations are implicit. When multiple collations need to be combined, for example in a function call, the following rules are used: - -1. If any input expression has an explicit collation derivation, then all explicitly derived collations among the input expressions must be the same, otherwise an error is raised. If any explicitly derived collation is present, that is the result of the collation combination. -2. Otherwise, all input expressions must have the same implicit collation derivation or the default collation. If any non-default collation is present, that is the result of the collation combination. Otherwise, the result is the default collation. -3. If there are conflicting non-default implicit collations among the input expressions, then the combination is deemed to have indeterminate collation. This is not an error condition unless the particular function being invoked requires knowledge of the collation it should apply. If it does, an error will be raised at run-time. - -For example, consider this table definition: - -```sql -CREATE TABLE test1 ( - a text COLLATE "de_DE", - b text COLLATE "es_ES", - ... -); -``` - -Then in - -```sql -SELECT a < 'foo' FROM test1; -``` - -the `<` comparison is performed according to `de_DE` rules, because the expression combines an implicitly derived collation with the default collation. But in - -```sql -SELECT a < ('foo' COLLATE "fr_FR") FROM test1; -``` - -the comparison is performed using `fr_FR` rules, because the explicit collation derivation overrides the implicit one. Furthermore, given - -```sql -SELECT a < b FROM test1; -``` - -the parser cannot determine which collation to apply, since the `a` and `b` columns have conflicting implicit collations. Since the `<` operator does need to know which collation to use, this will result in an error. The error can be resolved by attaching an explicit collation specifier to either input expression, thus: - -```sql -SELECT a < b COLLATE "de_DE" FROM test1; -``` - -or equivalently - -```sql -SELECT a COLLATE "de_DE" < b FROM test1; -``` - -On the other hand, the structurally similar case - -```sql -SELECT a || b FROM test1; -``` - -does not result in an error, because the `||` operator does not care about collations: its result is the same regardless of the collation. - -The collation assigned to a function or operator's combined input expressions is also considered to apply to the function or operator's result, if the function or operator delivers a result of a collatable data type. 
So, in - -```sql -SELECT * FROM test1 ORDER BY a || 'foo'; -``` - -the ordering will be done according to `de_DE` rules. But this query: - -```sql -SELECT * FROM test1 ORDER BY a || b; -``` - -results in an error, because even though the `||` operator doesn't need to know a collation, the `ORDER BY` clause does. As before, the conflict can be resolved with an explicit collation specifier: - -```sql -SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; -``` - -## Managing Collations - -A collation is an SQL schema object that maps an SQL name to locales provided by libraries installed in the operating system. A collation definition has a *provider* that specifies which library supplies the locale data. One standard provider name is `libc`, which uses the locales provided by the operating system C library. These are the locales that most tools provided by the operating system use. Another provider is `icu`, which uses the external ICU library. ICU locales can only be used if support for ICU was configured when MogDB was built. - -A collation object provided by `libc` maps to a combination of `LC_COLLATE` and `LC_CTYPE` settings, as accepted by the `setlocale()` system library call. (As the name would suggest, the main purpose of a collation is to set `LC_COLLATE`, which controls the sort order. But it is rarely necessary in practice to have an `LC_CTYPE` setting that is different from `LC_COLLATE`, so it is more convenient to collect these under one concept than to create another infrastructure for setting `LC_CTYPE` per expression.) Also, a `libc` collation is tied to a character set encoding (see [Character Set Support](./character-set-support.md)). The same collation name may exist for different encodings. - -A collation object provided by `icu` maps to a named collator provided by the ICU library. ICU does not support separate “collate” and “ctype” settings, so they are always the same. Also, ICU collations are independent of the encoding, so there is always only one ICU collation of a given name in a database. - -### Standard Collations - -On all platforms, the collations named `default`, `C`, and `POSIX` are available. Additional collations may be available depending on operating system support. The `default` collation selects the `LC_COLLATE` and `LC_CTYPE` values specified at database creation time. The `C` and `POSIX` collations both specify “traditional C” behavior, in which only the ASCII letters “`A`” through “`Z`” are treated as letters, and sorting is done strictly by character code byte values. - -Additionally, the SQL standard collation name `ucs_basic` is available for encoding `UTF8`. It is equivalent to `C` and sorts by Unicode code point. - -### Predefined Collations - -If the operating system provides support for using multiple locales within a single program (`newlocale` and related functions), or if support for ICU is configured, then when a database cluster is initialized, `gs_initdb` populates the system catalog `pg_collation` with collations based on all the locales it finds in the operating system at the time. - -To inspect the currently available locales, use the query `SELECT * FROM pg_collation`, or the command `\dOS+` in gsql. - -#### libc Collations - -For example, the operating system might provide a locale named `de_DE.utf8`. `gs_initdb` would then create a collation named `de_DE.utf8` for encoding `UTF8` that has both `LC_COLLATE` and `LC_CTYPE` set to `de_DE.utf8`. It will also create a collation with the `.utf8` tag stripped off the name. 
So you could also use the collation under the name `de_DE`, which is less cumbersome to write and makes the name less encoding-dependent. Note that, nevertheless, the initial set of collation names is platform-dependent. - -The default set of collations provided by `libc` maps directly to the locales installed in the operating system, which can be listed using the command `locale -a`. - -Within any particular database, only collations that use that database's encoding are of interest. Other entries in `pg_collation` are ignored. Thus, a stripped collation name such as `de_DE` can be considered unique within a given database even though it would not be unique globally. Use of the stripped collation names is recommended, since it will make one fewer thing you need to change if you decide to change to another database encoding. Note however that the `default`, `C`, and `POSIX` collations can be used regardless of the database encoding. - -MogDB considers distinct collation objects to be incompatible even when they have identical properties. Thus for example, - -```sql -SELECT a COLLATE "C" < b COLLATE "POSIX" FROM test1; -``` - -will draw an error even though the `C` and `POSIX` collations have identical behaviors. Mixing stripped and non-stripped collation names is therefore not recommended. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/localization/locale-support.md b/product/en/docs-mogdb/v5.2/administrator-guide/localization/locale-support.md deleted file mode 100644 index 7f7cdde0..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/localization/locale-support.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Locale Support -summary: Locale Support -author: Zhang Cuiping -date: 2022-07-26 ---- - -# Locale Support - -Locale support refers to an application respecting cultural preferences regarding alphabets, sorting, number formatting, etc. MogDB uses the standard ISO C and POSIX locale facilities provided by the server operating system. For additional information refer to the documentation of your system. - -## Overview - -Locale support is automatically initialized when a database cluster is created using `gs_initdb`. `gs_initdb` will initialize the database cluster with the locale setting of its execution environment by default, so if your system is already set to use the locale that you want in your database cluster then there is nothing else you need to do. If you want to use a different locale (or you are not sure which locale your system is set to), you can instruct `gs_initdb` exactly which locale to use by specifying the `--locale` option. For example: - -``` -gs_initdb -D /opt/mogdb/data -w "XXXXXXXX" --nodename='data1' --locale=en_US -``` - -This example for Unix systems sets the locale to U.S. English (`en_US`). Other possibilities might include `sv_SE` (Swedish as spoken in Sweden) and `fr_CA` (French Canadian). If more than one character set can be used for a locale then the specifications can take the form `language_territory.codeset`. For example, `fr_BE.UTF-8` represents the French language (fr) as spoken in Belgium (BE), with a UTF-8 character set encoding. - -What locales are available on your system under what names depends on what was provided by the operating system vendor and what was installed. On most Unix systems, the command `locale -a` will provide a list of available locales. Windows uses more verbose locale names, such as `German_Germany` or `Swedish_Sweden.1252`, but the principles are the same.
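For example, before running `gs_initdb` you can verify (a quick check, assuming a Unix-like system that provides the `locale -a` command mentioned above) that the locale you intend to use is actually installed:

```bash
# List installed locales and filter for the one planned for gs_initdb;
# the exact spellings (en_US, en_US.utf8, ...) vary by platform
locale -a | grep -i '^en_US'
```

If the desired locale does not appear in the list, install the corresponding operating system language pack before initializing the cluster.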
- -Occasionally it is useful to mix rules from several locales, e.g., use English collation rules but Spanish messages. To support that, a set of locale subcategories exists, each controlling only certain aspects of the localization rules: - -| Locale Category | Controls | -| ------------- | ------------------------------------------------------------ | -| `LC_COLLATE` | String sort order | -| `LC_CTYPE` | Character classification (What is a letter? Its upper-case equivalent?) | -| `LC_MESSAGES` | Language of messages | -| `LC_MONETARY` | Formatting of currency amounts | -| `LC_NUMERIC` | Formatting of numbers | -| `LC_TIME` | Formatting of dates and times | - -The category names translate into names of `gs_initdb` options to override the locale choice for a specific category. For instance, to set the locale to French Canadian, but use U.S. rules for formatting currency, use `gs_initdb -D /opt/mogdb/data -w "XXXXXXXX" --nodename='data1' --locale=fr_CA --lc-monetary=en_US`. - -If you want the system to behave as if it had no locale support, use the special locale name `C`, or equivalently `POSIX`. - -Some locale categories must have their values fixed when the database is created. You can use different settings for different databases, but once a database is created, you cannot change them for that database anymore. `LC_COLLATE` and `LC_CTYPE` are these categories. They affect the sort order of indexes, so they must be kept fixed, or indexes on text columns would become corrupt. (But you can alleviate this restriction using collations, as discussed in [Collation Support](./collation-support.md).) The default values for these categories are determined when `gs_initdb` is run, and those values are used when new databases are created, unless specified otherwise in the `CREATE DATABASE` command. - -The other locale categories can be changed whenever desired by setting the server configuration parameters that have the same name as the locale categories (see [Zone and Formatting](../../reference-guide/guc-parameters/default-settings-of-client-connection/zone-and-formatting.md) for details). The values that are chosen by `gs_initdb` are actually only written into the configuration file `postgresql.conf` to serve as defaults when the server is started. If you remove these assignments from `postgresql.conf` then the server will inherit the settings from its execution environment. - -Note that the locale behavior of the server is determined by the environment variables seen by the server, not by the environment of any client. Therefore, be careful to configure the correct locale settings before starting the server. A consequence of this is that if client and server are set up in different locales, messages might appear in different languages depending on where they originated. - -> **Note**: When we speak of inheriting the locale from the execution environment, this means the following on most operating systems: For a given locale category, say the collation, the following environment variables are consulted in this order until one is found to be set: `LC_ALL`, `LC_COLLATE` (or the variable corresponding to the respective category), `LANG`. If none of these environment variables are set then the locale defaults to `C`. -> -> Some message localization libraries also look at the environment variable `LANGUAGE` which overrides all other locale settings for the purpose of setting the language of messages. If in doubt, please refer to the documentation of your operating system, in particular the documentation about gettext.
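As a small illustration of the rules above (a sketch; the value strings returned depend on the locales installed on your server), the fixed and changeable categories can be inspected and, where permitted, changed with standard SQL:

```sql
-- LC_COLLATE and LC_CTYPE are fixed when the database is created
SHOW lc_collate;
SHOW lc_ctype;
-- The other categories can be changed at run time for the current session
SET lc_monetary = 'en_US.UTF-8';
SHOW lc_monetary;
```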
- -## Behavior - -The locale settings influence the following SQL features: - -- Sort order in queries using `ORDER BY` or the standard comparison operators on textual data -- The `upper`, `lower`, and `initcap` functions -- Pattern matching operators (`LIKE`, `SIMILAR TO`, and POSIX-style regular expressions); locales affect both case insensitive matching and the classification of characters by character-class regular expressions -- The `to_char` family of functions -- The ability to use indexes with `LIKE` clauses - -The drawback of using locales other than `C` or `POSIX` in MogDB is its performance impact. It slows character handling and prevents ordinary indexes from being used by `LIKE`. For this reason, use locales only if you actually need them. - -As a workaround to allow MogDB to use indexes with `LIKE` clauses under a non-C locale, several custom operator classes exist. These allow the creation of an index that performs a strict character-by-character comparison, ignoring locale comparison rules. Another approach is to create indexes using the `C` collation, as discussed in [Collation Support](./collation-support.md). - -## Problems - -If locale support doesn't work according to the explanation above, check that the locale support in your operating system is correctly configured. To check what locales are installed on your system, you can use the command `locale -a` if your operating system provides it. - -MogDB `LC_COLLATE` and `LC_CTYPE` settings are determined when a database is created, and cannot be changed except by creating a new database. Other locale settings including `LC_MESSAGES` and `LC_MONETARY` are initially determined by the environment the server is started in, but can be changed on-the-fly. You can check the active locale settings using the `SHOW` command. - -Client applications that handle server-side errors by parsing the text of the error message will obviously have problems when the server's messages are in a different language. Authors of such applications are advised to make use of the error code scheme instead. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/localization/localization.md b/product/en/docs-mogdb/v5.2/administrator-guide/localization/localization.md deleted file mode 100644 index 32ea4dd3..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/localization/localization.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Localization -summary: Localization -author: Guo Huan -date: 2023-05-22 ---- - -# Localization - -+ **[Locale Support](locale-support.md)** -+ **[Collation Support](collation-support.md)** -+ **[Character Set Support](character-set-support.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/1-mot-introduction.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/1-mot-introduction.md deleted file mode 100644 index 6eba1aac..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/1-mot-introduction.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: MOT Introduction -summary: MOT Introduction -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Introduction - -MogDB introduces the Memory-Optimized Tables (MOT) storage engine - a transactional row-based store (rowstore) that is optimized for many-core and large-memory servers. MOT is a state-of-the-art production-grade feature (Beta release) of the MogDB database that provides greater performance for transactional workloads.
MOT is fully ACID compliant and includes strict durability and high availability support. Businesses can leverage MOT for mission-critical, performance-sensitive Online Transaction Processing (OLTP) applications in order to achieve high performance, high throughput, low and predictable latency and high utilization of many-core servers. MOT is especially suited to scaling up on modern servers with multiple sockets and many-core processors, such as Huawei Taishan servers with ARM/Kunpeng processors and x86-based Dell or similar servers. - -**Figure 1** Memory-Optimized Storage Engine Within MogDB - -![memory-optimized-storage-engine-within-opengauss](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-introduction-2.png) - -[Figure 1](#memoryoptimized) presents the Memory-Optimized Storage Engine component (in green) of the MogDB database, which is responsible for managing MOT tables and transactions. - -MOT tables are created side by side with regular disk-based tables. MOT's effective design enables almost full SQL coverage and support for a full database feature-set, such as stored procedures and user-defined functions (excluding the features listed in the **MOT SQL Coverage and Limitations** section). - -With data and indexes stored entirely in memory, a Non-Uniform Memory Access (NUMA)-aware design, algorithms that eliminate lock and latch contention and query native compilation, MOT provides faster data access and more efficient transaction execution. - -MOT's almost lock-free design and highly tuned implementation enable exceptional near-linear throughput scale-up on many-core servers - probably the best in the industry. - -Memory-Optimized Tables are fully ACID compliant, as follows: - -- **Atomicity -** An atomic transaction is an indivisible series of database operations that either all occur or none occur after a transaction has been completed (committed or aborted, respectively). -- **Consistency -** Every transaction leaves the database in a consistent (data integrity) state. -- **Isolation -** Transactions cannot interfere with each other. MOT supports repeatable-reads and read-committed isolation levels. In the next release, MOT will also support serializable isolation. See the **MOT Isolation Levels** section for more information. -- **Durability -** The effects of successfully completed (committed) transactions must persist despite crashes and failures. MOT is fully integrated with the WAL-based logging of MogDB. Both synchronous and asynchronous logging options are supported. MOT also uniquely supports synchronous + group commit with NUMA-awareness optimization. See the **MOT Durability Concepts** section for more information. - -The MOT Engine was published at VLDB 2020 (the International Conference on Very Large Data Bases, or VLDB): - -**Industrial-Strength OLTP Using Main Memory and Many Cores**, VLDB 2020 vol. 13 - [Paper](http://www.vldb.org/pvldb/vol13/p3099-avni.pdf), [Video on youtube](https://www.modb.pro/video/6676?slink), [Video on bilibili](https://www.bilibili.com/video/BV1MA411n7ef?p=97).
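As a brief illustration of the side-by-side deployment described above (a sketch only; the table and column names are invented, and the exact syntax and limitations are covered in the MOT usage documentation), an MOT table is created with the `FOREIGN` keyword, after which it is accessed with standard SQL just like a disk-based table:

```sql
-- Sketch: an MOT table is created as a FOREIGN table (FDW-based engine)
CREATE FOREIGN TABLE orders_mot (
    order_id INT PRIMARY KEY,
    amount   DECIMAL(10,2)
);
-- A regular disk-based table coexists in the same database
CREATE TABLE orders_archive (order_id INT, amount DECIMAL(10,2));
-- DML against the MOT table is ordinary SQL
INSERT INTO orders_mot VALUES (1, 99.90);
SELECT * FROM orders_mot;
```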
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/2-mot-features-and-benefits.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/2-mot-features-and-benefits.md deleted file mode 100644 index 1713c463..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/2-mot-features-and-benefits.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: MOT Features and Benefits -summary: MOT Features and Benefits -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Features and Benefits - -MOT provides users with significant benefits in performance (query and transaction latency), scalability (throughput and concurrency) and, in some cases, cost (high resource utilization) - - -- **Low Latency -** Provides fast query and transaction response time -- **High Throughput -** Supports spikes and constantly high user concurrency -- **High Resource Utilization -** Utilizes hardware to its full extent - -Using MOT, applications are able to achieve 2.5 to 4 times (2.5x-4x) higher throughput. For example, in our TPC-C benchmarks (interactive transactions and synchronous logging) performed both on Huawei Taishan Kunpeng-based (ARM) servers and on Dell x86 Intel Xeon-based servers, MOT provides throughput gains that vary from 2.5x on a 2-socket server to 3.7x on a 4-socket server, reaching 4.8M (million) tpmC on an ARM 4-socket 256-core server. - -The lower latency provided by MOT reduces transaction response time by 3x to 5.5x, as observed in TPC-C benchmarks. - -Additionally, MOT enables extremely high utilization of server resources when running under high load and contention, which is a well-known problem for all leading industry databases. Using MOT, utilization reaches 99% on a 4-socket server, compared with much lower utilization observed when testing other industry-leading databases. - -These abilities are especially evident and important on modern many-core servers. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/3-mot-key-technologies.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/3-mot-key-technologies.md deleted file mode 100644 index 3bf4108c..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/3-mot-key-technologies.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: MOT Key Technologies -summary: MOT Key Technologies -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Key Technologies - -The following key MOT technologies enable its benefits: - -- **Memory Optimized Data Structures -** With the objective of achieving optimal high concurrent throughput and predictable low latency, all data and indexes are in memory, no intermediate page buffers are used and only minimal, short-duration locks are used. Data structures and all algorithms have been specialized and optimized for in-memory design. -- **Lock-free Transaction Management -** The MOT storage engine applies an optimistic approach to achieving data integrity versus concurrency and high throughput. During a transaction, an MOT table does not place locks on any version of the data rows being updated, thus significantly reducing contention in some high-volume systems. Optimistic Concurrency Control (OCC) statements within a transaction are implemented without locks, and all data modifications are performed in a part of the memory that is dedicated to private transactions (also called *Private Transaction Memory*).
This means that during a transaction, the relevant data is updated in the Private Transaction Memory, thus enabling lock-less reads and writes; a very short-duration lock is placed only at the Commit phase. For more details, see the **MOT Concurrency Control Mechanism** section. -- **Lock-free Index -** Because database data and indexes are stored entirely in memory, having an efficient index data structure and algorithm is essential. The MOT index is based on the state-of-the-art Masstree, a fast and scalable Key Value (KV) store for multi-core systems, implemented as a Trie of B+ trees. In this way, excellent performance is achieved on many-core servers and during high concurrent workloads. This index applies various advanced techniques in order to optimize performance, such as an optimistic lock approach, cache-line awareness and memory prefetching. -- **NUMA-aware Memory Management -** MOT memory access is designed with Non-Uniform Memory Access (NUMA) awareness. NUMA-aware algorithms enhance the performance of a data layout in memory so that threads access the memory that is physically attached to the core on which the thread is running. This is handled by the memory controller without requiring an extra hop by using an interconnect, such as Intel QPI. MOT's smart memory control module with pre-allocated memory pools for various memory objects improves performance, reduces locks and ensures stability. Allocation of a transaction's memory objects is always NUMA-local. Deallocated objects are returned to the pool. Minimal usage of OS malloc during transactions circumvents unnecessary locks. -- **Efficient Durability - Logging and Checkpoint -** Achieving disk persistence (also known as *durability*) is a crucial requirement for being ACID compliant (the **D** stands for Durability). All current disks (including SSD and NVMe) are significantly slower than memory and thus are always the bottleneck of a memory-based database. As an in-memory storage engine with full durability support, MOT's durability design must implement a wide variety of algorithmic optimizations in order to ensure durability, while still achieving the speed and throughput objectives for which it was designed. These optimizations include - - - Parallel logging, which is also available in all MogDB disk tables - - Log buffering per transaction and lock-less transaction preparation - - Updating delta records, meaning only logging changes - - Innovative NUMA-aware group commit logging, in addition to synchronous and asynchronous modes - - State-of-the-art database checkpoints (CALC) enabling the lowest memory and computational overhead. -- **High SQL Coverage and Feature Set -** By extending and relying on the PostgreSQL Foreign Data Wrappers (FDW) + Index support, the entire range of SQL is covered, including stored procedures, user-defined functions and system function calls. You may refer to the **MOT SQL Coverage and Limitations** section for a list of the features that are not supported. -- **Queries Native Compilation using PREPARE Statements -** Queries and transaction statements can be executed in an interactive manner by using PREPARE client commands that have been precompiled into a native execution format (which is also known as *Code-Gen* or *Just-in-Time [JIT]* compilation). This achieves an average of 30% higher performance. Compilation and Lite Execution are applied when possible, and if not, applicable queries are processed using the standard execution path.
A Cache Plan module (that has been optimized for OLTP) re-uses compilation results throughout an entire session (even using different bind settings), as well as across different sessions. -- **Seamless Integration of MOT and MogDB Database -** The MOT operates side by side with the disk-based storage engine within an integrated envelope. MOT's main memory engine and disk-based storage engines co-exist side by side in order to support multiple application scenarios, while internally reusing database auxiliary services, such as a Write-Ahead Logging (WAL) Redo Log, Replication, Checkpointing, Recovery, High Availability and so on. Users benefit from the unified deployment, configuration and access of both disk-based tables and MOT tables. This provides a flexible and cost-efficient choice of which storage engine to use according to specific requirements. For example, to place highly performance-sensitive data that causes bottlenecks into memory. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/4-mot-usage-scenarios.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/4-mot-usage-scenarios.md deleted file mode 100644 index ef56b9ec..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/4-mot-usage-scenarios.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: MOT Usage Scenarios -summary: MOT Usage Scenarios -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Usage Scenarios - -MOT can significantly improve an application's overall performance, depending on the characteristics of the workload. MOT improves the performance of transaction processing by making data access and transaction execution more efficient and by minimizing redirections, removing lock and latch contention between concurrently executing transactions. - -MOT's extreme speed stems from the fact that it is optimized around concurrent in-memory usage management (not just because it is in memory). Data storage, access and processing algorithms were designed from the ground up to take advantage of the latest state-of-the-art enhancements in in-memory and high-concurrency computing. - -MogDB enables an application to use any combination of MOT tables and standard disk-based tables. MOT is especially beneficial for your most active, high-contention and performance-sensitive application tables that have proven to be bottlenecks, and for tables that require predictable low-latency access and high throughput. - -MOT tables can be used for a variety of application use cases, which include: - -- **High-throughput Transaction Processing -** This is the primary scenario for using MOT, because it supports large transaction volumes that require consistently low latency for individual transactions. Examples of such applications are real-time decision systems, payment systems, financial instrument trading, sports betting, mobile gaming, ad delivery and so on. -- **Acceleration of Performance Bottlenecks -** High-contention tables can significantly benefit from using MOT, even when other tables are on disk. The conversion of such tables (in addition to related tables and tables that are referenced together in queries and transactions) results in a significant performance boost as the result of lower latencies, less contention and locks, and increased server throughput. -- **Elimination of Mid-Tier Cache -** Cloud and mobile applications tend to have periodic spikes of massive workload.
Additionally, many of these applications have an 80% or higher read workload, with frequent repetitive queries. To sustain the workload spikes, as well as to provide an optimal user experience with low-latency response times, applications sometimes deploy a mid-tier caching layer. Such additional layers increase development complexity and time, and also increase operational costs. MOT provides a great alternative, simplifying the application architecture with a consistent, high-performance data store, while shortening development cycles and reducing CAPEX and OPEX costs. -- **Large-scale Data Streaming and Data Ingestion -** MOT tables enable large-scale streamlined data processing in the Cloud (for Mobile, M2M and IoT), Transactional Processing (TP), Analytical Processing (AP) and Machine Learning (ML). MOT tables are especially good at consistently and quickly ingesting large volumes of data from many different sources at the same time. The data can later be processed, transformed and moved to slower disk-based tables. Alternatively, MOT enables the querying of consistent and up-to-date data, enabling real-time conclusions. In IoT and cloud applications with many real-time data streams, it is common to have special data ingestion and processing tiers. For instance, an Apache Kafka cluster can be used to ingest data of 100,000 events/sec with a 10msec latency. A periodic batch processing task enriches and converts the collected data into an alternative format to be placed into a relational database for further analysis. MOT can support such scenarios (while eliminating the separate data ingestion tier) by ingesting data streams directly into MOT relational tables, ready for analysis and decisions. This enables faster data collection and processing: MOT eliminates costly tiers and slow batch processing, increases consistency and the freshness of analyzed data, and lowers Total Cost of Ownership (TCO). -- **Lower TCO -** Higher resource efficiency and mid-tier elimination can save 30% to 90%. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/5-mot-performance-benchmarks.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/5-mot-performance-benchmarks.md deleted file mode 100644 index 8d68d86f..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/5-mot-performance-benchmarks.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: MOT Performance Benchmarks -summary: MOT Performance Benchmarks -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Performance Benchmarks - -Our performance tests are based on the TPC-C Benchmark that is commonly used both by industry and academia. - -Our tests used BenchmarkSQL (see **MOT Sample TPC-C Benchmark**) and generated the workload using interactive SQL commands, as opposed to stored procedures. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Using the stored procedures approach may produce even higher performance results because it involves significantly less networking roundtrips and database envelope SQL processing cycles. - -All tests that evaluated the performance of MogDB MOT vs. disk-based tables used synchronous logging and its optimized **group-commit=on** version in MOT. - -Finally, we performed an additional test in order to evaluate MOT's ability to quickly ingest massive quantities of data and to serve as an alternative to mid-tier data ingestion solutions. - -All tests were performed in June 2020.
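For reference, the MOT logging mode used in these tests corresponds to configuration along the following lines (a sketch; the mot.conf key names below are assumptions based on the MOT configuration template and should be verified against the template shipped with your installation):

```
# postgresql.conf - synchronous WAL logging for the server
synchronous_commit = on

# mot.conf - assumed keys enabling MOT's optimized group commit
enable_group_commit = true
group_commit_size = 16             # transactions per group (illustrative)
group_commit_timeout_usec = 2000   # max wait before a group is flushed
```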
- -The following shows various types of MOT performance benchmarks. - -## MOT Hardware - -The tests were performed on servers with the following configuration and with 10GbE networking - -- ARM64/Kunpeng 920-based 2-socket servers, model Taishan 2280 v2 (total 128 Cores), 800GB RAM, 1TB NVMe disk. OS: openEuler - -- ARM64/Kunpeng 920-based 4-socket servers, model Taishan 2480 v2 (total 256 Cores), 512GB RAM, 1TB NVMe disk. OS: openEuler - -- x86-based Dell servers, with 2 sockets of Intel Xeon Gold 6154 CPU @ 3GHz with 18 Cores (72 Cores, with hyper-threading=on), 1TB RAM, 1TB SSD. OS: CentOS 7.6 - -- x86-based SuperMicro server, with 8 sockets of Intel(R) Xeon(R) CPU E7-8890 v4 @ 2.20GHz with 24 cores (total 384 Cores, with hyper-threading=on), 1TB RAM, 1.2TB SSD (Seagate 1200 SSD 200GB, SAS 12Gb/s). OS: Ubuntu 16.04.2 LTS - -- x86-based Huawei server, with 4 sockets of Intel(R) Xeon(R) CPU E7-8890 v4 @ 2.2GHz (total 96 Cores, with hyper-threading=on), 512GB RAM, 2TB SSD. OS: CentOS 7.6 - -## MOT Results - Summary - -MOT provides higher performance than disk-based tables by a factor of 2.5x to 4.1x and reaches 4.8 million tpmC on ARM/Kunpeng-based servers with 256 cores. The results clearly demonstrate MOT's exceptional ability to scale up and utilize all hardware resources. Performance jumps as the quantity of CPU sockets and server cores increases. - -MOT delivers up to 30,000 tpmC/core on ARM/Kunpeng-based servers and up to 40,000 tpmC/core on x86-based servers. - -Due to a more efficient durability mechanism, in MOT the replication overhead of a Primary/Secondary High Availability scenario is 7% on ARM/Kunpeng and 2% on x86 servers, as opposed to the overhead in disk tables of 20% on ARM/Kunpeng and 15% on x86 servers. - -Finally, MOT delivers 2.5x lower latency, with TPC-C transaction response times 2 to 7 times faster. - -## MOT High Throughput - -The following shows the results of various MOT table high throughput tests. - -### ARM/Kunpeng 2-Socket 128 Cores - -**Performance** - -The following figure shows the results of testing the TPC-C benchmark on a Huawei ARM/Kunpeng server that has two sockets and 128 cores. - -Four types of tests were performed - - -- Two tests were performed on MOT tables and another two tests were performed on MogDB disk-based tables. -- Two of the tests were performed on a Single node (without high availability), meaning that no replication was performed to a secondary node. The other two tests were performed on Primary/Secondary nodes (with high availability), meaning that data written to the primary node was replicated to a secondary node. - -MOT tables are represented in orange and disk-based tables are represented in blue. - -**Figure 1** ARM/Kunpeng 2-Socket 128 Cores - Performance Benchmarks - -![arm-kunpeng-2-socket-128-cores-performance-benchmarks](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-performance-benchmarks-10.png) - -The results showed that: - -- As expected, the performance of MOT tables is significantly greater than that of disk-based tables in all cases. -- For a Single Node - 3.8M tpmC for MOT tables versus 1.5M tpmC for disk-based tables -- For a Primary/Secondary Node - 3.5M tpmC for MOT tables versus 1.2M tpmC for disk-based tables -- For production-grade (high-availability) servers (Primary/Secondary Node) that require replication, the benefit of using MOT tables is even more significant than for a Single Node (without high availability, meaning no replication).
- -- The MOT replication overhead of a Primary/Secondary High Availability scenario is 7% on ARM/Kunpeng and 2% on x86 servers, as opposed to the overhead of disk tables of 20% on ARM/Kunpeng and 15% on x86 servers. - -**Performance per CPU core** - -The following figure shows the TPC-C benchmark performance/throughput results per core of the tests performed on a Huawei ARM/Kunpeng server that has two sockets and 128 cores. The same four types of tests were performed (as described above). - -**Figure 2** ARM/Kunpeng 2-Socket 128 Cores - Performance per Core Benchmarks - -![arm-kunpeng-2-socket-128-cores-performance-per-core-benchmarks](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-performance-benchmarks-11.png) - -The results showed that, as expected, the performance of MOT tables is significantly greater per core than that of disk-based tables in all cases. They also show that for production-grade (high-availability) servers (Primary/Secondary Node) that require replication, the benefit of using MOT tables is even more significant than for a Single Node (without high availability, meaning no replication). - -### ARM/Kunpeng 4-Socket 256 Cores - -The following demonstrates MOT's excellent concurrency control performance by showing the tpmC per quantity of connections. - -**Figure 3** ARM/Kunpeng 4-Socket 256 Cores - Performance Benchmarks - -![arm-kunpeng-4-socket-256-cores-performance-benchmarks](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-performance-benchmarks-12.png) - -The results show that performance increases significantly even when there are many cores and that peak performance of 4.8M tpmC is achieved at 768 connections. - -### x86-based Servers - -- **8-Socket 384 Cores** - -The following demonstrates MOT’s excellent concurrency control performance by comparing the tpmC per quantity of connections between disk-based tables and MOT. This test was performed on an x86 server with eight sockets and 384 cores. The orange represents the results of the MOT table. - -**Figure 4** x86 8-Socket 384 Cores - Performance Benchmarks - -![x86-8-socket-384-cores-performance-benchmarks](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-performance-benchmarks-13.png) - -The results show that MOT tables significantly outperform disk-based tables and deliver highly efficient per-core performance on a 384-core server, reaching a total of over 3M tpmC. - -- **4-Socket 96 Cores** - -3.9 million tpmC was achieved by MOT on this 4-socket 96-core server. The following figure shows highly efficient MOT table performance per core, reaching 40,000 tpmC/core. - -**Figure 5** 4-Socket 96 Cores - Performance Benchmarks - -![4-socket-96-cores-performance-benchmarks](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-performance-benchmarks-14.png) - -## MOT Low Latency - -The following was measured on an ARM/Kunpeng 2-socket server (128 cores). The numbers are in milliseconds (ms). - -**Figure 1** Low Latency (90th%) - Performance Benchmarks - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-performance-benchmarks-15.png) - -MOT transactions are on average 2.5x faster, with MOT latency of 10.5 ms compared to 23-25 ms for disk tables. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The average was calculated by taking into account the percentage distribution of all five TPC-C transaction types.
For more information, you may refer to the description of TPC-C transactions in the **MOT Sample TPC-C Benchmark** section. - -**Figure 2** Low Latency (90th%, Transaction Average) - Performance Benchmarks - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-performance-benchmarks-16.png) - -## MOT RTO and Cold-Start Time - -### High Availability Recovery Time Objective (RTO) - -MOT is fully integrated into MogDB, including support for high-availability scenarios consisting of primary and secondary deployments. The WAL Redo Log's replication mechanism replicates changes to the secondary database node, where they are replayed. - -If a Failover event occurs, whether it is due to an unplanned primary node failure or due to a planned maintenance event, the secondary node quickly becomes active. The amount of time that it takes to recover and replay the WAL Redo Log and to enable connections is also referred to as the Recovery Time Objective (RTO). - -**The RTO of MogDB, including the MOT, is less than 10 seconds.** - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The Recovery Time Objective (RTO) is the duration of time and a service level within which a business process must be restored after a disaster in order to avoid unacceptable consequences associated with a break in continuity. In other words, the RTO is the answer to the question: "How much time did it take to recover after notification of a business process disruption?" - -In addition, as shown in the **MOT High Throughput** section, in MOT the replication overhead of a Primary/Secondary High Availability scenario is only 7% on ARM/Kunpeng servers and 2% on x86 servers, as opposed to the replication overhead of disk tables, which is 20% on ARM/Kunpeng and 15% on x86 servers. - -### Cold-Start Recovery Time - -Cold-start recovery time is the amount of time it takes for a system to become fully operational after being stopped. In in-memory databases, this includes the loading of all data and indexes into memory; thus it depends on data size, hardware bandwidth and the efficiency of the software algorithms that process them. - -Our MOT tests using ARM servers with NVMe disks demonstrate the ability to load **100 GB of database checkpoint in 40 seconds (2.5 GB/sec)**. Because MOT does not persist indexes, they are created at cold start, so the actual size of the loaded data + indexes is approximately 50% more. This can be converted to an **MOT cold-start time of 40 seconds for a data + index capacity of 150GB**, or **225 GB per minute (3.75 GB/sec)**. - -The following figure demonstrates the cold-start process and how long it takes to load data into an MOT table from the disk after a cold start. - -**Figure 1** Cold-Start Time - Performance Benchmarks - -![cold-start-time-performance-benchmarks](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-performance-benchmarks-17.png) - -- **Database Size -** The total amount of time to load the entire database (by its size in GB) is represented by the blue line and the **TIME (sec)** Y axis on the left. -- **Throughput -** The quantity of database GB throughput per second is represented by the orange line and the **Throughput GB/sec** Y axis on the right. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The performance demonstrated during the test is very close to the bandwidth of the SSD hardware.
Therefore, it is possible that higher (or lower) performance may be achieved on a different platform. - -## MOT Resource Utilization - -The following figure shows the resource utilization of the test performed on an x86 server with four sockets, 96 cores and 512GB RAM. It demonstrates that an MOT table is able to efficiently and consistently consume almost all available CPU resources. For example, it shows that almost 100% CPU utilization is achieved for 192 cores and 3.9M tpmC. - -- **tpmC -** The number of TPC-C transactions completed per minute is represented by the orange bar and the **tpmC** Y axis on the left. -- **CPU % Utilization -** The amount of CPU utilization is represented by the blue line and the **CPU %** Y axis on the right. - -**Figure 1** Resource Utilization - Performance Benchmarks - -![resource-utilization-performance-benchmarks](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-performance-benchmarks-18.png) - -## MOT Data Ingestion Speed - -This test simulates real-time data streams arriving from massive numbers of IoT, cloud or mobile devices that need to be quickly and continuously ingested into the database on a massive scale. - -- The test involved ingesting large quantities of data, as follows - - - - 10 million rows were sent by 500 threads, 2000 rounds, 10 records (rows) in each insert command; each record was 200 bytes. - - The client and database were on different machines. Database server - x86 2-socket, 72 cores. - -- Performance Results - - - **Throughput - 10,000** records/core or **2** MB/core. - - **Latency - 2.8 ms per 10-record** bulk insert (includes client-server networking) - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** We are projecting that multiple additional, and even significant, performance improvements will be made by MOT for this scenario. See **MOT Usage Scenarios** for more information about large-scale data streaming and data ingestion. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/introducing-mot.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/introducing-mot.md deleted file mode 100644 index b173e245..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/1-introducing-mot/introducing-mot.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Introducing MOT -summary: Introducing MOT -author: Guo Huan -date: 2023-05-22 ---- - -# Introducing MOT - -This chapter introduces MogDB Memory-Optimized Tables (MOT), describes its features and benefits, key technologies, applicable scenarios, performance benchmarks and its competitive advantages.
- -+ **[MOT Introduction](1-mot-introduction.md)** -+ **[MOT Features and Benefits](2-mot-features-and-benefits.md)** -+ **[MOT Key Technologies](3-mot-key-technologies.md)** -+ **[MOT Usage Scenarios](4-mot-usage-scenarios.md)** -+ **[MOT Performance Benchmarks](5-mot-performance-benchmarks.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/1-using-mot-overview.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/1-using-mot-overview.md deleted file mode 100644 index a54764d5..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/1-using-mot-overview.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Using MOT Overview -summary: Using MOT Overview -author: Zhang Cuiping -date: 2021-03-04 ---- - -# Using MOT Overview - -MOT is automatically deployed as part of MogDB. You may refer to the **MOT Preparation** section for a description of how to estimate and plan the required memory and storage resources in order to sustain your workload. The **MOT Deployment** section describes all the configuration settings in MOT, as well as non-mandatory options for server optimization. - -Using MOT tables is quite simple. The syntax of all MOT commands is the same as for disk-based tables and includes support for most standard PostgreSQL SQL, DDL and DML commands and features, such as stored procedures. Only the create and drop table statements in MOT differ from the statements for disk-based tables in MogDB. You may refer to the **MOT Usage** section for a description of these two simple commands, to learn how to convert a disk-based table into an MOT table, how to get higher performance using Query Native Compilation and PREPARE statements, and for a description of external tool support and the limitations of the MOT engine. - -The **MOT Administration** section describes how to perform database maintenance, monitoring and analysis of logs and reported errors. Lastly, the **MOT Sample TPC-C Benchmark** section describes how to perform a standard TPC-C benchmark. - -- Read the following topics to learn how to use MOT - - - ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/using-mot-overview-2.png) diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/2-mot-preparation.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/2-mot-preparation.md deleted file mode 100644 index 3cceb8b2..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/2-mot-preparation.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -title: MOT Preparation -summary: MOT Preparation -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Preparation - -The following describes the prerequisites and the memory and storage planning to perform in order to prepare to use MOT. - -## MOT Prerequisites - -The following specifies the hardware and software prerequisites for using MogDB MOT. - -### Supported Hardware - -MOT can utilize state-of-the-art hardware, as well as support existing hardware platforms. Both the x86 architecture and the ARM (Huawei Kunpeng) architecture are supported. - -MOT is fully aligned with the hardware supported by the MogDB database. For more information, see the *MogDB Installation Guide*. - -### CPU - -MOT delivers exceptional performance on many-core servers (scale-up). MOT significantly outperforms the competition in these environments and provides near-linear scaling and extremely high resource utilization.
- -Even so, users can already start realizing MOT's performance benefits on low-end, mid-range and high-end servers, starting from one or two CPU sockets, as well as four and even eight CPU sockets. Very high performance and resource utilization are also expected on very high-end servers that have 16 or even 32 sockets (for such cases, we recommend contacting Enmo support). - -### Memory - -MOT supports standard RAM/DRAM for its data and transaction management. All MOT tables' data and indexes reside in memory; therefore, the memory capacity must support the data capacity and still have space for further growth. For detailed information about memory requirements and planning, see the **MOT Memory and Storage Planning** section. - -### Storage IO - -MOT is a durable database and uses persistent storage (disk/SSD/NVMe drive[s]) for transaction log operations and periodic checkpoints. - -We recommend using a storage device with low latency, such as an SSD with a RAID-1 configuration, an NVMe drive or any enterprise-grade storage system. When appropriate hardware is used, database transaction processing and contention are the bottleneck, not the IO. - -For detailed memory requirements and planning, see the **MOT Memory and Storage Planning** section. - -### Supported Operating Systems - -MOT is fully aligned with the operating systems supported by MogDB. - -MOT supports both bare-metal and virtualized environments that run the following operating systems on a bare-metal server or virtual machine - - -- **x86 -** CentOS 7.6 and EulerOS 2.0 -- **ARM -** openEuler and EulerOS - -### OS Optimization - -MOT does not require any special modifications or the installation of new software. However, several optional optimizations can enhance performance. You may refer to the **MOT Server Optimization - x86** and **MOT Server Optimization - ARM Huawei Taishan 2P/4P** sections for a description of the optimizations that enable maximal performance. - -## MOT Memory and Storage Planning - -This section describes the considerations and guidelines for evaluating, estimating and planning the quantity of memory and storage capacity to suit your specific application needs. This section also describes the various data aspects that affect the quantity of required memory, such as the size of data and indexes for the planned tables, memory to sustain transaction management and how fast the data is growing. - -### MOT Memory Planning - -MOT belongs to the in-memory database (IMDB) class, in which all tables and indexes reside entirely in memory. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Memory storage is volatile, meaning that it requires power to maintain the stored information. Disk storage is persistent, meaning that it is written to disk, which is non-volatile storage. MOT uses both, having all data in memory, while persisting (by WAL logging) transactional changes to disk with strict consistency (in synchronous logging mode). - -Sufficient physical memory must exist on the server in order to maintain the tables in their initial state, as well as to accommodate the related workload and growth of data. All this is in addition to the memory that is required for the traditional disk-based engine, tables and sessions that support the workload of disk-based tables. Therefore, planning ahead for enough memory to contain them all is essential. - -Even so, you can get started with whatever amount of memory you have and perform basic tasks and evaluation tests.
Later, when you are ready for production, the following issues should be addressed. - -- **Memory Configuration Settings** - - Similar to standard PostgreSQL, the memory of the MogDB database process is controlled by the upper limit in its max_process_memory setting, which is defined in the postgresql.conf file. The MOT engine and all its components and threads reside within the MogDB process. Therefore, the memory allocated to MOT also operates within the upper boundary defined by max_process_memory for the entire MogDB database process. - - The amount of memory that MOT can reserve for itself is defined as a portion of max_process_memory. It is either a percentage of it or an absolute value that is less than it. This portion is defined in the mot.conf configuration file by the **min/max_mot_global_memory** and **min/max_mot_local_memory** settings. - - The portion of max_process_memory that can be used by MOT must still leave at least 2 GB available for the PG (MogDB) envelope. Therefore, in order to ensure this, MOT verifies the following during database startup (a hypothetical sizing sketch is shown below) - - - ``` - (max_mot_global_memory + max_mot_local_memory) + 2GB < max_process_memory - ``` - - If this limit is breached, then MOT memory internal limits are adjusted in order to provide the maximum possible within the limitations described above. This adjustment is performed during startup and calculates the value of MOT max memory accordingly. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** MOT max memory is a logically calculated value of either the configured settings or their adjusted values of (max_mot_global_memory + max_mot_local_memory). - - In this case, a warning is issued to the server log, as shown below - - - **Warning Examples** - - Two messages are reported - the problem and the solution. - - The following is an example of a warning message reporting the problem - - - ``` - [WARNING] MOT engine maximum memory definitions (global: 9830 MB, local: 1843 MB, session large store: 0 MB, total: 11673 MB) breach GaussDB maximum process memory restriction (12288 MB) and/or total system memory (64243 MB). MOT values shall be adjusted accordingly to preserve required gap (2048 MB). - ``` - - The following is an example of a warning message indicating that MOT is automatically adjusting the memory limits - - - ``` - [WARNING] Adjusting MOT memory limits: global = 8623 MB, local = 1617 MB, session large store = 0 MB, total = 10240 MB - ``` - - This is the only place that shows the new memory limits. - - Additionally, MOT does not allow the insertion of additional data when the total memory usage approaches the chosen memory limits. The threshold for determining when additional data insertions are no longer allowed is defined as a percentage of MOT max memory (which is a calculated value, as described above). The default is 90, meaning 90%. Attempting to add additional data over this threshold returns an error to the user and is also registered in the database log file. - -- **Minimum and Maximum** - - In order to secure memory for future operations, MOT pre-allocates memory based on the minimum global and local settings. The database administrator should specify the minimum amount of memory required for the MOT tables and sessions to sustain their workload. This ensures that this minimal memory is allocated to MOT even if another excessive memory-consuming application runs on the same server as the database and competes with the database for memory resources. The maximum values are used to limit memory growth.
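For illustration only, the following hypothetical mot.conf sizing sketch satisfies the startup check above on a server whose postgresql.conf sets max_process_memory to 16 GB (all values here are assumptions for the example, not recommendations):

```
# Assumed in postgresql.conf: max_process_memory = 16GB
# The sketch must satisfy:
# (max_mot_global_memory + max_mot_local_memory) + 2GB < max_process_memory
min_mot_global_memory = 4 GB    # minimum pre-allocated at startup for data and indexes
max_mot_global_memory = 10 GB   # long-term memory for table data and indexes
min_mot_local_memory = 0 MB     # no local pre-allocation in this sketch
max_mot_local_memory = 2 GB     # short-term memory for sessions
# 10 GB + 2 GB + 2 GB = 14 GB < 16 GB, so the check passes
```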
- -- **Global and Local** - - The memory used by MOT comprises two parts - - - - **Global Memory -** Global memory is a long-term memory pool that contains the data and indexes of MOT tables. It is evenly distributed across NUMA-nodes and is shared by all CPU cores. - - **Local Memory -** Local memory is a memory pool used for short-term objects. Its primary consumers are sessions handling transactions. These sessions store data changes in the part of the memory dedicated to the relevant specific transaction (known as *transaction private memory*). Data changes are moved to the global memory at the commit phase. Memory object allocation is performed in a NUMA-local manner in order to achieve the lowest possible latency. - - Deallocated objects are put back in the relevant memory pools. Minimal use of operating system memory allocation (malloc) functions during transactions circumvents unnecessary locks and latches. - - The allocation of these two memory parts is controlled by the dedicated **min/max_mot_global_memory** and **min/max_mot_local_memory** settings. If MOT global memory usage gets too close to this defined maximum, then MOT protects itself and does not accept new data. Attempts to allocate memory beyond this limit are denied and an error is reported to the user. - -- **Minimum Memory Requirements** - - To get started and perform a minimal evaluation of MOT performance, there are a few requirements. - - Make sure that the **max_process_memory** (as defined in **postgresql.conf**) has sufficient capacity for MOT tables and sessions (configured by **min/max_mot_global_memory** and **min/max_mot_local_memory**), in addition to the disk tables buffer and extra memory. For simple tests, the default **mot.conf** settings can be used. - -- **Actual Memory Requirements During Production** - - In a typical OLTP workload, with 80:20 read:write ratio on average, MOT memory usage per table is 60% higher than in disk-based tables (this includes both the data and the indexes). This is due to the use of more optimal data structures and algorithms that enable faster access, with CPU-cache awareness and memory-prefetching. - - The actual memory requirement for a specific application depends on the quantity of data, the expected workload and especially on the data growth. - -- **Max Global Memory Planning - Data + Index Size** - - To plan for maximum global memory - - - 1. Determine the size of a specific disk table (including both its data and all its indexes). The following statistical query can be used to determine the data size of the **customer** table and the **customer_pkey** index size - - - **Data size -** select pg_relation_size('customer'); - - **Index -** select pg_relation_size('customer_pkey'); - 2. Add 60%, which is the common requirement in MOT relative to the current size of the disk-based data and index. - 3. Add an additional percentage for the expected growth of data. For example - - - 5% monthly growth = 80% yearly growth (1.05^12). Thus, in order to sustain a year's growth, allocate 80% more memory than is currently used by the tables. - - This completes the estimation and planning of the max_mot_global_memory value. The actual setting can be defined either as an absolute value or a percentage of the Postgres max_process_memory. The exact value is typically fine-tuned during deployment. - -- **Max Local Memory Planning - Concurrent Session Support** - - Local memory needs are primarily a function of the quantity of concurrent sessions.
In a typical OLTP workload, an average session uses up to 8 MB. This should be multiplied by the quantity of sessions and then a little bit extra should be added. - A memory calculation can be performed in this manner and then fine-tuned, as follows - - - ``` - SESSION_COUNT * SESSION_SIZE (8 MB) + SOME_EXTRA (100MB should be enough) - ``` - - The default specifies 15% of Postgres's max_process_memory, which by default is 12 GB. This equals 1.8 GB, which is sufficient for roughly 230 sessions - the default for **max_mot_local_memory**. The actual setting can be defined either in absolute values or as a percentage of the Postgres max_process_memory. The exact value is typically fine-tuned during deployment. - - **Unusually Large Transactions** - - Some transactions are unusually large because they apply changes to a large number of rows. This may increase a single session's local memory up to the maximum allowed limit, which is 1 GB. For example - - - ``` - delete from SOME_VERY_LARGE_TABLE; - ``` - - Take this scenario into consideration when configuring the max_mot_local_memory setting, as well as during application development. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** You may refer to the **MEMORY (MOT)** section for more information about configuration settings. - -### Storage IO - -MOT is a memory-optimized, persistent database storage engine. One or more disk drives are required for storing the Redo Log (WAL) and a periodic checkpoint. - -It is recommended to use a storage device with low latency, such as SSD with a RAID-1 configuration, NVMe or any enterprise-grade storage system. When appropriate hardware is used, the database transaction processing and contention are the bottleneck, not the IO. - -Since the persistent storage is much slower than RAM memory, the IO operations (logging and checkpoint) can create a bottleneck for both in-memory and memory-optimized databases. However, MOT has a highly efficient durability design and implementation that is optimized for modern hardware (such as SSD and NVMe). In addition, MOT has minimized and optimized writing points (for example, by using parallel logging, a single log record per transaction and NUMA-aware transaction group writing) and has minimized the data written to disk (for example, only logging the delta or updated columns of the changed records and only logging a transaction at the commit phase). - -### Required Capacity - -The required capacity is determined by the requirements of checkpointing and logging, as described below - - -- **Checkpointing** - - A checkpoint saves a snapshot of all the data to disk. - - Twice the size of all data should be allocated for checkpointing. There is no need to allocate space for the indexes for checkpointing - - Checkpointing = 2x the MOT Data Size (rows only, index is not persistent). - - Twice the size is required because a snapshot of the entire data is saved to disk, and in addition, the same amount of space should be allocated for the checkpoint that is in progress. When a checkpoint process finishes, the previous checkpoint files are deleted. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** In the next MogDB release, MOT will have an incremental checkpoint feature, which will significantly reduce this storage capacity requirement. - -- **Logging** - - MOT table log records are written to the same database transaction log as the other records of disk-based tables.
- - The size of the log depends on the transactional throughput, the size of the data changes and the time between checkpoints (at each checkpoint, the Redo Log is truncated and starts to expand again). - - MOT tables use less log bandwidth and have lower IO contention than disk-based tables. This is enabled by multiple mechanisms. - - For example, MOT does not log every operation before a transaction has been completed. It is only logged at the commit phase and only the updated delta record is logged (not full records like for disk-based tables). - - In order to ensure that the log IO device does not become a bottleneck, the log file must be placed on a drive that has low latency. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** You may refer to the **STORAGE (MOT)** section for more information about configuration settings. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/3-mot-deployment.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/3-mot-deployment.md deleted file mode 100644 index 1bf1320b..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/3-mot-deployment.md +++ /dev/null @@ -1,660 +0,0 @@ ---- -title: MOT Deployment -summary: MOT Deployment -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Deployment - -The following sections describe various mandatory and optional settings for optimal deployment. - -## MOT Server Optimization - x86 - -Generally, databases are bound by the following components - - -- **CPU -** A faster CPU speeds up any CPU-bound database. -- **Disk -** High-speed SSD/NVME speeds up any I/O-bound database. -- **Network -** A faster network speeds up any **SQL\\Net**-bound database. - -In addition to the above, the following general-purpose server settings are used by default and may significantly affect a database's performance. - -MOT performance tuning is a crucial step for ensuring fast application functionality and data retrieval. MOT can utilize state-of-the-art hardware, and therefore it is extremely important to tune each system in order to achieve maximum throughput. - -The following are optional settings for optimizing MOT database performance running on an Intel x86 server. These settings are optimal for high throughput workloads - - -### BIOS - -- Hyper Threading - ON - - Activation (HT=ON) is highly recommended. - - We recommend turning hyper threading ON while running OLTP workloads on MOT. When hyper-threading is used, some OLTP workloads demonstrate performance gains of up to 40%. - -### OS Environment Settings - -- NUMA - - Disable NUMA balancing, as described below. MOT performs its own memory management with extremely efficient NUMA-awareness, far more efficiently than the default methods used by the operating system. - - ``` - echo 0 > /proc/sys/kernel/numa_balancing - ``` - -- Services - - Disable Services, as described below - - - ``` - service irqbalance stop # MANDATORY - service sysmonitor stop # OPTIONAL, performance - service rsyslog stop # OPTIONAL, performance - ``` - -- Tuned Service - - The following section is mandatory. - - The server must run the throughput-performance profile - - - ``` - [...]$ tuned-adm profile throughput-performance - ``` - - The **throughput-performance** profile applies broadly applicable tuning that provides excellent performance across a variety of common server workloads.
- - Other less suitable profiles for MogDB and MOT server that may affect MOT's overall performance are - balanced, desktop, latency-performance, network-latency, network-throughput and powersave. - -- Sysctl - - The following lists the recommended operating system settings for best performance. - - - Add the following settings to /etc/sysctl.conf and run sysctl -p - - ```bash - net.ipv4.ip_local_port_range = 9000 65535 - kernel.sysrq = 1 - kernel.panic_on_oops = 1 - kernel.panic = 5 - kernel.hung_task_timeout_secs = 3600 - kernel.hung_task_panic = 1 - vm.oom_dump_tasks = 1 - kernel.softlockup_panic = 1 - fs.file-max = 640000 - kernel.msgmnb = 7000000 - kernel.sched_min_granularity_ns = 10000000 - kernel.sched_wakeup_granularity_ns = 15000000 - kernel.numa_balancing=0 - vm.max_map_count = 1048576 - net.ipv4.tcp_max_tw_buckets = 10000 - net.ipv4.tcp_tw_reuse = 1 - net.ipv4.tcp_tw_recycle = 1 - net.ipv4.tcp_keepalive_time = 30 - net.ipv4.tcp_keepalive_probes = 9 - net.ipv4.tcp_keepalive_intvl = 30 - net.ipv4.tcp_retries2 = 80 - kernel.sem = 250 6400000 1000 25600 - net.core.wmem_max = 21299200 - net.core.rmem_max = 21299200 - net.core.wmem_default = 21299200 - net.core.rmem_default = 21299200 - #net.sctp.sctp_mem = 94500000 915000000 927000000 - #net.sctp.sctp_rmem = 8192 250000 16777216 - #net.sctp.sctp_wmem = 8192 250000 16777216 - net.ipv4.tcp_rmem = 8192 250000 16777216 - net.ipv4.tcp_wmem = 8192 250000 16777216 - net.core.somaxconn = 65535 - vm.min_free_kbytes = 26351629 - net.core.netdev_max_backlog = 65535 - net.ipv4.tcp_max_syn_backlog = 65535 - #net.sctp.addip_enable = 0 - net.ipv4.tcp_syncookies = 1 - vm.overcommit_memory = 0 - net.ipv4.tcp_retries1 = 5 - net.ipv4.tcp_syn_retries = 5 - ``` - - - Update the section of /etc/security/limits.conf to the following - - - ```bash - soft nofile 100000 - hard nofile 100000 - ``` - - The **soft** and **hard** limit settings specify the number of files that a process may have open at once. The soft limit may be changed by each process running under these limits, up to the hard limit value. - -- Disk/SSD - - The following describes how to ensure that disk R/W performance is suitable for database synchronous commit mode. - - To do so, test your disk bandwidth using the following - - ``` - [...]$ sync; dd if=/dev/zero of=testfile bs=1M count=1024; sync - 1024+0 records in - 1024+0 records out - 1073741824 bytes (1.1 GB) copied, 1.36034 s, 789 MB/s - ``` - - If the disk bandwidth is significantly below the number shown above (789 MB/s), it may create a performance bottleneck for MogDB, and especially for MOT. - -### Network - -Use a 10Gbps network or higher. - -To verify, use iperf, as follows - - -``` -Server side: iperf -s -Client side: iperf -c -``` - -- rc.local - Network Card Tuning - - The following optional settings have a significant effect on performance - - - 1. Copy set_irq_affinity.sh to /var/scripts/. - - 2. Add the following to /etc/rc.d/rc.local and run chmod on it in order to ensure that the script is executed during boot - - - ```bash - chmod +x /etc/rc.d/rc.local - /var/scripts/set_irq_affinity.sh -x all - ethtool -K gro off - ethtool -C adaptive-rx on adaptive-tx on - Replace with the network card name, e.g. ens5f1 - ``` - -## MOT Server Optimization - ARM Huawei Taishan 2P/4P - -The following are optional settings for optimizing MOT database performance running on an ARM/Kunpeng-based Huawei Taishan 2280 v2 server powered by 2-sockets with a total of 256 Cores and Taishan 2480 v2 server powered by 4-sockets with a total of 256 Cores.
- -Unless indicated otherwise, the following settings are for both client and server machines - - -### BIOS - -Modify related BIOS settings, as follows - - -1. Select **BIOS** - **Advanced** - **MISC Config**. Set **Support Smmu** to **Disabled**. - -2. Select **BIOS** - **Advanced** - **MISC Config**. Set **CPU Prefetching Configuration** to **Disabled**. - - ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-deployment-1.png) - -3. Select **BIOS** - **Advanced** - **Memory Config**. Set **Die Interleaving** to **Disabled**. - - ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-deployment-2.png) - -4. Select **BIOS** - **Advanced** - **Performance Config**. Set **Power Policy** to **Performance**. - - ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-deployment-3.png) - -### OS - Kernel and Boot - -- The following operating system kernel and boot parameters are usually configured by a sysadmin. - - Configure the kernel parameters, as follows - - - ```bash - net.ipv4.ip_local_port_range = 9000 65535 - kernel.sysrq = 1 - kernel.panic_on_oops = 1 - kernel.panic = 5 - kernel.hung_task_timeout_secs = 3600 - kernel.hung_task_panic = 1 - vm.oom_dump_tasks = 1 - kernel.softlockup_panic = 1 - fs.file-max = 640000 - kernel.msgmnb = 7000000 - kernel.sched_min_granularity_ns = 10000000 - kernel.sched_wakeup_granularity_ns = 15000000 - kernel.numa_balancing=0 - vm.max_map_count = 1048576 - net.ipv4.tcp_max_tw_buckets = 10000 - net.ipv4.tcp_tw_reuse = 1 - net.ipv4.tcp_tw_recycle = 1 - net.ipv4.tcp_keepalive_time = 30 - net.ipv4.tcp_keepalive_probes = 9 - net.ipv4.tcp_keepalive_intvl = 30 - net.ipv4.tcp_retries2 = 80 - kernel.sem = 32000 1024000000 500 32000 - kernel.shmall = 52805669 - kernel.shmmax = 18446744073692774399 - sys.fs.file-max = 6536438 - net.core.wmem_max = 21299200 - net.core.rmem_max = 21299200 - net.core.wmem_default = 21299200 - net.core.rmem_default = 21299200 - net.ipv4.tcp_rmem = 8192 250000 16777216 - net.ipv4.tcp_wmem = 8192 250000 16777216 - net.core.somaxconn = 65535 - vm.min_free_kbytes = 5270325 - net.core.netdev_max_backlog = 65535 - net.ipv4.tcp_max_syn_backlog = 65535 - net.ipv4.tcp_syncookies = 1 - vm.overcommit_memory = 0 - net.ipv4.tcp_retries1 = 5 - net.ipv4.tcp_syn_retries = 5 - ##NEW - kernel.sched_autogroup_enabled=0 - kernel.sched_min_granularity_ns=2000000 - kernel.sched_latency_ns=10000000 - kernel.sched_wakeup_granularity_ns=5000000 - kernel.sched_migration_cost_ns=500000 - vm.dirty_background_bytes=33554432 - kernel.shmmax=21474836480 - net.ipv4.tcp_timestamps = 0 - net.ipv6.conf.all.disable_ipv6=1 - net.ipv6.conf.default.disable_ipv6=1 - net.ipv4.tcp_keepalive_time=600 - net.ipv4.tcp_keepalive_probes=3 - kernel.core_uses_pid=1 - ``` - -- Tuned Service - - The following section is mandatory. - - The server must run a throughput-performance profile - - - ``` - [...]$ tuned-adm profile throughput-performance - ``` - - The **throughput-performance** profile applies broadly applicable tuning that provides excellent performance across a variety of common server workloads. - - Other less suitable profiles for MogDB and MOT server that may affect MOT's overall performance are - balanced, desktop, latency-performance, network-latency, network-throughput and powersave. - -- Boot Tuning - - Add **iommu.passthrough=1** to the **kernel boot arguments**. - - When operating in **pass-through** mode, the adapter does not require **DMA translation to the memory**, which improves performance.
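As a purely illustrative sketch (boot-loader files and paths vary by distribution, so verify the procedure for your environment), on a GRUB2-based system the boot argument could be added as follows:

```
# Assumption: GRUB2 boot loader; file locations may differ on your system
# 1. Append iommu.passthrough=1 to the GRUB_CMDLINE_LINUX line in /etc/default/grub
# 2. Regenerate the GRUB configuration and reboot
grub2-mkconfig -o /boot/grub2/grub.cfg
reboot
# 3. After the reboot, confirm that the argument took effect
cat /proc/cmdline
```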
## MOT Configuration Settings - -MOT comes preconfigured so that working MOT tables can be created right away. For best results, it is recommended to customize the MOT configuration (defined in the file named mot.conf) according to your application's specific requirements and your preferences. - -This file is read only once, upon server startup. If you edit this file while the system is running, then the server must be reloaded in order for the changes to take effect. - -The mot.conf file is located in the same folder as the postgresql.conf configuration file. - -Read the **General Guidelines** section and then review and configure the following sections of the mot.conf file, as needed. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The topics listed above describe each of the setting sections in the mot.conf file. In addition to the above topics, for an overview of all the aspects of a specific MOT feature (such as Recovery), you may refer to the relevant topic of this user manual. For example, the mot.conf file has a Recovery section that contains settings that affect MOT recovery and this is described in the **MOT Recovery** section that is listed above. In addition, for a full description of all aspects of Recovery, you may refer to the **MOT Recovery** section of the Administration chapter of this user manual. Reference links are also provided in each relevant section of the descriptions below. - -The following topics describe each section in the mot.conf file and the settings that it contains, as well as the default value of each. - -### General Guidelines - -The following are general guidelines for editing the mot.conf file. - -- Each setting appears with its default value as follows - - - ``` - # name = value - ``` - -- Blank/white space is acceptable. - -- Comments are indicated by placing a number sign (#) anywhere on a line. - -- The default values of each setting appear as a comment throughout this file. - -- If a parameter is uncommented and a new value is set, the new value takes effect. - -- Changes to the mot.conf file are applied only at the start or reload of the database server. - -Memory Units are represented as follows - - -- KB - Kilobytes -- MB - Megabytes -- GB - Gigabytes -- TB - Terabytes - -If no memory units are specified, then bytes are assumed. - -Some memory units are represented as a percentage of the **max_process_memory** setting that is configured in **postgresql.conf**. For example - **20%**. - -Time units are represented as follows - - -- us - microseconds (or micros) -- ms - milliseconds (or millis) -- s - seconds (or secs) -- min - minutes (or mins) -- h - hours -- d - days - -If no time units are specified, then microseconds are assumed. - -### REDO LOG (MOT) - -- **enable_group_commit = false** - - Specifies whether to use group commit. - - This option is only relevant when MogDB is configured to use synchronous commit, meaning only when the synchronous_commit setting in postgresql.conf is configured to any value other than off. - -- **group_commit_size = 16** - -- **group_commit_timeout = 10 ms** - - This option is only relevant when the MOT engine has been configured to **Synchronous Group Commit** logging. This means that the synchronous_commit setting in postgresql.conf is configured to true and the enable_group_commit parameter in the mot.conf configuration file is configured to true.
- - Defines which of the following determines when a group of transactions is recorded in the WAL Redo Log - - - **group_commit_size** - The quantity of committed transactions in a group. For example, **16** means that when 16 transactions in the same group have been committed by their client application, then an entry is written to disk in the WAL Redo Log for each of the 16 transactions. - - **group_commit_timeout** - A timeout period in ms. For example, **10** means that after 10 ms, an entry is written to disk in the WAL Redo Log for each of the transactions in the same group that have been committed by their client application in the last 10 ms. - - A commit group is closed after either the configured number of transactions has arrived or after the configured timeout period since the group was opened. After the group is closed, all the transactions in the group wait for a group flush to complete execution and then notify the client that each transaction has ended. - - You may refer to the **MOT Logging - WAL Redo Log** section for more information about the WAL Redo Log and synchronous group commit logging. - -### CHECKPOINT (MOT) - -- **checkpoint_dir =** - - Specifies the directory in which checkpoint data is to be stored. The default location is in the data folder of each data node. - -- **checkpoint_segsize = 16 MB** - - Specifies the segment size used during checkpoint. Checkpoint is performed in segments. When a segment is full, it is serialized to disk and a new segment is opened for the subsequent checkpoint data. - -- **checkpoint_workers = 3** - - Specifies the number of workers to use during checkpoint. - - Checkpoint is performed in parallel by several MOT engine workers. The quantity of workers may substantially affect the overall performance of the entire checkpoint operation, as well as the operation of other running transactions. To achieve a shorter checkpoint duration, a larger number of workers should be used, up to the optimal number (which varies based on the hardware and workload). However, be aware that if this number is too large, it may negatively impact the execution time of other running transactions. Keep this number as low as possible to minimize the effect on the runtime of other running transactions, but at the cost of longer checkpoint duration. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** You may refer to the **MOT Checkpoints** section for more information about configuration settings. - -### RECOVERY (MOT) - -- **checkpoint_recovery_workers = 3** - - Specifies the number of workers (threads) to use during checkpoint data recovery. Each MOT engine worker runs on its own core and can process a different table in parallel by reading it into memory. For example, while the default is three workers, you might prefer to set this parameter to the number of cores that are available for processing. After recovery these threads are stopped and killed. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** You may refer to the **MOT Recovery** section for more information about configuration settings. - -### STATISTICS (MOT) - -- **enable_stats = false** - - Specifies whether periodic statistics are printed. - -- **print_stats_period = 10 minute** - - Configures the time period for printing a summary statistics report. - -- **print_full_stats_period = 1 hours** - - Configures the time period for printing a full statistics report.
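For illustration, a hypothetical mot.conf fragment that enables periodic statistics with the default reporting periods is sketched below; note that at least one of the per-section switches described next must also be enabled, otherwise the report is suppressed:

```
# Hypothetical example - the period values mirror the defaults described above
enable_stats = true
print_stats_period = 10 minute
print_full_stats_period = 1 hours
# Enable at least one report section (see the switches described below), e.g.:
enable_memory_stats = true
```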
- - The following settings configure the various sections included in the periodic statistics report. If none of them are configured, then the statistics report is suppressed. - -- **enable_log_recovery_stats = false** - - Log recovery statistics contain various Redo Log recovery metrics. - -- **enable_db_session_stats = false** - - Database session statistics contain transaction events, such as commits, rollbacks and so on. - -- **enable_network_stats = false** - - Network statistics contain connection/disconnection events. - -- **enable_log_stats = false** - - Log statistics contain details regarding the Redo Log. - -- **enable_memory_stats = false** - - Memory statistics contain memory-layer details. - -- **enable_process_stats = false** - - Process statistics contain total memory and CPU consumption for the current process. - -- **enable_system_stats = false** - - System statistics contain total memory and CPU consumption for the entire system. - -- **enable_jit_stats = false** - - JIT statistics contain information regarding JIT query compilation and execution. - -### ERROR LOG (MOT) - -- **log_level = INFO** - - Configures the log level of messages issued by the MOT engine and recorded in the Error log of the database server. Valid values are PANIC, ERROR, WARN, INFO, TRACE, DEBUG, DIAG1 and DIAG2. - -- **Log.COMPONENT.LOGGER.log_level=LOG_LEVEL** - - Configures specific loggers using the syntax described below. - - For example, to configure the TRACE log level for the ThreadIdPool logger in the System component, use the following syntax - - - ``` - Log.System.ThreadIdPool.log_level=TRACE - ``` - - To configure the log level for all loggers under some component, use the following syntax - - - ``` - Log.COMPONENT.log_level=LOG_LEVEL - ``` - - For example - - - ``` - Log.System.log_level=DEBUG - ``` - -### MEMORY (MOT) - -- **enable_numa = true** - - Specifies whether to use NUMA-aware memory allocation. - - When disabled, all affinity configurations are disabled as well. - - The MOT engine assumes that all the available NUMA nodes have memory. If the machine has some special configuration in which some of the NUMA nodes have no memory, then the MOT engine initialization and hence the database server startup will fail. In such machines, it is recommended that this configuration value be set to false, in order to prevent startup failures and let the MOT engine function normally without using NUMA-aware memory allocation. - -- **affinity_mode = fill-physical-first** - - Configures the affinity mode of threads for the user session and internal MOT tasks. - - When a thread pool is used, this value is ignored for user sessions, as their affinity is governed by the thread pool. However, it is still used for internal MOT tasks. - - Valid values are **fill-socket-first**, **equal-per-socket**, **fill-physical-first** and **none** - - - - **Fill-socket-first** attaches threads to cores in the same socket until the socket is full and then moves to the next socket. - - **Equal-per-socket** spreads threads evenly among all sockets. - - **Fill-physical-first** attaches threads to physical cores in the same socket until all physical cores are employed and then moves to the next socket. When all physical cores are used, then the process begins again with hyper-threaded cores. - - **None** disables any affinity configuration and lets the system scheduler determine on which core each thread is scheduled to run.
- -- **lazy_load_chunk_directory = true** - - Configures the chunk directory mode that is used for memory chunk lookup. - - **Lazy** mode configures the chunk directory to load parts of it on demand, thus reducing the initial memory footprint (from 1 GB to 1 MB approximately). However, this may result in minor performance penalties and errors in extreme conditions of memory distress. In contrast, using a **non-lazy** chunk directory allocates an additional 1 GB of initial memory, produces slightly higher performance and ensures that chunk directory errors are avoided during memory distress. - -- **reserve_memory_mode = virtual** - - Configures the memory reservation mode (either **physical** or **virtual**). - - Whenever memory is allocated from the kernel, this configuration value is consulted to determine whether the allocated memory is to be resident (**physical**) or not (**virtual**). This relates primarily to preallocation, but may also affect runtime allocations. For **physical** reservation mode, the entire allocated memory region is made resident by forcing page faults on all pages spanned by the memory region. Configuring **virtual** memory reservation may result in faster memory allocation (particularly during preallocation), but can cause page faults during initial access (and thus a slight performance hit) and more severe errors when physical memory is unavailable. In contrast, physical memory allocation is slower, but later access is both faster and guaranteed. - -- **store_memory_policy = compact** - - Configures the memory storage policy (**compact** or **expanding**). - - When **compact** policy is defined, unused memory is released back to the kernel, until the lower memory limit is reached (see **min_mot_memory** below). In **expanding** policy, unused memory is stored in the MOT engine for later reuse. A **compact** storage policy reduces the memory footprint of the MOT engine, but may occasionally result in minor performance degradation. In addition, it may result in unavailable memory during memory distress. In contrast, **expanding** mode uses more memory, but results in faster memory allocation and provides a greater guarantee that memory can be re-allocated after being de-allocated. - -- **chunk_alloc_policy = auto** - - Configures the chunk allocation policy for global memory. - - MOT memory is organized in chunks of 2 MB each. The source NUMA node and the memory layout of each chunk affect the spread of table data among NUMA nodes, and therefore can significantly affect the data access time. When allocating a chunk on a specific NUMA node, the allocation policy is consulted. - - Available values are **auto**, **local**, **page-interleaved**, **chunk-interleaved** and **native** - - - - **Auto** policy selects a chunk allocation policy based on the current hardware. - - **Local** policy allocates each chunk on its respective NUMA node. - - **Page-interleaved** policy allocates chunks that are composed of interleaved 4-kilobyte memory pages from all NUMA nodes. - - **Chunk-interleaved** policy allocates chunks in a round robin fashion from all NUMA nodes. - - **Native** policy allocates chunks by calling the native system memory allocator. - -- **chunk_prealloc_worker_count = 8** - - Configures the number of workers per NUMA node participating in memory preallocation. - -- **max_mot_global_memory = 80%** - - Configures the maximum memory limit for the global memory of the MOT engine.
Specifying a percentage value relates to the total defined by **max_process_memory** configured in **postgresql.conf**. - - The MOT engine memory is divided into global (long-term) memory that is mainly used to store user data and local (short-term) memory that is mainly used by user sessions for local needs. - - Any attempt to allocate memory beyond this limit is denied and an error is reported to the user. Ensure that the sum of **max_mot_global_memory** and **max_mot_local_memory** does not exceed the **max_process_memory** configured in **postgresql.conf**. - -- **min_mot_global_memory = 0 MB** - - Configures the minimum memory limit for the global memory of the MOT engine. - - Specifying a percentage value relates to the total defined by the **max_process_memory** configured in **postgresql.conf**. - - This value is used for the preallocation of memory during startup, as well as to ensure that a minimum amount of memory is available for the MOT engine during its normal operation. When using **compact** storage policy (see **store_memory_policy** above), this value designates the lower limit under which memory is not released back to the kernel, but rather kept in the MOT engine for later reuse. - -- **max_mot_local_memory = 15%** - - Configures the maximum memory limit for the local memory of the MOT engine. - - Specifying a percentage value relates to the total defined by the **max_process_memory** configured in **postgresql.conf**. - - MOT engine memory is divided into global (long-term) memory that is mainly used to store user data and local (short-term) memory that is mainly used by user sessions for local needs. - - Any attempt to allocate memory beyond this limit is denied and an error is reported to the user. Ensure that the sum of **max_mot_global_memory** and **max_mot_local_memory** does not exceed the **max_process_memory** configured in **postgresql.conf**. - -- **min_mot_local_memory = 0 MB** - - Configures the minimum memory limit for the local memory of the MOT engine. - - Specifying a percentage value relates to the total defined by the **max_process_memory** configured in **postgresql.conf**. - - This value is used for preallocation of memory during startup, as well as to ensure that a minimum amount of memory is available for the MOT engine during its normal operation. When using compact storage policy (see **store_memory_policy** above), this value designates the lower limit under which memory is not released back to the kernel, but rather kept in the MOT engine for later reuse. - -- **max_mot_session_memory = 0 MB** - - Configures the maximum memory limit for a single session in the MOT engine. - - Typically, sessions in the MOT engine can allocate as much local memory as needed, so long as the local memory limit is not exceeded. To prevent a single session from taking too much memory, and thereby denying memory from other sessions, this configuration item is used to restrict small session-local memory allocations (up to 1,022 KB). - - Note that this configuration item does not affect large or huge session-local memory allocations. - - A value of zero denotes no restriction on any session-local small allocations per session, except for the restriction arising from the local memory allocation limit configured by **max_mot_local_memory**. - - Note: Percentage values cannot be set for this configuration item. - -- **min_mot_session_memory = 0 MB** - - Configures the minimum memory reservation for a single session in the MOT engine.
- -This value is used to preallocate memory during session creation, as well as to ensure that a minimum amount of memory is available for the session to perform its normal operation. - - Note: Percentage values cannot be set for this configuration item. - -- **session_large_buffer_store_size = 0 MB** - - Configures the large buffer store for sessions. - - When a user session executes a query that requires a lot of memory (for example, when using many rows), the large buffer store is used to increase the certainty level that such memory is available and to serve this memory request more quickly. Any memory allocation for a session exceeding 1,022 KB is considered a large memory allocation. If the large buffer store is not used or is depleted, such allocations are treated as huge allocations that are served directly from the kernel. - - Note: Percentage values cannot be set for this configuration item. - -- **session_large_buffer_store_max_object_size = 0 MB** - - Configures the maximum object size in the large allocation buffer store for sessions. - - Internally, the large buffer store is divided into objects of varying sizes. This value is used to set an upper limit on objects originating from the large buffer store, as well as to determine the internal division of the buffer store into objects of various sizes. - - This size cannot exceed 1⁄8 of the **session_large_buffer_store_size**. If it does, it is adjusted to the maximum possible. - - Note: Percentage values cannot be set for this configuration item. - -- **session_max_huge_object_size = 1 GB** - - Configures the maximum size of a single huge memory allocation made by a session. - - Huge allocations are served directly from the kernel and therefore are not guaranteed to succeed. - - This value also pertains to global (meaning not session-related) memory allocations. - - Note: Percentage values cannot be set for this configuration item. - -### GARBAGE COLLECTION (MOT) - -- **enable_gc = true** - - Specifies whether to use the Garbage Collector (GC). - -- **reclaim_threshold = 512 KB** - - Configures the memory threshold for the garbage collector. - - Each session manages its own list of to-be-reclaimed objects and performs its own garbage collection during transaction commit. This value determines the total memory threshold of objects waiting to be reclaimed, above which garbage collection is triggered for a session. - - In general, the trade-off here is between un-reclaimed objects vs. garbage collection frequency. Setting a low value keeps low levels of un-reclaimed memory, but causes frequent garbage collection that may affect performance. Setting a high value triggers garbage collection less frequently, but results in higher levels of un-reclaimed memory. This setting is dependent upon the overall workload. - -- **reclaim_batch_size = 8000** - - Configures the batch size for garbage collection. - - The garbage collector reclaims memory from objects in batches, in order to restrict the number of objects being reclaimed in a single garbage collection pass. The intent of this approach is to minimize the operation time of a single garbage collection pass. - -- **high_reclaim_threshold = 8 MB** - - Configures the high memory threshold for garbage collection. - - Because garbage collection works in batches, it is possible that a session may have many objects that can be reclaimed, but which were not.
In such situations, in order to prevent garbage collection lists from becoming too bloated, this value is used to continue reclaiming objects within a single pass, even though the batch size limit has been reached, until the total size of the still-waiting-to-be-reclaimed objects is less than this threshold, or there are no more objects eligible for reclamation. - -### JIT (MOT) - -- **enable_mot_codegen = true** - - Specifies whether to use JIT query compilation and execution for planned queries. - - JIT query execution enables JIT-compiled code to be prepared for a prepared query during its planning phase. The resulting JIT-compiled function is executed whenever the prepared query is invoked. JIT compilation usually takes place in the form of LLVM. On platforms where LLVM is not natively supported, MOT provides a software-based fallback called Tiny Virtual Machine (TVM). - -- **force_mot_pseudo_codegen = false** - - Specifies whether to use TVM (pseudo-LLVM) even though LLVM is supported on the current platform. - - On platforms where LLVM is not natively supported, MOT automatically defaults to TVM. - - On platforms where LLVM is natively supported, LLVM is used by default. This configuration item enables the use of TVM for JIT compilation and execution on platforms on which LLVM is supported. - -- **enable_mot_codegen_print = false** - - Specifies whether to print emitted LLVM/TVM IR code for JIT-compiled queries. - -- **mot_codegen_limit = 100** - - Limits the number of JIT queries allowed per user session. - -### Default mot.conf - -At a minimum, the **postgresql.conf** file must be pointed to the location of the **mot.conf** file - - -``` -postgresql.conf -mot_config_file = '/tmp/gauss/mot.conf' -``` - -Ensure that the value of the max_process_memory setting is sufficient to include the global (data and index) and local (sessions) memory of MOT tables. - -The default content of **mot.conf** is sufficient to get started. The settings can be optimized later. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/4-mot-usage.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/4-mot-usage.md deleted file mode 100644 index 61762a95..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/4-mot-usage.md +++ /dev/null @@ -1,506 +0,0 @@ ---- -title: MOT Usage -summary: MOT Usage -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Usage - -Using MOT tables is quite simple and is described in the few short sections below. - -MogDB enables an application to use both MOT tables and standard disk-based tables. You can use MOT tables for your most active, high-contention and throughput-sensitive application tables or you can use MOT tables for all your application's tables. - -The following commands describe how to create MOT tables and how to convert existing disk-based tables into MOT tables in order to accelerate an application's database-related performance. MOT is especially beneficial when applied to tables that have proven to be bottlenecks.
- -The following is a simple overview of the tasks related to working with MOT tables: - -- Granting User Permissions -- Creating/Dropping an MOT Table -- Creating an Index for an MOT Table -- Converting a Disk Table into an MOT Table -- Query Native Compilation -- Retrying an Aborted Transaction -- MOT External Support Tools -- MOT SQL Coverage and Limitations - -## Granting User Permissions - -The following describes how to assign a database user permission to access the MOT storage engine. This is performed only once per database user, and is usually done during the initial configuration phase. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The granting of user permissions is required because MOT is integrated into the MogDB database by using and extending the Foreign Data Wrapper (FDW) mechanism, which requires granting user access permissions. - -To enable a specific user to create and access MOT tables (DDL, DML, SELECT) - - -Run the following statement only once - - -```sql -GRANT USAGE ON FOREIGN SERVER mot_server TO ; -``` - -Keywords are not case-sensitive. - -## Creating/Dropping an MOT Table - -Creating a Memory Optimized Table (MOT) is very simple. Only the create and drop table statements in MOT differ from the statements for disk-based tables in MogDB. The syntax of **all other** commands for SELECT, DML and DDL are the same for MOT tables as for MogDB disk-based tables. - -- To create an MOT table - - - ```sql - create FOREIGN table test(x int) [server mot_server]; - ``` - -- Always use the FOREIGN keyword to refer to MOT tables. - -- The [server mot_server] part is optional when creating an MOT table because MOT is an integrated engine, not a separate server. - -- The above is an extremely simple example creating a table named **test** with a single integer column named **x**. In the next section (**Creating an Index**) a more realistic example is provided. - -- MOT tables cannot be created if incremental checkpoint is enabled in postgresql.conf. Therefore, set enable_incremental_checkpoint to off before creating MOT tables. - -- To drop an MOT table named test - - - ```sql - drop FOREIGN table test; - ``` - -For a description of the limitations of supported features for MOT tables, such as data types, see the **MOT SQL Coverage and Limitations** section. - -## Creating an Index for an MOT Table - -Standard PostgreSQL create and drop index statements are supported. - -For example - - -```sql -create index text_index1 on test(x); -``` - -The following is a complete example of creating an index for the ORDER table in a TPC-C workload - - -```sql -create FOREIGN table bmsql_oorder ( - o_w_id integer not null, - o_d_id integer not null, - o_id integer not null, - o_c_id integer not null, - o_carrier_id integer, - o_ol_cnt integer, - o_all_local integer, - o_entry_d timestamp, - primary key (o_w_id, o_d_id, o_id) -); - -create index bmsql_oorder_index1 on bmsql_oorder(o_w_id, o_d_id, o_c_id, o_id); -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** There is no need to specify the **FOREIGN** keyword before the MOT table name, because it is required only for the create and drop table commands. - -For MOT index limitations, see the Index subsection under the _SQL Coverage and Limitations_ section.
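Because the syntax of all other commands is identical for MOT and disk-based tables, a quick sanity check against the **test** table created above could look like the following (an illustrative sketch only):

```sql
-- Standard DML statements run unchanged against an MOT table
insert into test values (1), (2);
update test set x = x + 1 where x = 2;
select * from test order by x;
delete from test where x = 1;
```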
## Converting a Disk Table into an MOT Table - -The direct conversion of disk tables into MOT tables is not yet possible, meaning that no ALTER TABLE statement yet exists that converts a disk-based table into an MOT table. - -The following describes how to manually perform a few steps in order to convert a disk-based table into an MOT table, as well as how the **gs_dump** tool is used to export data and the **gs_restore** tool is used to import data. - -### Prerequisite Check - -Check that the schema of the disk table to be converted into an MOT table contains all required columns. - -Check whether the schema contains any unsupported column data types, as described in the _Unsupported Data Types_ section. - -If a specific column is not supported, then it is recommended to first create a secondary disk table with an updated schema. This schema is the same as the original table, except that all the unsupported types have been converted into supported types. - -Afterwards, use the following script to export this secondary disk table and then import it into an MOT table. - -### Converting - -To convert a disk-based table into an MOT table, perform the following - - -1. Suspend application activity. -2. Use the **gs_dump** tool to dump the table’s data into a physical file on disk. Make sure to dump the **data only** (the -a option). -3. Rename your original disk-based table. -4. Create an MOT table with the same table name and schema. Make sure to use the create FOREIGN keyword to specify that it will be an MOT table. -5. Use **gs_restore** to load/restore data from the disk file into the database table. -6. Visually/manually verify that all the original data was imported correctly into the new MOT table. An example is provided below. -7. Resume application activity. - -**IMPORTANT Note** **-** In this way, since the table name remains the same, application queries and relevant database stored-procedures will be able to access the new MOT table seamlessly without code changes. Please note that MOT does not currently support cross-engine multi-table queries (such as by using Join, Union and sub-query) and cross-engine multi-table transactions. Therefore, if an original table is accessed somewhere in a multi-table query, stored procedure or transaction, you must either convert all related disk-tables into MOT tables or alter the relevant code in the application or the database. - -### Conversion Example - -Let's say that you have a database named **benchmarksql** and a disk-based table named **customer** that you want to migrate into an MOT table. - -To migrate the customer table into an MOT table, perform the following - - -1. Check your source table column types. Verify that all types are supported by MOT; refer to the *Unsupported Data Types* section. - - ```sql - benchmarksql-# \d+ customer - Table "public.customer" - Column | Type | Modifiers | Storage | Stats target | Description - --------+---------+-----------+---------+--------------+------------- - x | integer | | plain | | - y | integer | | plain | | - Has OIDs: no - Options: orientation=row, compression=no - ``` - -2. Check your source table data. - - ```sql - benchmarksql=# select * from customer; - x | y - ---+--- - 1 | 2 - 3 | 4 - (2 rows) - ``` - -3. Dump table data only by using **gs_dump**.
```sql - $ gs_dump -Fc benchmarksql -a --table customer -f customer.dump - gs_dump[port='15500'][benchmarksql][2020-06-04 16:45:38]: dump database benchmarksql successfully - gs_dump[port='15500'][benchmarksql][2020-06-04 16:45:38]: total time: 332 ms - ``` - -4. Rename the source table. - - ```sql - benchmarksql=# alter table customer rename to customer_bk; - ALTER TABLE - ``` - -5. Create the MOT table to be exactly the same as the source table. - - ```sql - benchmarksql=# create foreign table customer (x int, y int); - CREATE FOREIGN TABLE - benchmarksql=# select * from customer; - x | y - ---+--- - (0 rows) - ``` - -6. Import the source dump data into the new MOT table. - - ```sql - $ gs_restore -C -d benchmarksql customer.dump - restore operation successful - total time: 24 ms - Check that the data was imported successfully. - benchmarksql=# select * from customer; - x | y - ---+--- - 1 | 2 - 3 | 4 - (2 rows) - - benchmarksql=# \d - List of relations - Schema | Name | Type | Owner | Storage - --------+-------------+---------------+--------+---------------------------------- - public | customer | foreign table | aharon | - public | customer_bk | table | aharon | {orientation=row,compression=no} - (2 rows) - ``` - -## Query Native Compilation - -An additional feature of MOT is the ability to prepare and parse *pre-compiled full queries* in a native format (using a PREPARE statement) before they are needed for execution. - -This native format can later be executed (using an EXECUTE command) more efficiently. This type of execution is much quicker because the native format bypasses multiple database processing layers during execution and thus enables better performance. - -This division of labor avoids repetitive parse analysis operations. In this way, queries and transaction statements are executed in an interactive manner. This feature is sometimes called *Just-In-Time (JIT)* query compilation. - -### Query Compilation - PREPARE Statement - -To use MOT’s native query compilation, call the PREPARE client statement before the query is executed. This instructs MOT to pre-compile the query and/or to pre-load previously pre-compiled code from a cache. - -The following is an example of PREPARE syntax in SQL - - -```sql -PREPARE name [ ( data_type [, ...] ) ] AS statement -``` - -PREPARE creates a prepared statement in the database server, which is a server-side object that can be used to optimize performance. - -### Execute Command - -When an EXECUTE command is subsequently issued, the prepared statement is parsed, analyzed, rewritten and executed. This division of labor avoids repetitive parse analysis operations, while enabling the execution plan to depend on specific provided setting values. - -The following is an example of how to invoke a PREPARE and then an EXECUTE statement in a Java application. - -```java -conn = DriverManager.getConnection(connectionUrl, connectionUser, connectionPassword); - -// Example 1: PREPARE without bind settings -String query = "SELECT * FROM getusers"; -PreparedStatement prepStmt1 = conn.prepareStatement(query); -ResultSet rs1 = prepStmt1.executeQuery(); -while (rs1.next()) {…} - -// Example 2: PREPARE with bind settings -String sqlStmt = "SELECT * FROM employees where first_name=? 
and last_name like ?"; -PreparedStatement prepStmt2 = conn.prepareStatement(sqlStmt); -prepStmt2.setString(1, "Mark"); // first name "Mark" -prepStmt2.setString(2, "%n%"); // last name contains a letter "n" -ResultSet rs2 = prepStmt2.executeQuery(); -while (rs2.next()) {…} -``` - -The following describes the supported and unsupported features of MOT compilation. - -### Supported Queries for Lite Execution - -The following query types are suitable for lite execution - - -- Simple point queries - - - SELECT (including SELECT for UPDATE) - - UPDATE - - DELETE -- INSERT query -- Range UPDATE queries that refer to a full prefix of the primary key -- Range SELECT queries that refer to a full prefix of the primary key -- JOIN queries where one or both parts collapse to a point query -- JOIN queries that refer to a full prefix of the primary key in each joined table - -### Unsupported Queries for Lite Execution - -Any special query attribute disqualifies a query for Lite Execution. In particular, if any of the following conditions apply, then the query is declared as unsuitable for Lite Execution. You may refer to the Unsupported Queries for Native Compilation and Lite Execution section for more information. - -It is important to emphasize that if a query statement is not suitable for native compilation and lite execution, no error is reported to the client and the query is still executed in a normal and standard manner. - -For more information about MOT native compilation capabilities, see either the section about Query Native Compilation or a more detailed information in the Query Native Compilation (JIT) section. - -## Retrying an Aborted Transaction - -In Optimistic Concurrency Control (OCC), such as the one used by MOT, no locks are placed on a record during a transaction (at any isolation level) until the COMMIT phase. This is a powerful advantage that significantly increases performance. Its drawback is that an update may fail if another session attempts to update the same record. This results in an entire transaction that must be aborted. These so-called *Update Conflicts* are detected by MOT at the commit time by a version checking mechanism. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** A similar abort happens on engines using pessimistic concurrency control, such as standard PG and the MogDB disk-based tables, when SERIALIZABLE or REPEATABLE-READ isolation levels are used. - -Such update conflicts are quite rare in common OLTP scenarios and are especially rare in our experience with MOT. However, because there is still a chance that they may happen, developers should consider resolving this issue using transaction retry code. - -The following describes how to retry a table command after multiple sessions attempt to update the same table simultaneously. You may refer to the OCC vs 2PL Differences by Example section for more detailed information. The following example is taken from a TPC-C payment transaction.
-
-```java
-int commitAborts = 0;
-
-while (commitAborts < RETRY_LIMIT) {
-
-    try {
-        stmt = db.stmtPaymentUpdateDistrict;
-        stmt.setDouble(1, 100);
-        stmt.setInt(2, 1);
-        stmt.setInt(3, 1);
-        stmt.executeUpdate();
-
-        db.commit();
-
-        break;
-    }
-    catch (SQLException se) {
-        // An update conflict surfaces as a serialization failure - count it and retry.
-        if (se != null && se.getMessage().contains("could not serialize access due to concurrent update")) {
-            log.error("commit abort = " + se.getMessage());
-            commitAborts++;
-            continue;
-        } else {
-            db.rollback();
-        }
-
-        break;
-    }
-}
-```
-
-## MOT External Support Tools
-
-The following external MogDB tools have been modified in order to support MOT. Make sure to use the most recent version of each. An overview describing MOT-related usage is provided below. For a full description of these tools and their usage, refer to the MogDB Tools Reference document.
-
-### gs_ctl (Full and Incremental)
-
-This tool is used to create a standby server from a primary server, as well as to synchronize a server with another copy of the same server after their timelines have diverged.
-
-At the end of the operation, the latest MOT checkpoint is fetched by the tool, taking into consideration the **checkpoint_dir** configuration setting value.
-
-The checkpoint is fetched from the source server's **checkpoint_dir** to the destination server's **checkpoint_dir**.
-
-Currently, MOT does not support an incremental checkpoint. Therefore, the gs_ctl incremental build does not work in an incremental manner for MOT, but rather in FULL mode. The Postgres (disk-tables) incremental build can still be done incrementally.
-
-### gs_basebackup
-
-gs_basebackup is used to prepare base backups of a running server, without affecting other database clients.
-
-The MOT checkpoint is fetched at the end of the operation as well. However, the checkpoint's location is taken from **checkpoint_dir** in the source server and is transferred to the data directory of the source in order to back it up correctly.
-
-### gs_dump
-
-gs_dump is used to export the database schema and data to a file. It also supports MOT tables.
-
-### gs_restore
-
-gs_restore is used to import the database schema and data from a file. It also supports MOT tables.
-
-## MOT SQL Coverage and Limitations
-
-MOT design enables almost complete coverage of SQL and future feature sets. For example, standard Postgres SQL is mostly supported, as well as common database features, such as stored procedures and user-defined functions.
-
-The following describes the various types of SQL coverage and limitations -
-
-### Unsupported Features
-
-The following features are not supported by MOT -
-
-- Engine Interop - No cross-engine (Disk+MOT) queries, views or transactions. Planned for 2021.
-- MVCC, Isolation - No snapshot/serializable isolation. Planned for 2021.
-- Native Compilation (JIT) - Limited SQL coverage. Also, JIT compilation of stored procedures is not supported.
-- LOCAL memory is limited to 1 GB. A transaction can only change data of less than 1 GB.
-- Capacity (Data+Index) is limited to available memory. Anti-caching + Data Tiering will be available in the future.
-- No full-text search index.
-- No logical copy support.
-
-In addition, the following are detailed lists of various general limitations of MOT tables, MOT indexes, Query and DML syntax and the features and limitations of Query Native Compilation.
-
-### MOT Table Limitations
-
-The following lists the functionality limitations of MOT tables -
-
-- Partitioning
-- AES encryption, row-level access control, dynamic data masking
-- Stream operations
-- User-defined types
-- Sub-transactions
-- DML triggers
-- DDL triggers
-- Collations other than "C" or "POSIX"
-
-### Unsupported Table DDLs
-
-- Alter table
-- Create table LIKE (INCLUDING clause)
-- Create table as select
-- Partition by range
-- Create table with no-logging clause
-- DEFERRABLE primary key
-- Reindex
-- Tablespace
-- Create schema with subcommands
-
-### Unsupported Data Types
-
-- UUID
-- User-defined type (UDT)
-- Array data type
-- NVARCHAR2(n)
-- Clob
-- Name
-- Blob
-- Raw
-- Path
-- Circle
-- Reltime
-- Bit varying(10)
-- Tsvector
-- Tsquery
-- JSON
-- Box
-- Text
-- Line
-- Point
-- LSEG
-- POLYGON
-- INET
-- CIDR
-- MACADDR
-- Smalldatetime
-- BYTEA
-- Bit
-- Varbit
-- OID
-- Money
-- Any unlimited varchar/character varying
-- HSTORE
-- XML
-- Int16
-- Abstime
-- Tsrange
-- Tstzrange
-- Int8range
-- Int4range
-- Numrange
-- Daterange
-- HLL
-
-### Unsupported Index DDLs and Indexes
-
-- Create index on decimal/numeric
-
-- Create index on nullable columns
-
-- Create index when the number of indexes per table exceeds 9
-
-- Create index on a key size greater than 256 bytes
-
-  The key size includes the column size in bytes + a column additional size, which is an overhead required to maintain the index. The table below lists the column additional size for different column types.
-
-  Additionally, non-unique indexes require an extra 8 bytes.
-
-  Thus, the following pseudo code calculates the **key size**:
-
-  ```java
-  keySize = 0;
-
-  for each (column in index) {
-      keySize += (columnSize + columnAddSize);
-  }
-  if (index is non_unique) {
-      keySize += 8;
-  }
-  ```
-
-  | Column Type | Column Size | Column Additional Size |
-  | :---------- | :---------- | :--------------------- |
-  | varchar     | N           | 4                      |
-  | tinyint     | 1           | 1                      |
-  | smallint    | 2           | 1                      |
-  | int         | 4           | 1                      |
-  | bigint      | 8           | 1                      |
-  | float       | 4           | 2                      |
-  | float8      | 8           | 3                      |
-
-  For types that are not specified in the table above, the column additional size is zero (for instance, timestamp).
-
-### Unsupported DMLs
-
-- Merge into
-- Select into
-- Lock table
-- Copy from table
-- Upsert
-
-### Unsupported Queries for Native Compilation and Lite Execution
-
-- The query refers to more than two tables
-- The query has any one of the following attributes -
-
-  - Aggregation on non-primitive types
-  - Window functions
-  - Sub-query sub-links
-  - Distinct-ON modifier (distinct clause is from DISTINCT ON)
-  - Recursive (WITH RECURSIVE was specified)
-  - Modifying CTE (has INSERT/UPDATE/DELETE in WITH)
-
-In addition, the following clauses disqualify a query from lite execution -
-
-- Returning list
-- Group By clause
-- Grouping sets
-- Having clause
-- Windows clause
-- Distinct clause
-- Sort clause that does not conform to native index order
-- Set operations
-- Constraint dependencies
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/5-mot-administration.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/5-mot-administration.md
deleted file mode 100644
index 5b2171fe..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/5-mot-administration.md
+++ /dev/null
@@ -1,419 +0,0 @@
----
-title: MOT Administration
-summary: MOT Administration
-author: Zhang Cuiping
-date: 2021-03-04
----
-
-# MOT Administration
-
-The following describes various MOT administration topics.
-
-## MOT Durability
-
-Durability refers to long-term data protection (also known as *disk persistence*). Durability means that stored data does not suffer from any kind of degradation or corruption, so that data is never lost or compromised. Durability ensures that data and the MOT engine are restored to a consistent state after a planned shutdown (for example, for maintenance) or an unplanned crash (for example, a power failure).
-
-Memory storage is volatile, meaning that it requires power to maintain the stored information. Disk storage, on the other hand, is non-volatile, meaning that it does not require power to maintain stored information and thus can survive a power shutdown. MOT uses both types of storage - it keeps all data in memory, while persisting transactional changes to disk (see **MOT Logging - WAL Redo Log** below) and maintaining frequent periodic **MOT Checkpoints** in order to ensure data recovery in case of shutdown.
-
-The user must ensure sufficient disk space for the logging and Checkpointing operations. A separate drive can be used for the Checkpoint to improve performance by reducing disk I/O load.
-
-You may refer to the **MOT Key Technologies** section for an overview of how durability is implemented in the MOT engine.
-
-**To configure durability -**
-
-To ensure strict consistency, configure the synchronous_commit parameter to **On** in the postgresql.conf configuration file.
-
-**MOT's WAL Redo Log and Checkpoints enable durability, as described below -**
-
-### MOT Logging - WAL Redo Log
-
-To ensure durability, MOT is fully integrated with MogDB's Write-Ahead Logging (WAL) mechanism, so that MOT persists data in WAL records using MogDB's XLOG interface. This means that every addition, update and deletion to an MOT table's record is recorded as an entry in the WAL. This ensures that the most current data state can be regenerated and recovered from this non-volatile log. For example, if three new rows were added to a table, two were deleted and one was updated, then six entries would be recorded in the log.
-
-MOT log records are written to the same WAL as the other records of MogDB disk-based tables.
-
-MOT only logs an operation at the transaction commit phase.
-
-MOT only logs the updated delta record in order to minimize the amount of data written to disk.
-
-During recovery, data is loaded from the last known or a specific Checkpoint; and then the WAL Redo log is used to complete the data changes that occurred from that point forward.
-
-The WAL (Redo Log) retains all the table row modifications until a Checkpoint is performed (as described above). The log can then be truncated in order to reduce recovery time and to save disk space.
-
-**Note** - In order to ensure that the log IO device does not become a bottleneck, the log file must be placed on a drive that has low latency.
-
-### MOT Logging Types
-
-Two synchronous transaction logging options and one asynchronous transaction logging option are supported (these are also supported by the standard MogDB disk engine). MOT also supports synchronous Group Commit logging with NUMA-awareness optimization, as described below.
-
-According to your configuration, one of the following types of logging is implemented -
-
-- **Synchronous Redo Logging**
-
-  The **Synchronous Redo Logging** option is the simplest and most strict redo logger. When a transaction is committed by a client application, the transaction redo entries are recorded in the WAL (Redo Log), as follows -
-
-  1. While a transaction is in progress, it is stored in the MOT's memory.
-  2. After a transaction finishes and the client application sends a Commit command, the transaction is locked and then written to the WAL Redo Log on the disk. This means that while the transaction log entries are being written to the log, the client application is still waiting for a response.
-  3. As soon as the transaction's entire buffer is written to the log, the changes to the data in memory take place and then the transaction is committed. After the transaction has been committed, the client application is notified that the transaction is complete.
-
-  **Summary**
-
-  The **Synchronous Redo Logging** option is the safest and most strict because it ensures total synchronization of the client application and the WAL Redo log entries for each transaction as it is committed, thus ensuring total durability and consistency with absolutely no data loss. This logging option prevents the situation where a client application might mark a transaction as successful, when it has not yet been persisted to disk.
-
-  The downside of the **Synchronous Redo Logging** option is that it is the slowest logging mechanism of the three options. This is because a client application must wait until all data is written to disk and because of the frequent disk writes (which typically slow down the database).
-
-- **Group Synchronous Redo Logging**
-
-  The **Group Synchronous Redo Logging** option is very similar to the **Synchronous Redo Logging** option, because it also ensures total durability with absolutely no data loss and total synchronization of the client application and the WAL (Redo Log) entries. The difference is that the **Group Synchronous Redo Logging** option writes *groups of transaction redo entries* to the WAL Redo Log on the disk at the same time, instead of writing each and every transaction as it is committed. Using Group Synchronous Redo Logging reduces the amount of disk I/Os and thus improves performance, especially when running a heavy workload.
-
-  The MOT engine performs synchronous Group Commit logging with Non-Uniform Memory Access (NUMA)-awareness optimization by automatically grouping transactions according to the NUMA socket of the core on which the transaction is running.
-
-  You may refer to the **NUMA Awareness Allocation and Affinity** section for more information about NUMA-aware memory access.
-
-  When a transaction commits, a group of entries are recorded in the WAL Redo Log, as follows -
-
-  1. While a transaction is in progress, it is stored in the memory. The MOT engine groups transactions in buckets according to the NUMA socket of the core on which the transaction is running. This means that all the transactions running on the same socket are grouped together and that multiple groups fill up in parallel according to the core on which each transaction is running.
-
-     Writing transactions to the WAL is more efficient in this manner because all the buffers from the same socket are written to disk together.
-
-     **Note** - Each thread runs on a single core/CPU which belongs to a single socket and each thread only writes to the socket of the core on which it is running.
-
-  2. After a transaction finishes and the client application sends a Commit command, the transaction redo log entries are serialized together with other transactions that belong to the same group.
-
-  3. After the configured criteria are fulfilled for a specific group of transactions (quantity of committed transactions or timeout period, as described in the **REDO LOG (MOT)** section), the transactions in this group are written to the WAL on the disk. This means that while these log entries are being written to the log, the client applications that issued the commit are waiting for a response.
-
-  4. As soon as all the transaction buffers in the NUMA-aware group have been written to the log, all the transactions in the group perform the necessary changes to the memory store and the clients are notified that these transactions are complete.
-
-  **Summary**
-
-  The **Group Synchronous Redo Logging** option is an extremely safe and strict logging option because it ensures total synchronization of the client application and the WAL Redo log entries, thus ensuring total durability and consistency with absolutely no data loss. This logging option prevents the situation where a client application might mark a transaction as successful, when it has not yet been persisted to disk.
-
-  On one hand, this option has fewer disk writes than the **Synchronous Redo Logging** option, which may mean that it is faster. The downside is that transactions are locked for longer, meaning that they are locked until after all the transactions in the same NUMA memory have been written to the WAL Redo Log on the disk.
-
-  The benefits of using this option depend on the type of transactional workload. For example, this option benefits systems that have many transactions (and less so for systems that have few transactions, because there are few disk writes anyway).
-
-- **Asynchronous Redo Logging**
-
-  The **Asynchronous Redo Logging** option is the fastest logging method. However, it does not guarantee that no data is lost, meaning that some data that is still in the buffer and was not yet written to disk may get lost upon a power failure or database crash. When a transaction is committed by a client application, the transaction redo entries are recorded in internal buffers and written to disk at preconfigured intervals. The client application does not wait for the data to be written to disk; it continues to the next transaction. This is what makes asynchronous redo logging the fastest logging method.
-
-  When a transaction is committed by a client application, the transaction redo entries are recorded in the WAL Redo Log, as follows -
-
-  1. While a transaction is in progress, it is stored in the MOT's memory.
-  2. After a transaction finishes and the client application sends a Commit command, the transaction redo entries are written to internal buffers, but are not yet written to disk. Then changes to the MOT data memory take place and the client application is notified that the transaction is committed.
-  3. At a preconfigured interval, a redo log thread running in the background collects all the buffered redo log entries and writes them to disk.
-
-  **Summary**
-
-  The Asynchronous Redo Logging option is the fastest logging option because it does not require the client application to wait for data being written to disk. In addition, it groups many transactions' redo entries and writes them together, thus reducing the amount of disk I/Os that slow down the MOT engine.
-
-  The downside of the Asynchronous Redo Logging option is that it does not ensure that data will not get lost upon a crash or failure.
-  Data that was committed, but was not yet written to disk, is not durable on commit and thus cannot be recovered in case of a failure. The Asynchronous Redo Logging option is most relevant for applications that are willing to sacrifice data recovery (consistency) for performance.
-
-### Configuring Logging
-
-Two synchronous transaction logging options and one asynchronous transaction logging option are supported by the standard MogDB disk engine.
-
-To configure logging -
-
-1. Determine whether synchronous or asynchronous transaction logging is performed by setting the synchronous_commit parameter (**On** = synchronous) in the postgresql.conf configuration file.
-
-2. If a synchronous mode of transaction logging has been selected (synchronous_commit = **On**, as described above), then the enable_group_commit parameter in the mot.conf configuration file determines whether the **Group Synchronous Redo Logging** option or the **Synchronous Redo Logging** option is used. For **Group Synchronous Redo Logging**, you must also define in the mot.conf file which of the following thresholds determines when a group of transactions is recorded in the WAL -
-
-   - group_commit_size **-** The quantity of committed transactions in a group. For example, **16** means that when 16 transactions in the same group have been committed by a client application, then an entry is written to disk in the WAL Redo Log for all 16 transactions.
-
-   - group_commit_timeout **-** A timeout period in ms. For example, **10** means that after 10 ms, an entry is written to disk in the WAL Redo Log for each of the transactions in the same group that have been committed by their client application in the last 10 ms.
-
-   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** You may refer to the **REDO LOG (MOT)** section for more information about configuration settings. A minimal configuration sketch is shown at the end of this section.
-
-### MOT Checkpoints
-
-A Checkpoint is the point in time at which all the data of a table's rows is saved in files on persistent storage in order to create a full durable database image. It is a snapshot of the data at a specific point in time.
-
-A Checkpoint is required in order to reduce a database's recovery time by shortening the quantity of WAL (Redo Log) entries that must be replayed in order to ensure durability. Checkpoints also reduce the storage space required to keep all the log entries.
-
-If there were no Checkpoints, then in order to recover a database, all the WAL redo entries would have to be replayed from the beginning of time, which could take days or weeks depending on the quantity of records in the database. Checkpoints record the current state of the database and enable old redo entries to be discarded.
-
-Checkpoints are essential during recovery scenarios (especially for a cold start). First, the data is loaded from the last known or a specific Checkpoint; and then the WAL is used to complete the data changes that occurred since then.
-
-For example - If the same table row is modified 100 times, then 100 entries are recorded in the log. When Checkpoints are used, then even if a specific table row was modified 100 times, it is recorded in the Checkpoint a single time. After the recording of a Checkpoint, recovery can be performed on the basis of that Checkpoint, and only the WAL Redo Log entries that occurred since the Checkpoint need to be replayed.
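-
-The following is a minimal, illustrative sketch of the durability configuration described in this section. The parameter names are the ones referenced above; the values shown (and the exact value syntax, such as true vs. on) are assumptions that should be adapted to your installation -
-
-```
-# postgresql.conf - synchronous (On) vs. asynchronous transaction logging
-synchronous_commit = on
-
-# mot.conf - select Group Synchronous Redo Logging and its group thresholds
-enable_group_commit = true
-group_commit_size = 16       # write the group to the WAL after 16 committed transactions
-group_commit_timeout = 10    # or after 10 ms
-```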
-
-## MOT Recovery
-
-The main objective of MOT Recovery is to restore the data and the MOT engine to a consistent state after a planned shutdown (for example, for maintenance) or an unplanned crash (for example, after a power failure).
-
-MOT recovery is performed automatically with the recovery of the rest of the MogDB database and is fully integrated into the MogDB recovery process (also called a *Cold Start*).
-
-MOT recovery consists of two stages -
-
-**Checkpoint Recovery -** First, data must be recovered from the latest Checkpoint file on disk by loading it into memory rows and creating indexes.
-
-**WAL Redo Log Recovery -** Afterwards, the recent data (which was not captured in the Checkpoint) must be recovered from the WAL Redo Log by replaying records that were added to the log since the Checkpoint that was used in the Checkpoint Recovery (described above).
-
-The WAL Redo Log recovery is managed and triggered by MogDB.
-
-To configure recovery -
-
-- While WAL recovery is performed in a serial manner, the Checkpoint recovery can be configured to run in a multi-threaded manner (meaning in parallel by multiple workers).
-- Configure the **checkpoint_recovery_workers** parameter in the **mot.conf** file, which is described in the **RECOVERY (MOT)** section.
-
-## MOT Replication and High Availability
-
-Since MOT is integrated into MogDB and uses/supports its replication and high availability, both synchronous and asynchronous replication are supported out of the box.
-
-The MogDB gs_ctl tool is used for availability control and to operate the cluster. This includes gs_ctl switchover, gs_ctl failover, gs_ctl build and so on.
-
-You may refer to the MogDB Tools Reference document for more information.
-
-To configure replication and high availability, refer to the relevant MogDB documentation.
-
-## MOT Memory Management
-
-For planning and fine-tuning, see the **MOT Memory and Storage Planning** and **MOT Configuration Settings** sections.
-
-## MOT Vacuum
-
-Use VACUUM for garbage collection and optionally to analyze a database, as follows -
-
-- [PG]
-
-  In Postgres (PG), VACUUM reclaims storage occupied by dead tuples. In normal PG operation, tuples that are deleted or that are made obsolete by an update are not physically removed from their table. They remain present until a VACUUM is done. Therefore, it is necessary to perform a VACUUM periodically, especially on frequently updated tables.
-
-- [MOT Extension]
-
-  MOT tables do not need a periodic VACUUM operation, since dead/empty tuples are re-used by new ones. MOT tables require VACUUM operations only when their size is significantly reduced and they are not expected to grow back to their original size in the near future.
-
-  For example, consider an application that periodically (for example, once a week) performs a large deletion of a table's data while inserting new data over several days, and that does not necessarily require the same quantity of rows. In such cases, it makes sense to activate the VACUUM.
-
-  The VACUUM operation on MOT tables is always transformed into a VACUUM FULL with an exclusive table lock.
-
-- Supported Syntax and Limitations
-
-  Activation of the VACUUM operation is performed in a standard manner.
-
-  ```sql
-  VACUUM [FULL | ANALYZE] [ table ];
-  ```
-
-  Only the FULL and ANALYZE VACUUM options are supported. The VACUUM operation can only be performed on an entire MOT table.
-
-  The following PG vacuum options are not supported:
-
-  - FREEZE
-  - VERBOSE
-  - Column specification
-  - LAZY mode (partial table scan)
-
-  Additionally, the following functionality is not supported -
-
-  - AUTOVACUUM
-
-## MOT Statistics
-
-Statistics are intended for performance analysis or debugging. It is uncommon to turn them ON in a production environment (by default, they are OFF). Statistics are primarily used by database developers and to a lesser degree by database users.
-
-There is some impact on performance, particularly on the server. The impact on the user is negligible.
-
-The statistics are saved in the database server log. The log is located in the data folder and named **postgresql-DATE-TIME.log**.
-
-Refer to **STATISTICS (MOT)** for detailed configuration options.
-
-## MOT Monitoring
-
-All syntax for monitoring of PG-based FDW tables is supported. This includes table and index sizes (as described below). In addition, special functions exist for monitoring MOT memory consumption, including MOT Global Memory, MOT Local Memory and a single client session.
-
-### Table and Index Sizes
-
-The size of tables and indexes can be monitored by querying pg_relation_size.
-
-For example -
-
-**Data Size**
-
-```sql
-select pg_relation_size('customer');
-```
-
-**Index**
-
-```sql
-select pg_relation_size('customer_pkey');
-```
-
-### MOT GLOBAL Memory Details
-
-Check the size of MOT global memory, which primarily includes the data and indexes.
-
-```sql
-select * from mot_global_memory_detail();
-```
-
-Result -
-
-```sql
-numa_node | reserved_size | used_size
-----------+---------------+-------------
--1        | 194716368896  | 25908215808
-0         | 446693376     | 446693376
-1         | 452984832     | 452984832
-2         | 452984832     | 452984832
-3         | 452984832     | 452984832
-4         | 452984832     | 452984832
-5         | 364904448     | 364904448
-6         | 301989888     | 301989888
-7         | 301989888     | 301989888
-```
-
-Where -
-
-- -1 is the total memory.
-- 0..7 are the NUMA memory nodes.
-
-### MOT LOCAL Memory Details
-
-Check the size of MOT local memory, which includes session memory.
-
-```sql
-select * from mot_local_memory_detail();
-```
-
-Result -
-
-```sql
-numa_node | reserved_size | used_size
-----------+---------------+-----------
--1        | 144703488     | 144703488
-0         | 25165824      | 25165824
-1         | 25165824      | 25165824
-2         | 18874368      | 18874368
-3         | 18874368      | 18874368
-4         | 18874368      | 18874368
-5         | 12582912      | 12582912
-6         | 12582912      | 12582912
-7         | 12582912      | 12582912
-```
-
-Where -
-
-- -1 is the total memory.
-- 0..7 are the NUMA memory nodes.
-
-### Session Memory
-
-Memory for session management is taken from the MOT local memory.
-
-Memory usage by all active sessions (connections) can be monitored using the following query -
-
-```sql
-select * from mot_session_memory_detail();
-```
-
-Result -
-
-```sql
-sessid                      | total_size | free_size | used_size
-----------------------------+------------+-----------+----------
-1591175063.139755603855104  | 6291456    | 1800704   | 4490752
-```
-
-Legend -
-
-- **total_size -** allocated for the session
-- **free_size -** not in use
-- **used_size -** in actual use
-
-The following query enables a DBA to determine the state of local memory used by the current session -
-
-```sql
-select * from mot_session_memory_detail()
-  where sessid = pg_current_sessionid();
-```
-
-Result -
-
-![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-administration-1.png)
-
-## MOT Error Messages
-
-Errors may be caused by a variety of scenarios.
-All errors are logged in the database server log file. In addition, user-related errors are returned to the user as part of the response to the query, transaction or stored procedure execution or to a database administration action.
-
-- Errors reported in the server log include - Function, Entity, Context, Error message, Error description and Severity.
-- Errors reported to users are translated into standard PostgreSQL error codes and may consist of an MOT-specific message and description.
-
-The following lists the error messages, error descriptions and error codes. The error code is actually an internal code and is not logged or returned to users.
-
-### Errors Written to the Log File
-
-All errors are logged in the database server log file. The following lists the errors that are written to the database server log file and are **not** returned to the user. The log is located in the data folder and named **postgresql-DATE-TIME.log**.
-
-**Table 1** Errors Written Only to the Log File
-
-| Message in the Log | Error Internal Code |
-| :---------------------------------- | :------------------------------- |
-| Error code denoting success | MOT_NO_ERROR 0 |
-| Out of memory | MOT_ERROR_OOM 1 |
-| Invalid configuration | MOT_ERROR_INVALID_CFG 2 |
-| Invalid argument passed to function | MOT_ERROR_INVALID_ARG 3 |
-| System call failed | MOT_ERROR_SYSTEM_FAILURE 4 |
-| Resource limit reached | MOT_ERROR_RESOURCE_LIMIT 5 |
-| Internal logic error | MOT_ERROR_INTERNAL 6 |
-| Resource unavailable | MOT_ERROR_RESOURCE_UNAVAILABLE 7 |
-| Unique violation | MOT_ERROR_UNIQUE_VIOLATION 8 |
-| Invalid memory allocation size | MOT_ERROR_INVALID_MEMORY_SIZE 9 |
-| Index out of range | MOT_ERROR_INDEX_OUT_OF_RANGE 10 |
-| Error code unknown | MOT_ERROR_INVALID_STATE 11 |
-
-### Errors Returned to the User
-
-The following lists the errors that are written to the database server log file and are returned to the user.
-
-MOT returns PG standard error codes to the envelope using a Return Code (RC). Some RCs cause the generation of an error message to the user who is interacting with the database.
-
-The PG code (described below) is returned internally by MOT to the database envelope, which reacts to it according to standard PG behavior.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** %s, %u and %lu in the message are replaced by relevant error information, such as the query, table name or other information.
-> %s - String
-> %u - Number
-> %lu - Number
-
-**Table 2** Errors Returned to the User and Logged to the Log File
-
-| Short and Long Description Returned to the User | PG Code | Internal Error Code |
-| :---------------------------------------------------- | :------------------------------ | :------------------------------ |
-| Success. Denotes success. | ERRCODE_SUCCESSFUL_COMPLETION | RC_OK = 0 |
-| Failure. Unknown error has occurred. | ERRCODE_FDW_ERROR | RC_ERROR = 1 |
-| Unknown error has occurred. Denotes an aborted operation. | ERRCODE_FDW_ERROR | RC_ABORT |
-| Column definition of %s is not supported. Column type %s is not supported yet. | ERRCODE_INVALID_COLUMN_DEFINITION | RC_UNSUPPORTED_COL_TYPE |
-| Column definition of %s is not supported. Column type Array of %s is not supported yet. | ERRCODE_INVALID_COLUMN_DEFINITION | RC_UNSUPPORTED_COL_TYPE_ARR |
-| Column size %d exceeds max tuple size %u. Column definition of %s is not supported. | ERRCODE_FEATURE_NOT_SUPPORTED | RC_EXCEEDS_MAX_ROW_SIZE |
-| Column name %s exceeds max name size %u. Column definition of %s is not supported. | ERRCODE_INVALID_COLUMN_DEFINITION | RC_COL_NAME_EXCEEDS_MAX_SIZE |
-| Column size %d exceeds max size %u. Column definition of %s is not supported. | ERRCODE_INVALID_COLUMN_DEFINITION | RC_COL_SIZE_INVALID |
-| Cannot create table. Cannot add column %s, as the number of declared columns exceeds the maximum declared columns. | ERRCODE_FEATURE_NOT_SUPPORTED | RC_TABLE_EXCEEDS_MAX_DECLARED_COLS |
-| Cannot create index. Total column size is greater than maximum index size %u. | ERRCODE_FDW_KEY_SIZE_EXCEEDS_MAX_ALLOWED | RC_INDEX_EXCEEDS_MAX_SIZE |
-| Cannot create index. Total number of indexes for table %s is greater than the maximum number of indexes allowed %u. | ERRCODE_FDW_TOO_MANY_INDEXES | RC_TABLE_EXCEEDS_MAX_INDEXES |
-| Cannot execute statement. Maximum number of DDLs per transaction reached the maximum %u. | ERRCODE_FDW_TOO_MANY_DDL_CHANGES_IN_TRANSACTION_NOT_ALLOWED | RC_TXN_EXCEEDS_MAX_DDLS |
-| Unique constraint violation. Duplicate key value violates unique constraint \"%s\". Key %s already exists. | ERRCODE_UNIQUE_VIOLATION | RC_UNIQUE_VIOLATION |
-| Table \"%s\" does not exist. | ERRCODE_UNDEFINED_TABLE | RC_TABLE_NOT_FOUND |
-| Index \"%s\" does not exist. | ERRCODE_UNDEFINED_TABLE | RC_INDEX_NOT_FOUND |
-| Unknown error has occurred. | ERRCODE_FDW_ERROR | RC_LOCAL_ROW_FOUND |
-| Unknown error has occurred. | ERRCODE_FDW_ERROR | RC_LOCAL_ROW_NOT_FOUND |
-| Unknown error has occurred. | ERRCODE_FDW_ERROR | RC_LOCAL_ROW_DELETED |
-| Unknown error has occurred. | ERRCODE_FDW_ERROR | RC_INSERT_ON_EXIST |
-| Unknown error has occurred. | ERRCODE_FDW_ERROR | RC_INDEX_RETRY_INSERT |
-| Unknown error has occurred. | ERRCODE_FDW_ERROR | RC_INDEX_DELETE |
-| Unknown error has occurred. | ERRCODE_FDW_ERROR | RC_LOCAL_ROW_NOT_VISIBLE |
-| Memory is temporarily unavailable. | ERRCODE_OUT_OF_LOGICAL_MEMORY | RC_MEMORY_ALLOCATION_ERROR |
-| Unknown error has occurred. | ERRCODE_FDW_ERROR | RC_ILLEGAL_ROW_STATE |
-| Null constraint violated. NULL value cannot be inserted into non-null column %s at table %s. | ERRCODE_FDW_ERROR | RC_NULL_VIOLATION |
-| Critical error. Critical error: %s. | ERRCODE_FDW_ERROR | RC_PANIC |
-| A checkpoint is in progress - cannot truncate table. | ERRCODE_FDW_OPERATION_NOT_SUPPORTED | RC_NA |
-| Unknown error has occurred. | ERRCODE_FDW_ERROR | RC_MAX_VALUE |
-| <recovery message> | - | ERRCODE_CONFIG_FILE_ERROR |
-| <recovery message> | - | ERRCODE_INVALID_TABLE_DEFINITION |
-| Memory engine - Failed to perform commit prepared. | - | ERRCODE_INVALID_TRANSACTION_STATE |
-| Invalid option <option name> | - | ERRCODE_FDW_INVALID_OPTION_NAME |
-| Invalid memory allocation request size. | - | ERRCODE_INVALID_PARAMETER_VALUE |
-| Memory is temporarily unavailable. | - | ERRCODE_OUT_OF_LOGICAL_MEMORY |
-| Could not serialize access due to concurrent update. | - | ERRCODE_T_R_SERIALIZATION_FAILURE |
-| Alter table operation is not supported for memory table. Cannot create MOT tables while incremental checkpoint is enabled. Re-index is not supported for memory tables. | - | ERRCODE_FDW_OPERATION_NOT_SUPPORTED |
-| Allocation of table metadata failed. | - | ERRCODE_OUT_OF_MEMORY |
-| Database with OID %u does not exist. | - | ERRCODE_UNDEFINED_DATABASE |
-| Value exceeds maximum precision: %d. | - | ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE |
-| You have reached a maximum logical capacity %lu of allowed %lu. | - | ERRCODE_OUT_OF_LOGICAL_MEMORY |
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/6-mot-sample-tpcc-benchmark.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/6-mot-sample-tpcc-benchmark.md
deleted file mode 100644
index 6c0c4dd5..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/6-mot-sample-tpcc-benchmark.md
+++ /dev/null
@@ -1,116 +0,0 @@
----
-title: MOT Sample TPC-C Benchmark
-summary: MOT Sample TPC-C Benchmark
-author: Zhang Cuiping
-date: 2021-03-04
----
-
-# MOT Sample TPC-C Benchmark
-
-## TPC-C Introduction
-
-The TPC-C Benchmark is an industry standard benchmark for measuring the performance of Online Transaction Processing (OLTP) systems. It is based on a complex database and a number of different transaction types that are executed on it. TPC-C is both a hardware-independent and a software-independent benchmark and can thus be run on every test platform. An official overview of the benchmark model can be found at the tpc.org website here - .
-
-The database consists of nine tables of various structures and thus also nine types of data records. The size and quantity of the data records varies per table. A mix of five concurrent transactions of varying types and complexities is executed on the database, which are largely online or in part queued for deferred batch processing. Because these tables compete for limited system resources, many system components are stressed and data changes are executed in a variety of ways.
-
-**Table 1** TPC-C Database Structure
-
-| Table | Number of Entries |
-| :--------- | :--------------------------------------- |
-| Warehouse | n |
-| Item | 100,000 |
-| Stock | n x 100,000 |
-| District | n x 10 |
-| Customer | 3,000 per district, 30,000 per warehouse |
-| Order | Number of customers (initial value) |
-| New order | 30% of the orders (initial value) |
-| Order line | ~ 10 per order |
-| History | Number of customers (initial value) |
-
-The transaction mix represents the complete business processing of an order - from its entry through to its delivery. More specifically, the provided mix is designed to produce an equal number of new-order transactions and payment transactions and to produce a single delivery transaction, a single order-status transaction and a single stock-level transaction for every ten new-order transactions.
-
-**Table 2** TPC-C Transactions Ratio
-
-| Transaction Level ≥ 4% | Share of All Transactions |
-| :--------------------- | :------------------------ |
-| TPC-C New order | ≤ 45% |
-| Payment | ≥ 43% |
-| Order status | ≥ 4% |
-| Delivery | ≥ 4% (batch) |
-| Stock level | ≥ 4% |
-
-There are two ways to execute the transactions - **as stored procedures** (which allow higher throughput) and in **standard interactive SQL mode**.
-
-**Performance Metric - tpm-C**
-
-The tpm-C metric is the number of new-order transactions executed per minute. Given the required mix and a wide range of complexity and types among the transactions, this metric most closely simulates a comprehensive business activity, not just one or two transactions or computer operations. For this reason, the tpm-C metric is considered to be a measure of business throughput.
-
-The tpm-C unit of measure is expressed as transactions-per-minute-C, where "C" stands for the TPC-C-specific benchmark.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The official TPC-C Benchmark specification can be accessed at - .
-Some of the rules of this specification are generally not fulfilled in the industry, because they are too strict for industry reality. For example, Scaling rules - (a) tpm-C / Warehouse must be >9 and <12.86 (implying that a very high warehouse count is required in order to achieve a high tpm-C rate, which also means that an extremely large database and memory capacity are required); and (b) 10x terminals x Warehouses (implying a huge quantity of simulated clients).
-
-## System-Level Optimization
-
-Follow the instructions in the **MOT Server Optimization - x86** section. The following section describes the key system-level optimizations for deploying the MogDB database on a Huawei Taishan server and on a Euler 2.8 operating system for ultimate performance.
-
-## BenchmarkSQL - An Open-Source TPC-C Tool
-
-For example, to test TPC-C, **BenchmarkSQL** can be used, as follows -
-
-- Download **benchmarksql** from the following link -
-- The schema creation scripts in the **benchmarksql** tool need to be adjusted to MOT syntax, and unsupported DDLs need to be avoided. The adjusted scripts can be directly downloaded from the following link - . The contents of this tar file include the sql.common.mogdb.mot folder and the jTPCCTData.java file, as well as a sample configuration file postgresql.conf and a TPCC properties file props.mot for reference.
-- Place the sql.common.mogdb.mot folder at the same level as sql.common under the run folder and replace the file src/client/jTPCCTData.java with the downloaded java file.
-- Edit the file runDatabaseBuild.sh under the run folder to remove **extraHistID** from the **AFTER_LOAD** list in order to avoid an unsupported alter table DDL.
-- Replace the JDBC driver under the lib/postgres folder with the MogDB JDBC driver available from the following link - .
-
-The only change made in the downloaded java file (compared to the original one) was to comment out the error log printing for serialization and duplicate key errors. These errors are normal in the case of MOT, since it uses the Optimistic Concurrency Control (OCC) mechanism.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The benchmark test is executed using a standard interactive SQL mode without stored procedures.
-
-## Running the Benchmark
-
-Anyone can run the benchmark by starting up the server and running the **benchmarksql** scripts.
-
-To run the benchmark -
-
-1. Go to the **benchmarksql** run folder and rename sql.common to sql.common.orig.
-2. Create a link sql.common to sql.common.mogdb.mot in order to test MOT.
-3. Start up the database server.
-4. Configure the props.pg file in the client.
-5. Run the benchmark.
-
-## Results Report
-
-- Results in CLI
-
-  BenchmarkSQL results should appear as follows -
-
-  ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-sample-tpcc-benchmark-1.jpg)
-
-  Over time, the benchmark measures and averages the committed transactions. The example above benchmarks for two minutes.
-
-  The score is **2.71M tpm-C** (new-orders per minute), which is 45% of the total committed transactions, meaning the **tpmTOTAL**.
-
-- Detailed Result Report
-
-  The following is an example of a detailed result report -
-
-  **Figure 1** Detailed Result Report
-
-  ![detailed-result-report](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-sample-tpcc-benchmark-2.png)
-
-  ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-sample-tpcc-benchmark-3.png)
-
-  BenchmarkSQL collects detailed performance statistics and operating system performance data (if configured).
-
-  This information can show the latency of the queries, and thus expose bottlenecks related to storage/network/CPU.
-
-- Results of TPC-C of MOT on Huawei Taishan 2480
-
-  Our TPC-C benchmark dated 01-May-2020, with a MogDB database installed on a Taishan 2480 server (a 4-socket ARM/Kunpeng server), achieved a throughput of 4.79M tpm-C.
-
-  A near-linear scalability was demonstrated, as shown below -
-
-  **Figure 2** Results of TPC-C of MOT on Huawei Taishan 2480
-
-  ![results-of-tpc-c-of-mot-on-huawei-taishan-2480](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-sample-tpcc-benchmark-4.png)
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/using-mot.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/using-mot.md
deleted file mode 100644
index b1347196..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/2-using-mot/using-mot.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: Using MOT
-summary: Using MOT
-author: Guo Huan
-date: 2023-05-22
----
-
-# Using MOT
-
-This chapter describes how to deploy, use and manage MogDB MOT. Using MOT tables is quite simple. The syntax of all MOT commands is the same as for MogDB disk-based tables. Only the create and drop table statements in MOT differ from the statements for disk-based tables in MogDB. You may refer to this chapter in order to learn how to get started, how to convert a disk-based table into an MOT table, how to use advanced MOT features, such as Native Compilation (JIT) for Queries and Stored Procedures, execution of Cross-engine Transactions, as well as MOT's limitations and coverage. MOT administration options are also described here. This chapter also describes how to perform a TPC-C benchmark.
-
-+ **[Using MOT Overview](1-using-mot-overview.md)**
-+ **[MOT Preparation](2-mot-preparation.md)**
-+ **[MOT Deployment](3-mot-deployment.md)**
-+ **[MOT Usage](4-mot-usage.md)**
-+ **[MOT Administration](5-mot-administration.md)**
-+ **[MOT Sample TPC-C Benchmark](6-mot-sample-tpcc-benchmark.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-1.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-1.md
deleted file mode 100644
index 3b0a39a4..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-1.md
+++ /dev/null
@@ -1,90 +0,0 @@
----
-title: MOT Scale-up Architecture
-summary: MOT Scale-up Architecture
-author: Zhang Cuiping
-date: 2021-03-04
----
-
-# MOT Scale-up Architecture
-
-To **scale up** means to add additional cores to the *same machine* in order to add computing power. Scaling up is the most common traditional form of adding computing power to a machine that has a single pair of controllers and multiple cores. Scale-up architecture is limited by the scalability limits of a machine's controller.
-
-## Technical Requirements
-
-MOT has been designed to achieve the following -
-
-- **Linear Scale-up -** MOT delivers a transactional storage engine that utilizes all the cores of a single NUMA architecture server in order to provide near-linear scale-up performance. This means that MOT is targeted to achieve a direct, near-linear relationship between the quantity of cores in a machine and the multiples of performance increase.
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The near-linear scale-up results achieved by MOT significantly outperform all other existing solutions, and come as close as possible to achieving optimal results, which are limited by the physical restrictions and limitations of hardware, such as wires.
-
-- **No Maximum Number of Cores Limitation -** MOT does not place any limits on the maximum quantity of cores. This means that MOT is scalable from a single core up to 1,000s of cores, with minimal degradation per additional core, even when crossing NUMA socket boundaries.
-
-- **Extremely High Transactional Throughput -** MOT delivers a transactional storage engine that can achieve extremely high transactional throughput compared with any other OLTP vendor on the market.
-
-- **Extremely Low Transactional Latency -** MOT delivers a transactional storage engine that can reach extremely low transactional latency compared with any other OLTP vendor on the market.
-
-- **Seamless Integration and Leveraging with/of MogDB -** MOT integrates its transactional engine in a standard and seamless manner with the MogDB product. In this way, MOT reuses maximum functionality from the MogDB layers that are situated on top of its transactional storage engine.
-
-## Design Principles
-
-To achieve the requirements described above (especially in an environment with many cores), our storage engine's architecture implements the following techniques and strategies -
-
-- **Data and indexes only reside in memory**.
-- **Data and indexes are not laid out with physical partitions** (because these might achieve lower performance for certain types of applications).
-- Transaction concurrency control is based on **Optimistic Concurrency Control (OCC)** without any centralized contention points. See the **MOT Concurrency Control Mechanism** section for more information about OCC.
-- **Parallel Redo Logs (ultimately per core)** are used to efficiently avoid a central locking point.
-- **Indexes are lock-free**. See the **MOT Indexes** section for more information about lock-free indexes.
-- **NUMA-awareness memory allocation** is used to avoid cross-socket access, especially for session lifecycle objects. See the **NUMA Awareness Allocation and Affinity** section for more information about NUMA-awareness.
-- **A customized MOT memory management allocator** with pre-cached object pools is used to avoid expensive runtime allocation and extra points of contention. This dedicated MOT memory allocator makes memory allocation more efficient by pre-fetching relatively large chunks of memory from the operating system as needed and then handing it out to MOT as needed.
-
-## Integration using Foreign Data Wrappers (FDW)
-
-MOT complies with and leverages MogDB's standard extensibility mechanism - Foreign Data Wrapper (FDW), as shown in the following diagram.
-
-The PostgreSQL Foreign Data Wrapper (FDW) feature enables the creation of foreign tables in an MOT database that are proxies for some other data source, such as Oracle, MySQL, PostgreSQL and so on.
-When a query is made on a foreign table, the FDW queries the external data source and returns the results, as if they were coming from a table in your database.
-
-MogDB relies on the PostgreSQL Foreign Data Wrapper (FDW) and index support so that SQL is entirely covered, including stored procedures, user-defined functions and system function calls.
-
-**Figure 1** MOT Architecture
-
-![mot-architecture](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-scale-up-architecture-2.png)
-
-In the diagram above, the MOT engine is represented in green, while the existing MogDB (based on Postgres) components are represented in the top part of this diagram in blue. As you can see, the Foreign Data Wrapper (FDW) mediates between the MOT engine and the MogDB components.
-
-**MOT-Related FDW Customizations**
-
-Integrating MOT through FDW enables the reuse of the most upper-layer MogDB functionality and therefore significantly shortened MOT's time-to-market without compromising SQL coverage.
-
-However, the original FDW mechanism in MogDB was not designed for storage engine extensions, and therefore lacks the following essential functionalities -
-
-- Index awareness of foreign tables to be calculated in the query planning phase
-- Complete DDL interfaces
-- Complete transaction lifecycle interfaces
-- Checkpoint interfaces
-- Redo Log interface
-- Recovery interfaces
-- Vacuum interfaces
-
-In order to support all the missing functionalities, the SQL layer and FDW interface layer were extended to provide the necessary infrastructure in order to enable the plugging in of the MOT transactional storage engine.
-
-## Result - Linear Scale-up
-
-The following shows the results achieved by the MOT design principles and implementation described above.
-
-To the best of our knowledge, MOT outperforms all existing industry-grade OLTP databases in transactional throughput of ACID-compliant workloads.
-
-MogDB and MOT have been tested on the following many-core systems with excellent performance scalability results. The tests were performed both on x86 Intel-based and ARM/Kunpeng-based many-core servers. You may refer to the **MOT Performance Benchmarks** section for a more detailed performance review.
-
-Our TPC-C benchmark dated June 2020 tested a MogDB MOT database on a Taishan 2480 server, a 4-socket ARM/Kunpeng server, and achieved a throughput of 4.8M tpm-C. The following graph shows the near-linear nature of the results, meaning that it shows a significant increase in performance correlating to the increase of the quantity of cores -
-
-**Figure 2** TPC-C on ARM (256 Cores)
-
-![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-performance-benchmarks-12.png)
-
-The following is an additional example that shows a test on an x86-based server, also showing CPU utilization.
-
-**Figure 3** tpmC vs CPU Usage
-
-![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-performance-benchmarks-18.png)
-
-The chart shows that MOT demonstrates a significant performance increase correlating with an increase in the quantity of cores, and that MOT consumes more and more of the CPU as the quantity of cores increases. Other industry solutions do not scale, and sometimes show slightly degraded performance, which is a well-known problem in the database industry that affects customers' CAPEX and OPEX expenses and operational efficiency.
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-2.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-2.md
deleted file mode 100644
index aab31c2a..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-2.md
+++ /dev/null
@@ -1,179 +0,0 @@
----
-title: MOT Concurrency Control Mechanism
-summary: MOT Concurrency Control Mechanism
-author: Zhang Cuiping
-date: 2021-03-04
----
-
-# MOT Concurrency Control Mechanism
-
-After investing extensive research to find the best concurrency control mechanism, we concluded that SILO based on OCC is the best ACID-compliant OCC algorithm for MOT. SILO provides the best foundation for MOT's challenging requirements.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** MOT is fully Atomicity, Consistency, Isolation, Durability (ACID)-compliant, as described in the **MOT Introduction** section.
-
-The following topics describe MOT's concurrency control mechanism -
-
-## MOT Local and Global Memory
-
-SILO manages both a local memory and a global memory, as shown in Figure 1.
-
-- **Global** memory is long-term memory that is shared by all cores and is used primarily to store all the table data and indexes.
-- **Local** memory is short-term memory that is used primarily by sessions for handling transactions and for storing data changes in memory that is private to the transaction, until the commit phase.
-
-When a transaction change is required, SILO handles the copying of all that transaction's data from the global memory into the local memory. Minimal locks are placed on the global memory according to the OCC approach, so that the contention time in the global shared memory is extremely minimal. After the transaction's changes have been completed, this data is pushed back from the local memory to the global memory.
-
-The basic interactive transactional flow with our SILO-enhanced concurrency control is shown in the figure below -
-
-**Figure 1** Private (Local) Memory (for each transaction) and a Global Memory (for all the transactions of all the cores)
-
-![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-concurrency-control-mechanism-2.png)
-
-For more details, refer to the Industrial-Strength OLTP Using Main Memory and Many-cores document [**Comparison - Disk vs. MOT**].
-
-## MOT SILO Enhancements
-
-SILO in its basic algorithm flow outperformed many other ACID-compliant OCCs that we tested in our research experiments. However, in order to make it a product-grade mechanism, we had to enhance it with many essential functionalities that were missing in the original design, such as -
-
-- Added support for interactive mode transactions, where transactions are running SQL by SQL from the client side and not as a single step on the server side
-- Added optimistic inserts
-- Added support for non-unique indexes
-- Added support for read-after-write in transactions so that users can see their own changes before they are committed
-- Added support for lockless cooperative garbage collection
-- Added support for lockless checkpoints
-- Added support for fast recovery
-- Added support for two-phase commit in a distributed deployment
-
-Adding these enhancements without breaking the scalable characteristic of the original SILO was very challenging.
-
-## MOT Isolation Levels
-
-Even though MOT is fully ACID-compliant (as described above), not all isolation levels are supported in MogDB 2.1.
The following table describes all isolation levels, as well as what is and what is not supported by MOT. - -**Table 1** Isolation Levels - -| Isolation Level | Description | -| :--------------- | :----------------------------------------------------------- | -| READ UNCOMMITTED | **Not supported by MOT.** | -| READ COMMITTED | **Supported by MOT.**
The READ COMMITTED isolation level that guarantees that any data that is read was already committed when it was read. It simply restricts the reader from seeing any intermediate, uncommitted or dirty reads. Data is free to be changed after it has been read so that READ COMMITTED does not guarantee that if the transaction re-issues the read, that the same data will be found. | -| SNAPSHOT | **Not supported by MOT.**
The SNAPSHOT isolation level makes the same guarantees as SERIALIZABLE, except that concurrent transactions can modify the data. Instead, it forces every reader to see its own version of the world (its own snapshot). This makes it very easy to program, plus it is very scalable, because it does not block concurrent updates. However, in many implementations this isolation level requires higher server resources. | -| REPEATABLE READ | **Supported by MOT.**
REPEATABLE READ is a higher isolation level that (in addition to the guarantees of the READ COMMITTED isolation level) guarantees that any data that is read cannot change. If a transaction reads the same data again, it will find the same previously read data in place, unchanged and available to be read.
Because of the optimistic model, concurrent transactions are not prevented from updating rows read by this transaction. Instead, at commit time this transaction validates that the REPEATABLE READ isolation level has not been violated. If it has, this transaction is rolled back and must be retried. | -| SERIALIZABLE | **Not supported by MOT**.
Serializable isolation makes an even stronger guarantee. In addition to everything that the REPEATABLE READ isolation level guarantees, it also guarantees that no new data can be seen by a subsequent read.
It is named SERIALIZABLE because the isolation is so strict that it is almost as if the transactions were run in series rather than concurrently. | - -The following table shows the concurrency side effects enabled by the different isolation levels. - -**Table 2** Concurrency Side Effects Enabled by Isolation Levels - -| Isolation Level | Dirty Read | Non-repeatable Read | Phantom | -| :--------------- | :---------- | :------------------ | :------ | -| READ UNCOMMITTED | Yes | Yes | Yes | -| READ COMMITTED | No | Yes | Yes | -| REPEATABLE READ | No | No | Yes | -| SNAPSHOT | No | No | No | -| SERIALIZABLE | No | No | No | - -In a future release, MogDB MOT will also support the SNAPSHOT and SERIALIZABLE isolation levels. - -## MOT Optimistic Concurrency Control - -The Concurrency Control Module (CC Module for short) implements all the transactional requirements of the Main Memory Engine. The primary objective of the CC Module is to provide the Main Memory Engine with support for various isolation levels. - -### Optimistic OCC vs. Pessimistic 2PL - -Pessimistic 2PL (2-Phase Locking) and Optimistic Concurrency Control (OCC) represent pessimistic and optimistic approaches, respectively, to transaction integrity. - -Disk-based tables use a pessimistic approach, which is the most commonly used database method. The MOT Engine uses an optimistic approach. - -The primary functional difference between the pessimistic approach and the optimistic approach is that if a conflict occurs - - -- The pessimistic approach causes the client to wait. -- The optimistic approach causes one of the transactions to fail, so that the failed transaction must be retried by the client. - -**Optimistic Concurrency Control Approach (Used by MOT)** - -The **Optimistic Concurrency Control (OCC)** approach detects conflicts as they occur, and performs validation checks at commit time. - -The optimistic approach has less overhead and is usually more efficient, partly because transaction conflicts are uncommon in most applications. - -The functional difference between the optimistic and pessimistic approaches is larger when the REPEATABLE READ isolation level is enforced and is largest for the SERIALIZABLE isolation level. - -**Pessimistic Approaches (Not used by MOT)** - -The **Pessimistic Concurrency Control** (2PL or 2-Phase Locking) approach uses locks to block potential conflicts before they occur. A lock is applied when a statement is executed and released when the transaction is committed. Disk-based row-stores use this approach (with the addition of Multi-version Concurrency Control [MVCC]). - -In 2PL algorithms, while a transaction is writing a row, no other transaction can access it; and while a row is being read, no other transaction can overwrite it. Each row is locked at access time for both reading and writing; and the lock is released at commit time. These algorithms require a scheme for handling or avoiding deadlocks. Deadlocks can be detected by calculating cycles in a wait-for graph, and avoided by keeping time ordering using TSO (timestamp ordering) or by some kind of back-off scheme. - -**Encounter Time Locking (ETL)** - -Another approach is Encounter Time Locking (ETL), where reads are handled in an optimistic manner, but writes lock the data that they access. As a result, writes from different ETL transactions are aware of each other and can decide to abort.
It has been empirically verified that ETL improves the performance of OCC in two ways - - -- First, ETL detects conflicts early on and often increases transaction throughput. This is because transactions do not perform useless operations, since conflicts discovered at commit time (in general) cannot be solved without aborting at least one transaction. -- Second, with encounter-time locking, Reads-After-Writes (RAW) are handled efficiently without requiring expensive or complex mechanisms. - -**Conclusion** - -OCC is the fastest option for most workloads. This finding was also observed in our preliminary research phase. - -One of the reasons is that when every core executes multiple threads, a lock is likely to be held by a swapped thread, especially in interactive mode. Another reason is that pessimistic algorithms involve deadlock detection (which introduces overhead) and usually use read-write locks (which are less efficient than standard spin-locks). - -We chose SILO because it was simpler than other existing options, such as TicToc, while maintaining the same performance for most workloads. ETL is sometimes faster than OCC, but it introduces spurious aborts which may confuse a user, in contrast to OCC, which aborts only at commit. - -### OCC vs 2PL Differences by Example - -The following shows the differences between two user experiences - Pessimistic (for disk-based tables) and Optimistic (MOT tables) - when sessions update the same table simultaneously. - -In this example, the following command is run to create the table **test** - - -``` -create table test (x int, y int, z int, primary key(x)); -``` - -This example describes two aspects of the same test - user experience (operations in the example) and retry requirements. - -**Example Pessimistic Approach - Used in Disk-based Tables** - -The following is an example of the Pessimistic approach (which is not MOT). Any isolation level may apply. - -The following two sessions perform a transaction that attempts to update a single table. - -A WAIT LOCK action occurs and the client experience is that session #2 is *stuck* until Session #1 has completed a COMMIT. Only afterwards is Session #2 able to progress. - -When this approach is used, both sessions succeed and no abort occurs - unless the SERIALIZABLE or REPEATABLE READ isolation level is applied, in which case an abort occurs and the entire transaction must be retried. - -**Table 1** Pessimistic Approach Code Example - -| | Session 1 | Session 2 | -| :--- | :------------------------------- | :----------------------------------------------------------- | -| t0 | Begin | Begin | -| t1 | update test set y=200 where x=1; | | -| t2 | y=200 | Update test set y=300 where x=1; - Wait on lock | -| t4 | Commit | | -| | | Unlock | -| | | Commit (in READ-COMMITTED this will succeed, in SERIALIZABLE it will fail) | -| | | y = 300 | - -**Example Optimistic Approach - Used in MOT** - -The following is an example of the Optimistic approach. - -It describes the situation of creating an MOT table and then having two concurrent sessions updating that same MOT table simultaneously - - -``` -create foreign table test (x int, y int, z int, primary key(x)); -``` - -- The advantage of OCC is that there are no locks until COMMIT. -- The disadvantage of using OCC is that the update may fail if another session updates the same record. If the update fails (in all supported isolation levels), the entire SESSION #2 transaction must be retried.
- -- Update conflicts are detected by the kernel at commit time by using a version checking mechanism. -- SESSION #2 does not wait in its update operation and is aborted because of conflict detection at the commit phase. - -**Table 2** Optimistic Approach Code Example - Used in MOT - -| | Session 1 | Session 2 | -| :--- | :------------------------------- | :------------------------------- | -| t0 | Begin | Begin | -| t1 | update test set y=200 where x=1; | | -| t2 | y=200 | Update test set y=300 where x=1; | -| t4 | Commit | y = 300 | -| | | Commit | -| | | ABORT | -| | | y = 200 | diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-3.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-3.md deleted file mode 100644 index 3af83103..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-3.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Extended FDW and Other MogDB Features -summary: Extended FDW and Other MogDB Features -author: Zhang Cuiping -date: 2021-03-04 ---- - -# Extended FDW and Other MogDB Features - -MogDB is based on PostgreSQL, which does not have a built-in storage engine adapter, such as MySQL handlerton. To enable the integration of the MOT storage engine into MogDB, we have leveraged and extended the existing Foreign Data Wrapper (FDW) mechanism. With the introduction of FDW into PostgreSQL 9.1, externally managed databases can now be accessed in a way that presents these foreign tables and data sources as unified, locally accessible relations. - -In contrast, the MOT storage engine is embedded inside MogDB and its tables are managed by it. Access to tables is controlled by the MogDB planner and executor. MOT gets logging and checkpointing services from MogDB and participates in the MogDB recovery process, in addition to other processes. - -We refer to all the components that are in use or are accessing the MOT storage engine as the *Envelope*. - -The following figure shows how the MOT storage engine is embedded inside MogDB and its bi-directional access to database functionality. - -**Figure 1** MOT Storage Engine Embedded inside MogDB - FDW Access to External Databases - -![mot-architecture](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-scale-up-architecture-2.png) - -We have extended the capabilities of FDW by extending and modifying the FdwRoutine structure in order to introduce features and calls that were not required before the introduction of MOT. For example, support for the following new features was added - Add Index, Drop Index/Table, Truncate, Vacuum and Table/Index Memory Statistics. A significant emphasis was put on integration with MogDB logging, replication and checkpointing mechanisms in order to provide consistency for cross-table transactions through failures. In some cases, MOT itself initiates calls to MogDB functionality through the FDW layer. - -## Creating Tables and Indexes - -In order to support the creation of MOT tables, standard FDW syntax was reused. - -For example, create FOREIGN table. - -The MOT FDW mechanism passes the instruction to the MOT storage engine for actual table creation. Similarly, we support index creation (create index …). This feature was not previously available in FDW, because it was not needed - FDW tables are managed externally. - -To support both in MOT FDW, the **ValidateTableDef** function actually creates the specified table.
It also handles the index creation of that relation, as well as DROP TABLE and DROP INDEX, in addition to VACUUM and ALTER TABLE, which were not previously supported in FDW. - -## Index Usage for Planning and Execution - -A query has two phases - **Planning** and **Execution**. During the Planning phase (which may take place once per multiple executions), the best index for the scan is chosen. This choice is based on matching the query's WHERE clauses, JOIN clauses and ORDER BY conditions. During execution, a query iterates over the relevant table rows and performs various tasks, such as update or delete, per iteration. An insert is a special case where the table adds the row to all indexes and no scanning is required. - -- **Planner -** In standard FDW, a query is passed for execution to a foreign data source. This means that index filtering and the actual planning (such as the choice of indexes) are not performed locally in the database; rather, they are performed in the external data source. Internally, the FDW returns a general plan to the database planner. MOT tables are handled in a manner similar to disk tables. This means that relevant MOT indexes are filtered and matched, and the indexes that minimize the set of traversed rows are selected and added to the plan. -- **Executor -** The Query Executor uses the chosen MOT index in order to iterate over the relevant rows of the table. Each row is inspected by the MogDB envelope, and according to the query conditions, an update or delete is called to handle the relevant row. - -## Durability, Replication and High Availability - -A storage engine is responsible for storing, reading, updating and deleting data in the underlying memory and storage systems. Logging, checkpointing and recovery are not handled by the storage engine, especially because some transactions encompass multiple tables with different storage engines. Therefore, in order to persist and replicate data, the high-availability facilities from the MogDB envelope are used as follows - - -- **Durability -** In order to ensure Durability, the MOT engine persists data by writing Write-Ahead Logging (WAL) records using MogDB's XLOG interface. This also provides the benefits of MogDB's replication capabilities that use the same APIs. You may refer to the **MOT Durability Concepts** section for more information. -- **Checkpointing -** An MOT Checkpoint is enabled by registering a callback to the MogDB Checkpointer. Whenever a general database Checkpoint is performed, the MOT Checkpoint process is called as well. MOT keeps the Checkpoint's Log Sequence Number (LSN) in order to be aligned with MogDB recovery. The MOT Checkpointing algorithm is highly optimized and asynchronous and does not stop concurrent transactions. You may refer to the **MOT Checkpoint Concepts** section for more information. -- **Recovery -** Upon startup, MogDB first calls an MOT callback that recovers the MOT Checkpoint by loading rows into memory and creating indexes, followed by the execution of the WAL recovery by replaying records according to the Checkpoint's LSN. The MOT Checkpoint is recovered in parallel using multiple threads - each thread reads a different data segment. This makes MOT Checkpoint recovery quite fast on many-core hardware, though it is still potentially slower compared to disk-based tables, where only WAL records are replayed. You may refer to the **MOT Recovery Concepts** section for more information.
- -## VACUUM and DROP - -In order to maximize MOT functionality, we added support for VACUUM, DROP TABLE and DROP INDEX. All three execute with an exclusive table lock, meaning without allowing concurrent transactions on the table. The system VACUUM calls a new FDW function to perform the MOT vacuuming, while DROP was added to the ValidateTableDef() function. - -## Deleting Memory Pools - -Each index and table tracks all the memory pools that it uses. A DROP INDEX command first removes the metadata; the memory pools are then deleted as a single consecutive block. The MOT VACUUM only compacts used memory, because memory reclamation is performed continuously in the background by the epoch-based Garbage Collector (GC). In order to perform the compaction, we switch the index or the table to new memory pools, traverse all the live data, delete each row and insert it using the new pools, and finally delete the pools as is done for a drop. - -## Query Native Compilation (JIT) - -The FDW adapter to the MOT engine also contains a lite execution path that employs Just-In-Time (JIT) compiled query execution using the LLVM compiler. More information about MOT Query Native Compilation can be found in the **Query Native Compilation (JIT)** section. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-4.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-4.md deleted file mode 100644 index 0194a6b5..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-4.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: NUMA Awareness Allocation and Affinity -summary: NUMA Awareness Allocation and Affinity -author: Zhang Cuiping -date: 2021-03-04 ---- - -# NUMA Awareness Allocation and Affinity - -Non-Uniform Memory Access (NUMA) is a computer memory design used in multiprocessing, where the memory access time depends on the memory location relative to the processor. Under NUMA, a processor takes advantage of the design by preferring to access its own local memory (which is faster), rather than accessing non-local memory (meaning that it prefers **not** to access the local memory of another processor or memory shared between processors). - -MOT memory access has been designed with NUMA awareness. This means that MOT is aware that memory is not uniform and achieves the best performance by accessing the quickest and most local memory. - -The benefits of NUMA are limited to certain types of workloads, particularly on servers where the data is often strongly associated with certain tasks or users. - -In-memory database systems running on NUMA platforms face several issues, such as increased latency and decreased bandwidth when accessing remote main memory. To cope with these NUMA-related issues, NUMA awareness must be considered as a major design principle for the fundamental architecture of a database system. - -To facilitate quick operation and make efficient use of NUMA nodes, MOT allocates a designated memory pool for rows per table and for nodes per index. Each memory pool is composed of 2 MB chunks. A designated API allocates these chunks from a local NUMA node, from pages coming from all nodes, or in a round-robin fashion, where each chunk is allocated on the next node. By default, pools of shared data are allocated in a round-robin fashion in order to balance access, while not splitting rows between different NUMA nodes. However, thread private memory is allocated from a local node.
It must also be verified that a thread always operates in the same NUMA node. - -**Summary** - -MOT has a smart memory control module with preallocated memory pools intended for various types of memory objects. This smart memory control improves performance, reduces locks and ensures stability. The allocation of the memory objects of a transaction is always NUMA-local, ensuring optimal performance for CPU memory access and resulting in low latency and reduced contention. Deallocated objects go back to the memory pool. Minimized use of OS malloc functions during transactions circumvents unnecessary locks. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-5.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-5.md deleted file mode 100644 index 016a23dd..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-5.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: MOT Indexes -summary: MOT Indexes -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Indexes - -The MOT index is a lock-free index based on state-of-the-art Masstree, a fast and scalable Key Value (KV) store for multicore systems, implemented as tries of B+ trees. It achieves excellent performance on many-core servers and highly concurrent workloads. It uses various advanced techniques, such as an optimistic lock approach, cache-awareness and memory prefetching. - -After comparing various state-of-the-art solutions, we chose Masstree for the index because it demonstrated the best overall performance for point queries, iterations and modifications. Masstree is a combination of tries and a B+ tree that is implemented to carefully exploit caching, prefetching, optimistic navigation and fine-grained locking. It is optimized for high contention and adds various optimizations to its predecessors, such as OLFIT. However, the downside of a Masstree index is its higher memory consumption. While row data consumes the same memory size, the memory per row for each index (primary or secondary) is higher on average by 16 bytes - 29 bytes in the lock-based B-Tree used in disk-based tables vs. 45 bytes in MOT's Masstree. - -Our empirical experiments showed that the combination of the mature lock-free Masstree implementation and our robust improvements to SILO provided exactly what we needed in that regard. - -Another challenge was making an optimistic insertion into a table with multiple indexes. - -The Masstree index is at the core of the MOT memory layout for data and index management. Our team enhanced and significantly improved Masstree and submitted some of the key contributions to the Masstree open source. These improvements include - - -- Dedicated memory pools per index - Efficient allocation and fast index drop -- Global GC for Masstree - Fast, on-demand memory reclamation -- Masstree iterator implementation with access to an insertion key -- ARM architecture support - -We contributed our Masstree index improvements to the Masstree open-source implementation, which can be found here - . - -MOT's main innovation was to enhance the original Masstree data structure and algorithm, which did not support Non-Unique Indexes (as a Secondary index). You may refer to the **Non-unique Indexes** section for the design details. - -MOT supports Primary, Secondary and Keyless indexes (subject to the limitations specified in the **Unsupported Index DDLs and Index** section).
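As a brief illustration (the table and index names below are hypothetical), the primary key supplied when an MOT table is created produces the primary index, and a secondary index is added with a standard **create index** statement, as described in the **Extended FDW and Other MogDB Features** section -

```
-- Hypothetical MOT table; the primary key implicitly creates the primary (unique) index.
create foreign table orders (id int, customer int, total int, primary key(id));

-- A secondary index, created with the standard syntax; it may be non-unique.
create index idx_orders_customer on orders (customer);
```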
- -## Non-unique Indexes - -A non-unique index may contain multiple rows with the same key. Non-unique indexes are used solely to improve query performance by maintaining a sorted order of data values that are used frequently. For example, a database may use a non-unique index to group all people from the same family. However, the Masstree data structure implementation does not allow the mapping of multiple objects to the same key. Our solution for enabling the creation of non-unique indexes (as shown in the figure below) is to add a symmetry-breaking suffix to the key, which maps the row. This added suffix is the pointer to the row itself, which has a constant size of 8 bytes and a value that is unique to the row. When inserting into a non-unique index, the insertion of the sentinel always succeeds, which enables the row allocated by the executing transaction to be used. This approach also enables MOT to have a fast, reliable, order-based iterator for a non-unique index. - -**Figure 1** Non-unique Indexes - -![non-unique-indexes](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-indexes-2.png) - -The structure of an MOT table T that has three rows and two indexes is depicted in the figure above. The rectangles represent data rows, and the indexes point to sentinels (the elliptic shapes), which point to the rows. The sentinels are inserted into unique indexes with a key and into non-unique indexes with a key + a suffix. The sentinels facilitate maintenance operations so that the rows can be replaced without touching the index data structure. In addition, there are various flags and a reference count embedded in the sentinel in order to facilitate optimistic inserts. - -When searching a non-unique secondary index, the required key (for example, the family name) is used. The fully concatenated key is only used for insert and delete operations. Insert and delete operations always get a row as a parameter, thereby making it possible to create the entire key and to use it in the execution of the deletion or the insertion of the specific row for the index. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-6.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-6.md deleted file mode 100644 index 11005768..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-6.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -title: MOT Durability Concepts -summary: MOT Durability Concepts -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Durability Concepts - -Durability refers to long-term data protection (also known as *disk persistence*). Durability means that stored data does not suffer from any kind of degradation or corruption, so that data is never lost or compromised. Durability ensures that data and the MOT engine are restored to a consistent state after a planned shutdown (for example, for maintenance) or an unplanned crash (for example, a power failure). - -Memory storage is volatile, meaning that it requires power to maintain the stored information. Disk storage, on the other hand, is non-volatile, meaning that it does not require power to maintain stored information, and thus can survive a power shutdown. MOT uses both types of storage - it has all data in memory, while persisting transactional changes to disk (**MOT Durability**) and maintaining frequent periodic **MOT Checkpoints** in order to ensure data recovery in case of shutdown.
- -The user must ensure sufficient disk space for the logging and Checkpointing operations. A separate drive can be used for the Checkpoint to improve performance by reducing disk I/O load. - -You may refer to the **MOT Key Technologies** section for an overview of how durability is implemented in the MOT engine. - -MOT's WAL Redo Log and Checkpoints enable durability, as described below - - -- **MOT Logging - WAL Redo Log Concepts** -- **MOT Checkpoint Concepts** - -## MOT Logging - WAL Redo Log Concepts - -### Overview - -Write-Ahead Logging (WAL) is a standard method for ensuring data durability. The main concept of WAL is that changes to data files (where tables and indexes reside) are only written after those changes have been logged, meaning only after the log records that describe the changes have been flushed to permanent storage. - -MOT is fully integrated with the MogDB envelope logging facilities. In addition to durability, another benefit of this method is the ability to use the WAL for replication purposes. - -Three logging methods are supported - two standard ones, Synchronous and Asynchronous, which are also supported by the standard MogDB disk engine, and a Group-Commit option provided by MOT with a special NUMA-awareness optimization. Group-Commit provides the best performance while maintaining ACID properties. - -To ensure Durability, MOT is fully integrated with MogDB's Write-Ahead Logging (WAL) mechanism, so that MOT persists data in WAL records using MogDB's XLOG interface. This means that every addition, update, and deletion to an MOT table's record is recorded as an entry in the WAL. This ensures that the most current data state can be regenerated and recovered from this non-volatile log. For example, if three new rows were added to a table, two were deleted and one was updated, then six entries would be recorded in the log. - -- MOT log records are written to the same WAL as the other records of MogDB disk-based tables. - -- MOT only logs an operation at the transaction commit phase. - -- MOT only logs the updated delta record in order to minimize the amount of data written to disk. - -- During recovery, data is loaded from the last known or a specific Checkpoint; and then the WAL Redo log is used to complete the data changes that occur from that point forward. - -- The WAL (Redo Log) retains all the table row modifications until a Checkpoint is performed (as described above). The log can then be truncated in order to reduce recovery time and to save disk space. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** In order to ensure that the log IO device does not become a bottleneck, the log file must be placed on a drive that has low latency. - -### Logging Types - -Two synchronous transaction logging options and one asynchronous transaction logging option are supported (these are also supported by the standard MogDB disk engine). MOT also supports synchronous Group Commit logging with NUMA-awareness optimization, as described below. - -According to your configuration, one of the following types of logging is implemented: - -- **Synchronous Redo Logging** - - The **Synchronous Redo Logging** option is the simplest and most strict redo logger. When a transaction is committed by a client application, the transaction redo entries are recorded in the WAL (Redo Log), as follows - - - 1. While a transaction is in progress, it is stored in the MOT’s memory. - 2.
After a transaction finishes and the client application sends a **Commit** command, the transaction is locked and then written to the WAL Redo Log on the disk. This means that while the transaction log entries are being written to the log, the client application is still waiting for a response. - 3. As soon as the transaction's entire buffer is written to the log, the changes to the data in memory take place and then the transaction is committed. After the transaction has been committed, the client application is notified that the transaction is complete. - -- **Technical Description** - - When a transaction ends, the SynchronousRedoLogHandler serializes its transaction buffer and writes it to the XLOG iLogger implementation. - - **Figure 1** Synchronous Logging - - ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-durability-concepts-6.png) - - **Summary** - - The **Synchronous Redo Logging** option is the safest and most strict because it ensures total synchronization of the client application and the WAL Redo log entries for each transaction as it is committed, thus ensuring total durability and consistency with absolutely no data loss. This logging option prevents the situation where a client application might mark a transaction as successful when it has not yet been persisted to disk. - - The downside of the **Synchronous Redo Logging** option is that it is the slowest logging mechanism of the three options. This is because a client application must wait until all data is written to disk and because of the frequent disk writes (which typically slow down the database). - -- **Group Synchronous Redo Logging** - - The **Group Synchronous Redo Logging** option is very similar to the **Synchronous Redo Logging** option, because it also ensures total durability with absolutely no data loss and total synchronization of the client application and the WAL (Redo Log) entries. The difference is that the **Group Synchronous Redo Logging** option writes *groups* of transaction redo entries to the WAL Redo Log on the disk at the same time, instead of writing each and every transaction as it is committed. Using Group Synchronous Redo Logging reduces the number of disk I/Os and thus improves performance, especially when running a heavy workload. - - The MOT engine performs synchronous Group Commit logging with Non-Uniform Memory Access (NUMA)-awareness optimization by automatically grouping transactions according to the NUMA socket of the core on which the transaction is running. - - You may refer to the **NUMA Awareness Allocation and Affinity** section for more information about NUMA-aware memory access. - - When a transaction commits, a group of entries is recorded in the WAL Redo Log, as follows - - - 1. While a transaction is in progress, it is stored in the memory. The MOT engine groups transactions in buckets according to the NUMA socket of the core on which the transaction is running. This means that all the transactions running on the same socket are grouped together and that multiple groups fill up in parallel according to the core on which each transaction is running. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > Each thread runs on a single core/CPU which belongs to a single socket and each thread only writes to the socket of the core on which it is running. - - 2.
After a transaction finishes and the client application sends a Commit command, the transaction redo log entries are serialized together with those of other transactions that belong to the same group. - - 3. After the configured criteria are fulfilled for a specific group of transactions (quantity of committed transactions or timeout period, as described in the **REDO LOG (MOT)** section), the transactions in this group are written to the WAL on the disk. This means that while these log entries are being written to the log, the client applications that issued the commit are waiting for a response. - - 4. As soon as all the transaction buffers in the NUMA-aware group have been written to the log, all the transactions in the group perform the necessary changes to the memory store and the clients are notified that these transactions are complete. - - Writing transactions to the WAL is more efficient in this manner because all the buffers from the same socket are written to disk together. - - **Technical Description** - - In the figure below, the four colors represent four NUMA nodes. Each NUMA node has its own memory log, enabling a group commit of multiple connections. - - **Figure 2** Group Commit - with NUMA-awareness - - ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-durability-concepts-7.png) - - **Summary** - - The **Group Synchronous Redo Logging** option is an extremely safe and strict logging option because it ensures total synchronization of the client application and the WAL Redo log entries, thus ensuring total durability and consistency with absolutely no data loss. This logging option prevents the situation where a client application might mark a transaction as successful when it has not yet been persisted to disk. - - On the one hand, this option has fewer disk writes than the **Synchronous Redo Logging** option, which may mean that it is faster. The downside is that transactions are locked for longer, meaning that they are locked until after all the transactions in the same NUMA memory have been written to the WAL Redo Log on the disk. - - The benefits of using this option depend on the type of transactional workload. For example, this option benefits systems that have many transactions (and less so systems that have few transactions, because there are few disk writes anyway). - -- **Asynchronous Redo Logging** - - The **Asynchronous Redo Logging** option is the fastest logging method. However, it does not ensure that no data is lost, meaning that some data that is still in the buffer and was not yet written to disk may get lost upon a power failure or database crash. When a transaction is committed by a client application, the transaction redo entries are recorded in internal buffers and written to disk at preconfigured intervals. The client application does not wait for the data to be written to disk. It continues to the next transaction. This is what makes asynchronous redo logging the fastest logging method. - - When a transaction is committed by a client application, the transaction redo entries are recorded in the WAL Redo Log, as follows - - - 1. While a transaction is in progress, it is stored in the MOT's memory. - 2. After a transaction finishes and the client application sends a Commit command, the transaction redo entries are written to internal buffers, but are not yet written to disk. Then changes to the MOT data memory take place and the client application is notified that the transaction is committed. - 3.
At a preconfigured interval, a redo log thread running in the background collects all the buffered redo log entries and writes them to disk. - - **Technical Description** - - Upon transaction commit, the transaction buffer is moved (pointer assignment - not a data copy) to a centralized buffer and a new transaction buffer is allocated for the transaction. The transaction is released as soon as its buffer is moved to the centralized buffer, and the transaction thread is not blocked. The actual write to the log uses the Postgres walwriter thread. When the walwriter timer elapses, it first calls the AsynchronousRedoLogHandler (via a registered callback) to write its buffers and then continues with its logic and flushes the data to the XLOG. - - **Figure 3** Asynchronous Logging - - ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-durability-concepts-8.png) - - **Summary** - - The Asynchronous Redo Logging option is the fastest logging option because it does not require the client application to wait for data to be written to disk. In addition, it groups the redo entries of many transactions and writes them together, thus reducing the number of disk I/Os that slow down the MOT engine. - - The downside of the Asynchronous Redo Logging option is that it does not ensure that data will not get lost upon a crash or failure. Data that was committed, but was not yet written to disk, is not durable on commit and thus cannot be recovered in case of a failure. The Asynchronous Redo Logging option is most relevant for applications that are willing to sacrifice data recovery (consistency) in favor of performance. - - **Logging Design Details** - - The following describes the design details of each persistence-related component in the In-Memory Engine Module. - - **Figure 4** Three Logging Options - - ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-durability-concepts-9.png) - - The RedoLog component is used both by backend threads that use the In-Memory Engine and by the WAL writer in order to persist their data. Checkpoints are performed using the Checkpoint Manager, which is triggered by the Postgres checkpointer. - -- **Logging Design Overview** - - Write-Ahead Logging (WAL) is a standard method for ensuring data durability. WAL's central concept is that changes to data files (where tables and indexes reside) are only written after those changes have been logged, meaning after the log records that describe these changes have been flushed to permanent storage. - - The MOT Engine uses the existing MogDB logging facilities, enabling it also to participate in the replication process. - -- **Per-transaction Logging** - - In the In-Memory Engine, the transaction log records are stored in a transaction buffer which is part of the transaction object (TXN). The transaction buffer is logged during the calls to addToLog() - if the buffer exceeds a threshold, it is flushed and reused. When a transaction commits and passes the validation phase (OCC SILO validation - see **Comparison - Disk vs. MOT**) or aborts for some reason, the appropriate message is saved in the log as well, in order to make it possible to determine the transaction's state during a recovery. - - **Figure 5** Per-transaction Logging - - ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/mot-durability-concepts-10.png) - - Parallel logging is performed by both the MOT and disk engines.
However, the MOT engine enhances this design with a log-buffer per transaction, lockless preparation and a single log record. - -- **Exception Handling** - - The persistence module handles exceptions by using the Postgres error reporting infrastructure (ereport). An error message is recorded in the system log for each error condition. In addition, the error is reported to the envelope using Postgres’s built-in error reporting infrastructure. - - The following exceptions are reported by this module - - - **Table 1** Exception Handling - - | Exception Condition | Exception Code | Scenario | Resulting Outcome | - | :----------------------------------- | :----------------------------- | :----------------------------------------------------------- | :--------------------- | - | WAL write failure | ERRCODE_FDW_ERROR | Any case in which the WAL write fails | Transaction terminates | - | File IO error: write, open and so on | ERRCODE_IO_ERROR | Checkpoint - Called on any file access error | FATAL - process exits | - | Out of Memory | ERRCODE_INSUFFICIENT_RESOURCES | Checkpoint - Local memory allocation failures | FATAL - process exits | - | Logic, DB errors | ERRCODE_INTERNAL_ERROR | Checkpoint - algorithm fails or failure to retrieve table data or indexes | FATAL - process exits | - -## MOT Checkpoint Concepts - -In MogDB, a Checkpoint is a snapshot of a point in the sequence of transactions at which it is guaranteed that the heap and index data files have been updated with all information written before the checkpoint. - -At the time of a Checkpoint, all dirty data pages are flushed to disk and a special checkpoint record is written to the log file. - -In MOT, the data is stored directly in memory. MOT does not store its data in the same way as MogDB, so the concept of dirty pages does not exist. - -For this reason, we researched and implemented the CALC algorithm, which is described in the paper Low-Overhead Asynchronous Checkpointing in Main-Memory Database Systems (SIGMOD 2016) from Yale University. - -### CALC Checkpoint Algorithm - Low Overhead in Memory and Compute - -The checkpoint algorithm provides the following benefits - - -- **Reduced Memory Usage -** At most two copies of each record are stored at any time. Memory usage is minimized by only storing a single physical copy of a record while its live and stable versions are equal or when no checkpoint is actively being recorded. -- **Low Overhead -** CALC's overhead is smaller than that of other asynchronous checkpointing algorithms. -- **Uses Virtual Points of Consistency -** CALC does not require quiescing of the database in order to achieve a physical point of consistency. - -### Checkpoint Activation - -MOT checkpoints are integrated into the MogDB envelope's Checkpoint mechanism. The Checkpoint process can be triggered manually by executing the **CHECKPOINT;** command or automatically according to the envelope's Checkpoint triggering settings (time/size). - -Checkpoint configuration is performed in the mot.conf file - see the **CHECKPOINT (MOT)** section.
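As a minimal illustration of the activation described above, a Checkpoint (including the registered MOT Checkpoint callback) can be triggered manually from any SQL session -

```
-- Manually trigger a MogDB Checkpoint; the MOT Checkpoint process
-- is invoked as well through its registered callback.
CHECKPOINT;
```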
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-7.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-7.md deleted file mode 100644 index 1cb8bcb9..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-7.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: MOT Recovery Concepts -summary: MOT Recovery Concepts -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Recovery Concepts - -The MOT Recovery Module provides all the required functionality for recovering MOT table data. The main objective of the Recovery module is to restore the data and the MOT engine to a consistent state after a planned shutdown (for example, for maintenance) or an unplanned crash (for example, a power failure). - -MogDB database recovery, which is also sometimes called a *Cold Start*, includes MOT tables and is performed automatically with the recovery of the rest of the database. The MOT Recovery Module is seamlessly and fully integrated into the MogDB recovery process. - -MOT recovery has two main stages - Checkpoint Recovery and WAL Recovery (Redo Log). - -MOT checkpoint recovery is performed before the envelope's recovery takes place. This is done only at cold-start events (start of a PG process). It recovers the metadata first (schema) and then inserts all the rows from the current valid checkpoint, which is done in parallel by checkpoint_recovery_workers, each working on a different table. The indexes are created during the insert process. - -When checkpointing a table, it is divided into 16 MB chunks, so that multiple recovery workers can recover the table in parallel. In order to speed up checkpoint recovery, it is implemented as a multi-threaded procedure in which each thread is responsible for recovering a different segment. There are no dependencies between different segments; therefore, there is no contention between the threads and no need to use locks when updating tables or inserting new rows. - -WAL records are recovered as part of the envelope's WAL recovery. The MogDB envelope iterates through the XLOG and performs the necessary operation based on the xlog record type. In the case of an entry with the record type MOT, the envelope forwards it to the MOT RecoveryManager for handling. An xlog entry is ignored by MOT recovery if it is 'too old', meaning that its LSN is older than the checkpoint's LSN (Log Sequence Number). - -In an active-standby deployment, the standby server is always in a Recovery state for an automatic WAL recovery process. - -The MOT recovery parameters are set in the mot.conf file, as explained in the **[MOT Recovery](../../../administrator-guide/mot-engine/2-using-mot/5-mot-administration.md#mot-recovery)** section. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-8.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-8.md deleted file mode 100644 index 661cac1f..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-8.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: MOT Query Native Compilation (JIT) -summary: MOT Query Native Compilation (JIT) -author: Zhang Cuiping -date: 2021-03-04 ---- - -# MOT Query Native Compilation (JIT) - -MOT enables you to prepare and parse *pre-compiled full queries* in a native format (using a **PREPARE** statement) before they are needed for execution.
- -This native format can later be executed (using an **EXECUTE** command). This type of execution is much more efficient because, during execution, the native format bypasses multiple database processing layers. This division of labor avoids repetitive parse analysis operations. The Lite Executor module is responsible for executing **prepared** queries and has a much faster execution path than the regular generic plan performed by the envelope. This is achieved using Just-In-Time (JIT) compilation via LLVM. In addition, a similar solution that has potentially similar performance is provided in the form of pseudo-LLVM. - -The following is the **PREPARE** syntax in SQL: - -``` -PREPARE name [ ( data_type [, ...] ) ] AS statement -``` - -The following is an example of how to invoke a PREPARE and then an EXECUTE statement in a Java application - - -``` -conn = DriverManager.getConnection(connectionUrl, connectionUser, connectionPassword); - -// Example 1: PREPARE without bind settings -String query = "SELECT * FROM getusers"; -PreparedStatement prepStmt1 = conn.prepareStatement(query); -ResultSet rs1 = prepStmt1.executeQuery(); -while (rs1.next()) {…} - -// Example 2: PREPARE with bind settings -String sqlStmt = "SELECT * FROM employees where first_name=? and last_name like ?"; -PreparedStatement prepStmt2 = conn.prepareStatement(sqlStmt); -prepStmt2.setString(1, "Mark"); // first name "Mark" -prepStmt2.setString(2, "%n%"); // last name contains a letter "n" -ResultSet rs2 = prepStmt2.executeQuery(); -while (rs2.next()) {…} -``` - -## Prepare - -**PREPARE** creates a prepared statement. A prepared statement is a server-side object that can be used to optimize performance. When the **PREPARE** statement is executed, the specified statement is parsed, analyzed and rewritten. - -If the tables mentioned in the query statement are MOT tables, the MOT compilation takes charge of the object preparation and performs a special optimization by compiling the query into IR byte code based on LLVM. - -Whenever a new query compilation is required, the query is analyzed and properly tailored IR byte code is generated for the query using the utility GsCodeGen object and the standard LLVM JIT API (IRBuilder). After byte-code generation is completed, the code is JIT-compiled into a separate LLVM module. The compiled code results in a C function pointer that can later be invoked for direct execution. Note that this C function can be invoked concurrently by many threads, as long as each thread provides a distinct execution context (details are provided below). Each such execution context is referred to as a *JIT Context*. - -To improve performance further, MOT JIT applies a caching policy for its LLVM code results, enabling them to be reused for the same queries across different sessions. - -## Execute - -When an EXECUTE command is issued, the prepared statement (described above) is planned and executed. This division of labor avoids repetitive parse analysis work, while enabling the execution plan to depend on the specific parameter values supplied. - -When the resulting execute query command reaches the database, it uses the corresponding IR byte code, which is executed directly and more efficiently within the MOT engine. This is referred to as *Lite Execution*. - -In addition, for availability, the Lite Executor maintains a preallocated pool of JIT sources. Each session preallocates its own session-local pool of JIT context objects (used for repeated executions of precompiled queries).
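For reference, the same flow can be sketched directly in SQL (the statement name, table and column here are hypothetical); if **employees** is an MOT table, the EXECUTE command takes the Lite Execution path described above -

```
-- PREPARE parses, analyzes and, for MOT tables, JIT-compiles the query once.
PREPARE get_employee (int) AS SELECT * FROM employees WHERE id = $1;

-- Each EXECUTE reuses the compiled form with the supplied parameter value.
EXECUTE get_employee(100);
```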
- -For more details you may refer to the Supported Queries for Lite Execution and Unsupported Queries for Lite Execution sections. - -## JIT Compilation Comparison - MogDB Disk-based vs. MOT Tables - -Currently, MogDB contains two main forms of JIT / CodeGen query optimizations for its disk-based tables - - -- Accelerating expression evaluation, such as in WHERE clauses, target lists, aggregates and projections. -- Inlining small function invocations. - -These optimizations are partial (in the sense they do not optimize the entire interpreted operator tree or replace it altogether) and are targeted mostly at CPU-bound complex queries, typically seen in OLAP use cases. The execution of queries is performed in a pull-model (Volcano-style processing) using an interpreted operator tree. When activated, the compilation is performed at each query execution. At the moment, caching of the generated LLVM code and its reuse across sessions and queries is not yet provided. - -In contrast, MOT JIT optimization provides LLVM code for entire queries that qualify for JIT optimization by MOT. The resulting code is used for direct execution over MOT tables, while the interpreted operator model is abandoned completely. The result is *practically* handwritten LLVM code that has been generated for an entire specific query execution. - -Another significant conceptual difference is that MOT LLVM code is only generated for prepared queries during the PREPARE phase of the query, rather than at query execution. This is especially important for OLTP scenarios due to the rather short runtime of OLTP queries, which cannot allow for code generation and relatively long query compilation time to be performed during each query execution. - -Finally, in PostgreSQL the activation of a PREPARE implies the reuse of the resulting plan across executions with different parameters in the same session. Similarly, the MOT JIT applies a caching policy for its LLVM code results, and extends it for reuse across different sessions. Thus, a single query may be compiled just once and its LLVM code may be reused across many sessions, which again is beneficial for OLTP scenarios. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-9.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-9.md deleted file mode 100644 index c8cfa476..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/3-9.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Comparison - Disk vs. MOT -summary: Comparison - Disk vs. MOT -author: Zhang Cuiping -date: 2021-03-04 ---- - -# Comparison - Disk vs. MOT - -The following table briefly compares the various features of the MogDB disk-based storage engine and the MOT storage engine. - -**Table 1** Comparison - Disk-based vs. MOT - -| Feature | MogDB Disk Store | MogDB MOT Engine | -| :--------------------------- | :---------------------------------- | :---------------------------------------- | -| Intel x86 + Kunpeng ARM | Yes | Yes | -| SQL and Feature-set Coverage | 100% | 98% | -| Scale-up (Many-cores, NUMA) | Low Efficiency | High Efficiency | -| Throughput | High | Extremely High | -| Latency | Low | Extremely Low | -| Distributed (Cluster Mode) | Yes | Yes | -| Isolation Levels | - RC+SI
- RR
- Serializable | - RC
- RR
- RC+SI (in V2 release) | -| Concurrency Control | Pessimistic | Optimistic | -| Data Capacity (Data + Index) | Unlimited | Limited to DRAM | -| Native Compilation | No | Yes | -| Replication, Recovery | Yes | Yes | -| Replication Options | 2 (sync, async) | 3 (sync, async, group-commit) | - -**Legend -** - -- RR = Repeatable Read -- RC = Read Committed -- SI = Snapshot Isolation diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/concepts-of-mot.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/concepts-of-mot.md deleted file mode 100644 index 09ebea27..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/3-concepts-of-mot/concepts-of-mot.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Concepts of MOT -summary: Concepts of MOT -author: Guo Huan -date: 2023-05-22 ---- - -# Concepts of MOT - -This chapter describes how MogDB MOT is designed and how it works. It also sheds light on its advanced features and capabilities and how to use them. This chapter serves to educate the reader about various technical details of how MOT operates, details of important MOT features and innovative differentiators. The content of this chapter may be useful for decision-making regarding MOT's suitability to specific application requirements and for using and managing it most efficiently. - -+ **[MOT Scale-up Architecture](3-1.md)** -+ **[MOT Concurrency Control Mechanism](3-2.md)** -+ **[Extended FDW and Other MogDB Features](3-3.md)** -+ **[NUMA Awareness Allocation and Affinity](3-4.md)** -+ **[MOT Indexes](3-5.md)** -+ **[MOT Durability Concepts](3-6.md)** -+ **[MOT Recovery Concepts](3-7.md)** -+ **[MOT Query Native Compilation (JIT)](3-8.md)** -+ **[Comparison – Disk vs. MOT](3-9.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/1-references.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/1-references.md deleted file mode 100644 index 5354e913..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/1-references.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: References -summary: References -author: Zhang Cuiping -date: 2021-05-18 ---- - -# References - -[1] Y. Mao, E. Kohler, and R. T. Morris. Cache craftiness for fast multicore key-value storage. In Proc. 7th ACM European Conference on Computer Systems (EuroSys), Apr. 2012. - -[2] K. Ren, T. Diamond, D. J. Abadi, and A. Thomson. Low-overhead asynchronous checkpointing in main-memory database systems. In Proceedings of the 2016 ACM SIGMOD International Conference on Management of Data, 2016. - -[5] Tu, S., Zheng, W., Kohler, E., Liskov, B., and Madden, S. Speedy transactions in multicore in-memory databases. In Proceedings of the Twenty-Fourth ACM Symposium on Operating Systems Principles (New York, NY, USA, 2013), SOSP ’13, ACM, pp. 18-32. - -[6] H. Avni et al. Industrial-Strength OLTP Using Main Memory and Many-cores, VLDB 2020. - -[7] Bernstein, P. A., and Goodman, N. Concurrency control in distributed database systems. ACM Comput. Surv. 13, 2 (1981), 185-221. - -[8] Felber, P., Fetzer, C., and Riegel, T. Dynamic performance tuning of word-based software transactional memory. In Proceedings of the 13th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, PPOPP 2008, Salt Lake City, UT, USA, February 20-23, 2008 (2008), pp. 237-246. - -[9] Appuswamy, R., Anadiotis, A., Porobic, D., Iman, M., and Ailamaki, A.
Analyzing the impact of system architecture on the scalability of OLTP engines for high-contention workloads. PVLDB 11, 2 (2017), 121-134. - -[10] R. Sherkat, C. Florendo, M. Andrei, R. Blanco, A. Dragusanu, A. Pathak, P. Khadilkar, N. Kulkarni, C. Lemke, S. Seifert, S. Iyer, S. Gottapu, R. Schulze, C. Gottipati, N. Basak, Y. Wang, V. Kandiyanallur, S. Pendap, D. Gala, R. Almeida, and P. Ghosh. Native store extension for SAP HANA. PVLDB, 12(12): 2047-2058, 2019. - -[11] X. Yu, A. Pavlo, D. Sanchez, and S. Devadas. Tictoc: Time traveling optimistic concurrency control. In Proceedings of the 2016 International Conference on Management of Data, SIGMOD Conference 2016, San Francisco, CA, USA, June 26 - July 01, 2016, pages 1629-1642, 2016. - -[12] V. Leis, A. Kemper, and T. Neumann. The adaptive radix tree: Artful indexing for main-memory databases. In C. S. Jensen, C. M. Jermaine, and X. Zhou, editors, 29th IEEE International Conference on Data Engineering, ICDE 2013, Brisbane, Australia, April 8-12, 2013, pages 38-49. IEEE Computer Society, 2013. - -[13] S. K. Cha, S. Hwang, K. Kim, and K. Kwon. Cache-conscious concurrency control of main-memory indexes on shared-memory multiprocessor systems. In P. M. G. Apers, P. Atzeni, S. Ceri, S. Paraboschi, K. Ramamohanarao, and R. T. Snodgrass, editors, VLDB 2001, Proceedings of 27th International Conference on Very Large Data Bases, September 11-14, 2001, Roma, Italy, pages 181-190. Morgan Kaufmann, 2001. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/2-glossary.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/2-glossary.md deleted file mode 100644 index f2d15760..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/2-glossary.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Glossary -summary: Glossary -author: Zhang Cuiping -date: 2021-05-18 ---- - -# Glossary - -| Acronym | Definition/Description | -| :------ | :----------------------------------------------------------- | -| 2PL | 2-Phase Locking | -| ACID | Atomicity, Consistency, Isolation, Durability | -| AP | Analytical Processing | -| ARM | Advanced RISC Machine, a hardware architecture alternative to x86 | -| CC | Concurrency Control | -| CPU | Central Processing Unit | -| DB | Database | -| DBA | Database Administrator | -| DBMS | Database Management System | -| DDL | Data Definition Language.
-| DML | Data Manipulation Language |
-| ETL | Extract, Transform, Load or Encounter Time Locking |
-| FDW | Foreign Data Wrapper |
-| GC | Garbage Collector |
-| HA | High Availability |
-| HTAP | Hybrid Transactional-Analytical Processing |
-| IoT | Internet of Things |
-| IM | In-Memory |
-| IMDB | In-Memory Database |
-| IR | Intermediate Representation of a source code, used in compilation and optimization |
-| JIT | Just In Time |
-| JSON | JavaScript Object Notation |
-| KV | Key Value |
-| LLVM | Low-Level Virtual Machine; here it refers to compiling code or queries into IR |
-| M2M | Machine-to-Machine |
-| ML | Machine Learning |
-| MM | Main Memory |
-| MO | Memory Optimized |
-| MOT | Memory Optimized Tables storage engine (SE), pronounced as /em/ /oh/ /tee/ |
-| MVCC | Multi-Version Concurrency Control |
-| NUMA | Non-Uniform Memory Access |
-| OCC | Optimistic Concurrency Control |
-| OLTP | Online Transaction Processing |
-| PG | PostgreSQL |
-| RAW | Reads-After-Writes |
-| RC | Return Code |
-| RTO | Recovery Time Objective |
-| SE | Storage Engine |
-| SQL | Structured Query Language |
-| TCO | Total Cost of Ownership |
-| TP | Transactional Processing |
-| TPC-C | An On-Line Transaction Processing Benchmark |
-| Tpm-C | Transactions-per-minute-C. A performance metric for TPC-C benchmark that counts new-order transactions. |
-| TVM | Tiny Virtual Machine |
-| TSO | Time Sharing Option |
-| UDT | User-Defined Type |
-| WAL | Write Ahead Log |
-| XLOG | A PostgreSQL implementation of transaction logging (WAL - described above) |
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/mot-appendix.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/mot-appendix.md
deleted file mode 100644
index ffc33f00..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/4-appendix/mot-appendix.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Appendix
-summary: Appendix
-author: Guo Huan
-date: 2023-05-22
----
-
-# Appendix
-
-+ **[References](1-references.md)**
-+ **[Glossary](2-glossary.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/mot-engine.md b/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/mot-engine.md
deleted file mode 100644
index 880db8a1..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/mot-engine/mot-engine.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: MOT
-summary: MOT
-author: Guo Huan
-date: 2023-05-22
----
-
-# MOT
-
-- **[Introducing MOT](1-introducing-mot/introducing-mot.md)**
-- **[Using MOT](2-using-mot/using-mot.md)**
-- **[Concepts of MOT](3-concepts-of-mot/concepts-of-mot.md)**
-- **[Appendix](4-appendix/mot-appendix.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/primary-and-standby-management.md b/product/en/docs-mogdb/v5.2/administrator-guide/primary-and-standby-management.md
deleted file mode 100644
index f628f197..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/primary-and-standby-management.md
+++ /dev/null
@@ -1,126 +0,0 @@
----
-title: Primary and Standby Management
-summary: Primary and Standby Management
-author: Guo Huan
-date: 2021-03-11
----
-
-# Primary and Standby Management
-
-## Scenarios
-
-During MogDB database running, the database administrator needs to manually perform a primary/standby switchover on the database node.
For example, after a primary/standby database node failover, you need to restore the original primary/standby roles, or you need to manually perform a primary/standby switchover due to a hardware fault. A cascaded standby server cannot be directly switched to a primary server. You must perform a switchover or failover to change the cascaded standby server to a standby server, and then to a primary server.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - The primary/standby switchover is a maintenance operation. Ensure that the MogDB database is normal and perform the switchover after all services are complete.
-> - When the ultimate RTO is enabled, cascaded standby servers are not supported. The standby server cannot be connected when the ultimate RTO is enabled. As a result, the cascaded standby server cannot synchronize data.
-
-## Procedure
-
-1. Log in to any database node as the OS user **omm** and run the following command to check the primary/standby status:
-
-   ```bash
-   gs_om -t status --detail
-   ```
-
-2. Log in to the standby node to be switched to the primary node as the OS user **omm** and run the following command:
-
-   ```bash
-   gs_ctl switchover -D /home/omm/cluster/dn1/
-   ```
-
-   **/home/omm/cluster/dn1/** is the data directory of the standby database node.
-
-   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** For the same database, you cannot perform a new primary/standby switchover if the previous switchover has not completed. If a switchover is performed when the host thread is processing services, the thread cannot stop, and switchover timeout will be reported. Actually, the switchover is ongoing in the background and will complete after the thread finishes service processing and stops. For example, when a host is deleting a large partitioned table, it may fail to respond to the switchover request.
-
-3. After the switchover is successful, run the following command to record the information about the current primary and standby nodes:
-
-   ```bash
-   gs_om -t refreshconf
-   ```
-
-## Examples
-
-Run the following commands to switch the standby database instance to the primary database instance:
-
-1. Query the database status.
-
-   ```bash
-   $ gs_om -t status --detail
-   [   Cluster State   ]
-
-   cluster_state   : Normal
-   redistributing  : No
-   current_az      : AZ_ALL
-
-   [  Datanode State   ]
-
-   node            node_ip         port      instance                        state
-   --------------------------------------------------------------------------------------------------
-   1  pekpopgsci00235 10.244.62.204   5432      6001 /home/omm/cluster/dn1/   P Primary Normal
-   2  pekpopgsci00238 10.244.61.81    5432      6002 /home/omm/cluster/dn1/   S Standby Normal
-   ```

-2. Log in to the standby node and perform a primary/standby switchover. In addition, after a cascaded standby node is switched over, the cascaded standby server becomes a standby server, and the original standby server becomes a cascaded standby server.
-
-   ```bash
-   $ gs_ctl switchover -D /home/omm/cluster/dn1/
-   [2020-06-17 14:28:01.730][24438][][gs_ctl]: gs_ctl switchover ,datadir is -D "/home/omm/cluster/dn1"
-   [2020-06-17 14:28:01.730][24438][][gs_ctl]: switchover term (1)
-   [2020-06-17 14:28:01.768][24438][][gs_ctl]: waiting for server to switchover............
-   [2020-06-17 14:28:11.175][24438][][gs_ctl]: done
-   [2020-06-17 14:28:11.175][24438][][gs_ctl]: switchover completed (/home/omm/cluster/dn1)
-   ```
-
-3. Save the information about the primary and standby nodes in the database.
-
-   ```bash
-   $ gs_om -t refreshconf
-   Generating dynamic configuration file for all nodes.
-   Successfully generated dynamic configuration file.
-   ```
-
-## Troubleshooting
-
-If a switchover fails, troubleshoot the problem according to the log information. For details, see [Log Reference](../administrator-guide/routine-maintenance/11-log-reference.md).
-
-## Exception Handling
-
-Exception handling rules are as follows:
-
-- A switchover takes a long time under high service loads. In this case, no further operation is required.
-
-- When standby nodes are being built, a primary node can be demoted to a standby node only after sending logs to one of the standby nodes. As a result, the primary/standby switchover takes a long time. In this case, no further operation is required. However, you are not advised to perform a primary/standby switchover during the build process.
-
-- During a switchover, due to network faults and high disk usage, it is possible that the primary and standby instances are disconnected, or two primary nodes exist in a single pair. In this case, perform the following steps:
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-warning.gif) **WARNING:** After two primary nodes appear, perform the following steps to restore the normal primary/standby state. Otherwise, data loss may occur.
-
-1. Run the following commands to query the current instance status of the database:
-
-   ```bash
-   gs_om -t status --detail
-   ```
-
-   The query result shows that the status of two instances is **Primary**, which is abnormal.
-
-2. Determine the node that functions as the standby node and run the following command on the node to stop the service:
-
-   ```bash
-   gs_ctl stop -D /home/omm/cluster/dn1/
-   ```
-
-3. Run the following command to start the standby node in standby mode:
-
-   ```bash
-   gs_ctl start -D /home/omm/cluster/dn1/ -M standby
-   ```
-
-4. Save the information about the primary and standby nodes in the database.
-
-   ```bash
-   gs_om -t refreshconf
-   ```
-
-5. Check the database status and ensure that the instance status is restored.
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/0-starting-and-stopping-mogdb.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/0-starting-and-stopping-mogdb.md
deleted file mode 100644
index 6e8bce27..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/0-starting-and-stopping-mogdb.md
+++ /dev/null
@@ -1,218 +0,0 @@
----
-title: Starting and Stopping MogDB
-summary: Starting and Stopping MogDB
-author: Guo Huan
-date: 2021-06-24
----
-
-# Starting and Stopping MogDB
-
-## Operating By OM
-
-### Starting MogDB
-
-1. Log in as the OS user **omm** to the primary node of the database.
-
-2. Run the following command to start MogDB:
-
-   ```bash
-   gs_om -t start
-   ```
-
-   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** An HA cluster must be started in HA mode. If the cluster is started in standalone mode, you need to restore the HA relationship by running the **gs_ctl build** command. For details about how to use the **gs_ctl** tool, see [gs_ctl](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_ctl.md).
-
-### Stopping MogDB
-
-1. Log in as the OS user **omm** to the primary node of the database.
-
-2.
Run the following command to stop MogDB: - - ```bash - gs_om -t stop - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** For details about how to start and stop nodes and availability zones (AZs), see [gs_om](../../reference-guide/tool-reference/server-tools/gs_om.md). - -### Examples - -Start MogDB: - -```bash -gs_om -t start -Starting cluster. -========================================= -========================================= -Successfully started. -``` - -Stop MogDB: - -```bash -gs_om -t stop -Stopping cluster. -========================================= -Successfully stopped cluster. -========================================= -End stop cluster. -``` - -### Troubleshooting - -If starting or stopping MogDB fails, troubleshoot the problem based on log information. For details, see [Log Reference](11-log-reference.md). - -If the startup fails due to timeout, you can run the following command to set the startup timeout interval, which is 300s by default: - -```bash -gs_om -t start --time-out=300 -``` - -
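The start and status commands above can be combined into a simple health-checked startup routine. The following is a minimal, illustrative sketch (not part of the OM tooling): it assumes it is run as user **omm** with `gs_om` on the PATH, and it greps for the `cluster_state : Normal` line that `gs_om -t status --detail` prints for a healthy database.

```bash
#!/bin/bash
# Hypothetical start-and-verify helper for user omm; assumes gs_om is on PATH.
set -u

if ! gs_om -t start --time-out=300; then
    echo "Startup failed or timed out; see the run logs (Log Reference)." >&2
    exit 1
fi

# A healthy cluster reports a "cluster_state : Normal" line in the status output.
if gs_om -t status --detail | grep -Eq "cluster_state[[:space:]]*:[[:space:]]*Normal"; then
    echo "MogDB started and cluster_state is Normal."
else
    echo "MogDB started but cluster_state is not Normal; investigate before use." >&2
    exit 1
fi
```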
-
-## Operating By PTK
-
-### Start the Cluster
-
-> The following uses the `cluster_slirist` cluster as an example.
-
-After the database cluster is installed, PTK will start the database cluster by default.
-
-If the `--skip-launch-db` parameter is specified during installation, the database is in the stopped status.
-
-You can run the `cluster start` command to start the database cluster. You need to specify the cluster name in the command.
-
-Example:
-
-```shell
-# ptk cluster -n cluster_slirist start
-INFO[2022-08-02T11:40:48.728] Operating: Starting.
-INFO[2022-08-02T11:40:48.728] =========================================
-INFO[2022-08-02T11:40:48.784] starting host 192.168.122.101
-INFO[2022-08-02T11:40:54.097] starting host 192.168.122.101 successfully
-INFO[2022-08-02T11:40:54.097] starting host 192.168.122.102
-INFO[2022-08-02T11:40:56.329] starting host 192.168.122.102 successfully
-INFO[2022-08-02T11:40:56.613] waiting for check cluster state...
-INFO[2022-08-02T11:41:01.861] =========================================
-INFO[2022-08-02T11:41:01.861] Successfully started.
-INFO[2022-08-02T11:41:01.861] Operation succeeded: Start.
-```
-
-PTK starts all instances in the cluster by default. It can also start a single instance.
-
-```shell
-# ptk cluster -n cluster_slirist start -H 192.168.122.101
-INFO[2022-08-02T11:50:04.442] Operating: Starting.
-INFO[2022-08-02T11:50:04.442] =========================================
-INFO[2022-08-02T11:50:06.692] starting host 192.168.122.101 successfully
-```
-
-For more parameters, see the help information.
-
-```shell
-# ptk cluster start -h
-Start a database instance or cluster.
-
-Usage:
-  ptk cluster start [flags]
-
-Flags:
-  -h, --help                   help for start
-  -H, --host string            Specifies the IP address of an instance.
-  -n, --name string            Specifies the cluster name.
-      --security-mode string   Specifies whether to start a database in safe mode.
-                               The value can be on and off.
-      --time-out duration      Specifies the start timeout duration. The default value is 10 minutes.
-```
-
-### Stop the Cluster
-
-> The following uses the `cluster_slirist` cluster as an example.
-
-You can run the `cluster stop` command to stop a database cluster. PTK will stop all instances in a cluster by default.
-
-```shell
-# ptk cluster -n cluster_slirist stop
-INFO[2022-08-02T11:49:40.685] Operating: Stopping.
-INFO[2022-08-02T11:49:40.685] =========================================
-INFO[2022-08-02T11:49:40.891] stopping host 192.168.122.102
-INFO[2022-08-02T11:49:41.946] stopping host 192.168.122.102 successfully
-INFO[2022-08-02T11:49:41.946] stopping host 192.168.122.101
-INFO[2022-08-02T11:49:43.004] stopping host 192.168.122.101 successfully
-INFO[2022-08-02T11:49:43.004] =========================================
-INFO[2022-08-02T11:49:43.004] Successfully stoped.
-INFO[2022-08-02T11:49:43.004] Operation succeeded: Stop.
-```
-
-You can use the `-H` parameter to specify an instance to stop.
-
-```shell
-# ptk cluster -n cluster_slirist stop -H 192.168.122.101
-INFO[2022-08-02T11:56:32.880] Operating: Stopping.
-INFO[2022-08-02T11:56:32.881] =========================================
-INFO[2022-08-02T11:56:34.154] stopping host 192.168.122.101 successfully
-```
-
-For more parameters, see the help information.
-
-```shell
-# ptk cluster stop -h
-Stop a database instance or cluster.
-
-Usage:
-  ptk cluster stop [flags]
-
-Flags:
-  -h, --help               help for stop
-  -H, --host string        Specifies the IP address of an instance.
-  -n, --name string        Specifies the cluster name.
-      --time-out duration  Specifies the stop timeout duration. The default value is 10 minutes.
-```
-
-### Restart the Cluster
-
-> The following uses the `cluster_slirist` cluster as an example.
-
-Restarting a cluster simply stops the database and then starts it again.
-
-You can run the `cluster restart` command to restart the cluster.
-
-```shell
-# ptk cluster -n cluster_slirist restart
-INFO[2022-08-02T11:59:31.037] Operating: Stopping.
-INFO[2022-08-02T11:59:31.037] =========================================
-INFO[2022-08-02T11:59:31.217] stopping host 192.168.122.102
-INFO[2022-08-02T11:59:32.269] stopping host 192.168.122.102 successfully
-INFO[2022-08-02T11:59:32.269] stopping host 192.168.122.101
-INFO[2022-08-02T11:59:33.309] stopping host 192.168.122.101 successfully
-INFO[2022-08-02T11:59:33.309] =========================================
-INFO[2022-08-02T11:59:33.309] Successfully stoped.
-INFO[2022-08-02T11:59:33.309] Operation succeeded: Stop.
-
-INFO[2022-08-02T11:59:33.310] Operating: Starting.
-INFO[2022-08-02T11:59:33.310] =========================================
-INFO[2022-08-02T11:59:33.376] starting host 192.168.122.101
-INFO[2022-08-02T11:59:35.583] starting host 192.168.122.101 successfully
-INFO[2022-08-02T11:59:35.583] starting host 192.168.122.102
-INFO[2022-08-02T11:59:36.787] starting host 192.168.122.102 successfully
-INFO[2022-08-02T11:59:36.995] waiting for check cluster state...
-INFO[2022-08-02T11:59:42.247] =========================================
-INFO[2022-08-02T11:59:42.247] Successfully started.
-INFO[2022-08-02T11:59:42.247] Operation succeeded: Start.
-```
-
-For more parameters, see the help information.
-
-```shell
-# ptk cluster restart -h
-Restart a database instance or cluster.
-
-Usage:
-  ptk cluster restart [flags]
-
-Flags:
-  -h, --help                   help for restart
-  -H, --host string            Specifies the IP address of an instance.
-  -n, --name string            Specifies the cluster name.
-      --security-mode string   Specifies whether to start a database in safe mode.
-                               The value can be on and off.
-      --time-out duration      Specifies the start timeout duration. The default value is 10 minutes.
-```
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/1-routine-maintenance-check-items.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/1-routine-maintenance-check-items.md
deleted file mode 100644
index b27725fd..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/1-routine-maintenance-check-items.md
+++ /dev/null
@@ -1,164 +0,0 @@
----
-title: Routine Maintenance Check Items
-summary: Routine Maintenance Check Items
-author: Zhang Cuiping
-date: 2021-03-04
----
-
-# Routine Maintenance Check Items
-
-## Checking MogDB Status
-
-MogDB provides tools to check database and instance status, ensuring that databases and instances are running properly to provide data services.
-
-- Check instance status.
-
-  ```bash
-  gs_check -U omm -i CheckClusterState
-  ```
-
-- Check parameters.
-
-  ```sql
-  mogdb=# SHOW parameter_name;
-  ```
-
-- Modify parameters.
-
-  ```bash
-  gs_guc reload -D /mogdb/data/dbnode -c "paraname=value"
-  ```
-
-## Checking Lock Information
-
-The lock mechanism is an important method to ensure data consistency. Checking lock information helps you learn about database transactions and the database running status.
-
-- Query lock information in the database.
- - ```sql - mogdb=# SELECT * FROM pg_locks; - ``` - -- Query the status of threads waiting to acquire locks. - - ```sql - mogdb=# SELECT * FROM pg_thread_wait_status WHERE wait_status = 'acquire lock'; - ``` - -- Kill a system process. - - Search for a system process that is running and run the following command to end the process: - - ``` - ps ux - kill -9 pid - ``` - -## Collecting Event Statistics - -Long-time running of SQL statements will occupy a lot of system resources. You can check event occurrence time and occupied memory to learn about database running status. - -- Query the time points about an event. - - Run the following command to query the thread start time, transaction start time, SQL start time, and status change time of the event: - - ```sql - mogdb=# SELECT backend_start,xact_start,query_start,state_change FROM pg_stat_activity; - ``` - -- Query the number of sessions on the current server. - - ```sql - mogdb=# SELECT count(*) FROM pg_stat_activity; - ``` - -- Query system-level statistics. - - Run the following command to query information about the session that uses the maximum memory: - - ```sql - mogdb=# SELECT * FROM pv_session_memory_detail() ORDER BY usedsize desc limit 10; - ``` - -## Checking Objects - -Tables, indexes, partitions, and constraints are key storage objects of a database. A database administrator needs to routinely maintain key information and these objects. - -- View table details. - - ```sql - mogdb=# \d+ table_name - ``` - -- Query table statistics. - - ```sql - mogdb=# SELECT * FROM pg_statistic; - ``` - -- View index details. - - ```sql - mogdb=# \d+ index_name - ``` - -- Query partitioned table information. - - ```sql - mogdb=# SELECT * FROM pg_partition; - ``` - -- Collect statistics. - - Run the **ANALYZE** statement to collect related statistics on the database. - - Run the **VACUUM** statement to reclaim space and update statistics. - -- Query constraint information. - - ```sql - mogdb=# SELECT * FROM pg_constraint; - ``` - -## Checking an SQL Report - -Run the **EXPLAIN** statement to view execution plans. - -## Backing Up Data - -Never forget to back up data. During the routine work, the backup execution and backup data validity need to be checked to ensure data security and encryption security. - -- Export a specified user. - - ```bash - gs_dump dbname -p port -f out.sql -U user_name -W password - ``` - -- Export a schema. - - ```bash - gs_dump dbname -p port -n schema_name -f out.sql - ``` - -- Export a table. - - ```bash - gs_dump dbname -p port -t table_name -f out.sql - ``` - -## Checking Basic Information - -Basic information includes versions, components, and patches. Periodic database information checks and records are important for database life cycle management. - -- Check version information. - - ```sql - mogdb=# SELECT version(); - ``` - -- Check table size and database size. 
- - ```sql - mogdb=# SELECT pg_table_size('table_name'); - mogdb=# SELECT pg_database_size('database_name'); - ``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/10-data-security-maintenance-suggestions.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/10-data-security-maintenance-suggestions.md deleted file mode 100644 index 410b7d78..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/10-data-security-maintenance-suggestions.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Data Security Maintenance Suggestions -summary: Data Security Maintenance Suggestions -author: Zhang Cuiping -date: 2021-03-04 ---- - -# Data Security Maintenance Suggestions - -To ensure data security in MogDB Kernel and prevent accidents, such as data loss and illegal data access, read this section carefully. - -**Preventing Data Loss** - -You are advised to plan routine physical backup and store backup files in a reliable medium. If a serious error occurs in the system, you can use the backup files to restore the system to the state at the backup point. - -**Preventing Illegal Data Access** - -- You are advised to manage database users based on their permission hierarchies. A database administrator creates users and grants permissions to the users based on service requirements to ensure users properly access the database. -- You are advised to deploy MogDB Kernel servers and clients (or applications developed based on the client library) in trusted internal networks. If the servers and clients must be deployed in an untrusted network, enable SSL encryption before services are started to ensure data transmission security. Note that enabling the SSL encryption function compromises database performance. - -**Preventing System Logs from Leaking Personal Data** - -- Delete personal data before sending debug logs to others for analysis. - - **NOTE:** The log level **log_min_messages** is set to **DEBUG**x (*x* indicates the debug level and the value ranges from 1 to 5). The information recorded in debug logs may contain personal data. - -- Delete personal data before sending system logs to others for analysis. If the execution of a SQL statement fails, the error SQL statement will be recorded in a system log by default. SQL statements may contain personal data. - -- Set **log_min_error_statement** to **PANIC** to prevent error SQL statements from being recorded in system logs. If this function is disabled, it is difficult to locate fault causes when a fault occurs. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/11-log-reference.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/11-log-reference.md deleted file mode 100644 index eb869613..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/11-log-reference.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: Log Reference -summary: Log Reference -author: Guo Huan -date: 2021-06-24 ---- - -# Log Reference - -## Log Overview - -During database running, a large number of logs are generated, including write-ahead logs (WALs, also called Xlogs) for ensuring database security and reliability and run logs and operation logs for daily database maintenance. If the database is faulty, you can refer to these logs to locate the fault and restore the database. - -**Log Type** - -The following table describes details about log types. 
- -**Table 1** Log types - -| Type | Description | -| :-------------- | :----------------------------------------------------------- | -| System log | Logs generated during database running. They are used to record abnormal process information. | -| Operation log | Logs generated when a client tool (such as **gs_guc**) is operating databases. | -| Trace log | Logs generated after the database debug switch is enabled. They are used to analyze database exceptions. | -| Black box log | Logs generated when the database system breaks down. You can analyze the process context when the fault occurs based on the heap and stack information in the logs to facilitate fault locating. A black box dumps stack, heap, and register information about processes and threads when a system breaks down. | -| Audit log | Logs used to record some of the database user operations after the database audit function is enabled. | -| WAL | Logs used to restore a damaged database. They are also called redo logs. You are advised to routinely back up WALs. | -| Performance log | Logs used to record the status of physical resources and the performance of access to external resources (such as disks and OBS). | - -## System Logs - -System logs include those generated by database nodes when MogDB is running, and those generated when MogDB is deployed. If an error occurs during MogDB running, you can locate the cause and troubleshoot it based on system logs. - -**Log Storage Directory** - -Run logs of database nodes are stored in the corresponding folders in the **/var/log/mogdb/username/pg_log** directory. - -Logs generated during OM MogDB installation and uninstallation are stored in the **/var/log/mogdb/username/om** directory. - -**Log Naming Rules** - -- The name format of database node run logs is: - -postgresql-creation time.log - -By default, a new log file is generated at 0:00 every day, or when the size of the latest log file exceeds 16 MB or a database instance (database node) is restarted. - -- The name formats of CM run logs are: - - cm_agent logs: cm_agent-creation time.log, cm_agent-creation time-current.log, system_call-creation time.log, and system_call-creation time-current.log - - cm_server logs: cm_server-creation time.log, cm_server creation time-current.log, key_event-creation time.log, and key_event-creation time-current.log - - om_monitor logs: om_monitor-creation time.log and om_monitor-creation time-current.log. - -Logs whose names do not contain **current** are historical log files. Logs whose names contain **current** are current log files. When a process is invoked for the first time, a log file whose name contains **current** is created. If the size of this file exceeds 16 MB, the file is renamed in the historical log file name format, and a new log file is generated at the current time point. - -**Log Content Description** - -- Content of a line in a database node log: - -Date+Time+Time zone+Username+Database name+Session ID+Log level+Log content. - -- By default, a line in a cm_agent, cm_server, om_monitor log is arranged in the following format: - -Time+Time zone+Session ID+Log content - -The **SYSTEM_CALL** log records tool commands invoked by cm_agent. - -By default, a line in a key_event log is arranged in the following format: Time+Thread ID+Thread name:Key event type+Arbitration object instance ID+Arbitration details. - -## Operation Logs - -Operation logs are generated when database tools are used by a database administrator or invoked by a cluster. 
If the cluster is faulty, you can backtrack user operations on the database and reproduce the fault based on the operation logs.
-
-**Log Storage Directory**
-
-The default path is **$GAUSSLOG/bin**. If the environment variable **$GAUSSLOG** does not exist or its value is empty, the log information generated for a tool will be displayed, but not recorded in the log file of the tool.
-
-The default value of **$GAUSSLOG** is **/var/log/mogdb/username**.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If a database is deployed using the OM script, the log path is **/var/log/mogdb/username**.
-
-**Log Naming Rules**
-
-The log file name format is as follows:
-
-- **tool name-log creation time.log**
-- **tool name-log creation time-current.log**
-
-**tool name-log creation time.log** is a historical log file, and **tool name-log creation time-current.log** is a current log file.
-
-If the size of a log file exceeds 16 MB, the next time the tool is invoked, the log file is renamed in the historical log file name format, and a new log file is generated at the current time point.
-
-For example, **gs_guc-2015-01-16_183728-current.log** is renamed as **gs_guc-2015-01-16_183728.log**, and **gs_guc-2015-01-17_142216-current.log** is generated.
-
-**Maintenance Suggestions**
-
-You are advised to dump expired logs periodically to save disk space and prevent important logs from being lost.
-
-## Audit Logs
-
-After the audit function is enabled, a large number of audit logs will be generated, which occupy large storage space. You can customize an audit log maintenance policy based on the size of available storage space.
-
-For details, see “Configuring Database Audit > Maintaining Audit Logs” in the *Security Guide*.
-
-## WALs
-
-In a system using write-ahead logs (WALs or Xlogs), all data file modifications are written to a log before they are applied. That is, the corresponding log must be written to permanent storage before a data file is modified. You can use WALs to restore the cluster if the system crashes.
-
-**Log Storage Directory**
-
-Take a DN as an example. Its WALs are stored in the **/mogdb/data/data_dn/pg_xlog** directory.
-
-**/mogdb/data/data_dn** is the data directory of a node in the cluster.
-
-**Log Naming Rules**
-
-Log files are saved as segment files. Each segment is 16 MB and is divided into multiple 8 KB pages. The name of a WAL file consists of 24 hexadecimal characters. Each name has three parts, with each part having eight hexadecimal characters. The first part indicates the time line, the second part indicates the log file identifier, and the third part indicates the file segment identifier. A time line starts from 1, and a log file identifier and a file segment identifier start from 0.
-
-For example, the name of the first transaction log is **000000010000000000000000**.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The numbers in each part are used in ascending order in succession. Exhausting all available numbers takes a long time, and the numbers will start from zero again after they reach the maximum.
-
-**Log Content Description**
-
-The content of WALs depends on the types of recorded transactions. WALs can be used to restore a system after the system breaks down.
-
-By default, MogDB reads WALs for system restoration during each startup.
-
-**Maintenance Suggestions**
-
-WALs are important for database restoration. You are advised to routinely back up WALs.
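To make the WAL naming rule above concrete, the following illustrative snippet (not an official tool) splits the first segment name into its three 8-character hexadecimal fields:

```bash
#!/bin/bash
# Illustrative only: split a WAL segment name into the three 8-character
# hexadecimal fields described above (time line, log file ID, segment ID).
walname="000000010000000000000000"   # the first transaction log

timeline=${walname:0:8}    # time line, starts from 1
logid=${walname:8:8}       # log file identifier, starts from 0
segid=${walname:16:8}      # file segment identifier, starts from 0

echo "timeline=${timeline} logfile=${logid} segment=${segid}"
# prints: timeline=00000001 logfile=00000000 segment=00000000
```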
-
-## Performance Logs
-
-Performance logs focus on the access performance of external resources. Performance logs are used to record the status of physical resources and the performance of access to external resources (such as disks and OBS). When a performance issue occurs, you can locate the cause using performance logs, which greatly improves troubleshooting efficiency.
-
-**Log Storage Directory**
-
-The performance logs of the database are stored in the directories under **$GAUSSLOG/gs_profile**.
-
-**Log Naming Rules**
-
-The name format of database performance logs is:
-
-**postgresql-creation time.prf**
-
-By default, a new log file is generated at 0:00 every day, or when the latest log file exceeds 20 MB or a database instance (CN or DN) is restarted.
-
-**Log Content Description**
-
-Content of a line in a database log:
-
-**Host name+Date+Time+Instance name+Thread number+Log content**
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/2-checking-os-parameters.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/2-checking-os-parameters.md
deleted file mode 100644
index 9ee8af4b..00000000
--- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/2-checking-os-parameters.md
+++ /dev/null
@@ -1,178 +0,0 @@
----
-title: Checking OS Parameters
-summary: Checking OS Parameters
-author: Zhang Cuiping
-date: 2021-03-04
----
-
-# Checking OS Parameters
-
-## Check Method
-
-Use the **gs_checkos** tool provided by MogDB to check the OS status.
-
-**Prerequisites**
-
-- The hardware and network are working properly.
-- The trust relationship of user **root** among the hosts is normal.
-- Only user **root** is authorized to run the **gs_checkos** command.
-
-**Procedure**
-
-1. Log in to a server as user **root**.
-
-2. Run the following command to check OS parameters of servers where the MogDB nodes are deployed:
-
-   ```
-   gs_checkos -i A
-   ```
-
-   The purpose of checking the OS parameters is to ensure that MogDB is preinstalled properly and can operate efficiently after installation.
-
-**Examples**
-
-Before running the **gs_checkos** command, execute pre-processing scripts by running **gs_preinstall** to prepare the environment. The following uses parameter **A** as an example:
-
-```
-gs_checkos -i A
-Checking items:
-    A1. [ OS version status ]                                   : Normal
-    A2. [ Kernel version status ]                               : Normal
-    A3. [ Unicode status ]                                      : Normal
-    A4. [ Time zone status ]                                    : Normal
-    A5. [ Swap memory status ]                                  : Normal
-    A6. [ System control parameters status ]                    : Normal
-    A7. [ File system configuration status ]                    : Normal
-    A8. [ Disk configuration status ]                           : Normal
-    A9. [ Pre-read block size status ]                          : Normal
-    A10.[ IO scheduler status ]                                 : Normal
-    A11.[ Network card configuration status ]                   : Normal
-    A12.[ Time consistency status ]                             : Warning
-    A13.[ Firewall service status ]                             : Normal
-    A14.[ THP service status ]                                  : Normal
-Total numbers:14. Abnormal numbers:0. Warning number:1.
-```
-
-The following uses parameter **B** as an example:
-
-```
-gs_checkos -i B
-Setting items:
-    B1. [ Set system control parameters ]                       : Normal
-    B2. [ Set file system configuration value ]                 : Normal
-    B3. [ Set pre-read block size value ]                       : Normal
-    B4. [ Set IO scheduler value ]                              : Normal
-    B5. [ Set network card configuration value ]                : Normal
-    B6. [ Set THP service ]                                     : Normal
-    B7. [ Set RemoveIPC value ]                                 : Normal
-    B8. [ Set Session Process ]                                 : Normal
-Total numbers:6. Abnormal numbers:0. Warning number:0.
-``` - -## Exception Handling - -If you use the **gs_checkos** tool to check the OS and the command output shows **Abnormal**, run the following command to view detailed error information: - -``` -gs_checkos -i A --detail -``` - -The **Abnormal** state cannot be ignored because the OS in this state affects cluster installation. The **Warning** state does not affect cluster installation and thereby can be ignored. - -- If the check result for OS version status (**A1**) is **Abnormal**, replace OSs out of the mixed programming scope with those within the scope. - -- If the check result for kernel version status (**A2**) is **Warning**, the platform kernel versions in the cluster are inconsistent. - -- If the check result for Unicode status (**A3**) is **Abnormal**, set the same character set for all the hosts. You can add **export LANG=unicode** to the **/etc/profile** file. - - ``` - vim /etc/profile - ``` - -- If the check result for time zone status (**A4**) is **Abnormal**, set the same time zone for all the hosts. You can copy the time zone file in the **/usr/share/zoneinfo/** directory as the **/etc/localtime** file. - - ``` - cp /usr/share/zoneinfo/$primary time zone/$secondary time zone /etc/localtime - ``` - -- If the check result for swap memory status (**A5**) is **Abnormal**, a possible cause is that the swap memory is larger than the physical memory. You can troubleshoot this issue by reducing the swap memory or increasing the physical memory. - -- If the check result for system control parameter status (**A6**) is **Abnormal**, troubleshoot this issue in either of the following two ways: - - - Run the following command: - - ``` - gs_checkos -i B1 - ``` - - - Modify the **/etc/sysctl.conf** file based on the error message and run **sysctl -p** to make it take effect. - - ``` - vim /etc/sysctl.conf - ``` - -- If the check result for file system configuration status (**A7**) is **Abnormal**, run the following command to troubleshoot this issue: - - ``` - gs_checkos -i B2 - ``` - -- If the check result for disk configuration status (**A8**) is **Abnormal**, set the disk mounting format to **rw,noatime,inode64,allocsize=16m**. - - Run the **man mount** command to mount the XFS parameter: - - ``` - rw,noatime,inode64,allocsize=16m - ``` - - You can also set the XFS parameter in the **/etc/fstab** file. For example: - - ``` - /dev/data /data xfs rw,noatime,inode64,allocsize=16m 0 0 - ``` - -- If the check result for pre-read block size status (**A9**) is **Abnormal**, run the following command to troubleshoot this issue: - - ``` - gs_checkos -i B3 - ``` - -- If the check result for I/O scheduling status (**A10**) is **Abnormal**, run the following command to troubleshoot this issue: - - ``` - gs_checkos -i B4 - ``` - -- If the check result for NIC configuration status (**A11**) is **Warning**, run the following command to troubleshoot this issue: - - ``` - gs_checkos -i B5 - ``` - -- If the check result for time consistency status (**A12**) is **Abnormal**, verify that the NTP service has been installed and started and has synchronized time from the NTP clock. - -- If the check result for firewall status (**A13**) is **Abnormal**, disable the firewall. 
Run the following commands: - - - SUSE: - - ``` - SuSEfirewall2 stop - ``` - - - RedHat7: - - ``` - systemctl disable firewalld - ``` - - - RedHat6: - - ``` - service iptables stop - ``` - -- If the check result for THP service status (**A14**) is **Abnormal**, run the following command to troubleshoot this issue: - - ``` - gs_checkos -i B6 - ``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/3-checking-mogdb-health-status.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/3-checking-mogdb-health-status.md deleted file mode 100644 index 8fc8e7d0..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/3-checking-mogdb-health-status.md +++ /dev/null @@ -1,645 +0,0 @@ ---- -title: Checking MogDB Health Status -summary: Checking MogDB Health Status -author: Zhang Cuiping -date: 2021-03-04 ---- - -# Checking MogDB Health Status - -## Check Method - -Use the **gs_check** tool provided by MogDB to check the MogDB health status. - -**Precautions** - -- Only user **root** is authorized to check new nodes added during cluster scale-out. In other cases, the check can be performed only by user **omm**. -- Parameter **-i** or **-e** must be set. **-i** specifies a single item to be checked, and **-e** specifies an inspection scenario where multiple items will be checked. -- If **-i** is not set to a **root** item or no such items are contained in the check item list of the scenario specified by **-e**, you do not need to enter the name or password of user **root**. -- You can run **-skip-root-items** to skip **root** items. -- Check the consistency between the new node and existing nodes. Run the **gs_check** command on an existing node and specify the **-hosts** parameter. The IP address of the new node needs to be written into the **hosts** file. - -**Procedure** - -Method 1: - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to check the MogDB database status: - - ```bash - gs_check -i CheckClusterState - ``` - - In the command, **-i** indicates the check item and is case-sensitive. The format is **-i CheckClusterState**, **-i CheckCPU** or **-i CheckClusterState,CheckCPU**. - - Checkable items are listed in "Table 1 MogDB status checklist" in "Tool Reference > Server Tools > [gs_check](../../reference-guide/tool-reference/server-tools/gs_check.md)". You can create a check item as needed. - -Method 2: - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to check the MogDB database health status: - - ```bash - gs_check -e inspect - ``` - - In the command, **-e** indicates the inspection scenario and is case-sensitive. The format is **-e inspect** or **-e upgrade**. - - The inspection scenarios include **inspect** (routine inspection), **upgrade** (inspection before upgrade), **Install** (install inspection ), **binary_upgrade** (inspection before in-place upgrade), **slow_node** (node inspection), **longtime** (time-consuming inspection) and **health** (health inspection). You can create an inspection scenario as needed. - -The MogDB inspection is performed to check MogDB status during MogDB running or to check the environment and conditions before critical operations, such as upgrade or scale-out. For details about the inspection items and scenarios, see "Server Tools > gs_check > MogDB status checks" in the *MogDB Tool Reference*. 
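Before the official examples below, here is a minimal, hypothetical wrapper showing how a routine inspection might be scripted. Only the `gs_check -e inspect` invocation and the summary-line format come from this guide; the log path and exit-code convention are assumptions, and interactive root prompts (for root check items) must be handled or skipped as described above.

```bash
#!/bin/bash
# Hypothetical routine-inspection wrapper for user omm; file name and
# thresholds are illustrative, not part of the gs_check tool itself.
logfile="/tmp/gs_check_inspect_$(date +%Y%m%d).log"

gs_check -e inspect > "$logfile" 2>&1

# gs_check ends with a summary line such as:
#   Failed. All check items run completed. Total:57 Success:50 Warning:5 NG:2
summary=$(grep "All check items run completed" "$logfile" | tail -n 1)
echo "$summary"

if echo "$summary" | grep -qE "Warning:[1-9]|NG:[1-9]"; then
    echo "Inspection reported Warning/NG items; see $logfile for details." >&2
    exit 1
fi
```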
- -**Examples** - -Check result of a single item: - -```bash -perfadm@lfgp000700749:/opt/huawei/perfadm/tool/script> gs_check -i CheckCPU -Parsing the check items config file successfully -Distribute the context file to remote hosts successfully -Start to health check for the cluster. Total Items:1 Nodes:3 - -Checking... [=========================] 1/1 -Start to analysis the check result -CheckCPU....................................OK -The item run on 3 nodes. success: 3 - -Analysis the check result successfully -Success. All check items run completed. Total:1 Success:1 Failed:0 -For more information please refer to /opt/mogdb/tools/script/gspylib/inspection/output/CheckReport_201902193704661604.tar.gz -``` - -Local execution result: - -```bash -perfadm@lfgp000700749:/opt/huawei/perfadm/tool/script> gs_check -i CheckCPU -L - -2017-12-29 17:09:29 [NAM] CheckCPU -2017-12-29 17:09:29 [STD] Check the CPU usage of the host. If the value of idle is greater than 30% and the value of iowait is less than 30%, this item passes the check. Otherwise, this item fails the check. -2017-12-29 17:09:29 [RST] OK - -2017-12-29 17:09:29 [RAW] -Linux 4.4.21-69-default (lfgp000700749) 12/29/17 _x86_64_ - -17:09:24 CPU %user %nice %system %iowait %steal %idle -17:09:25 all 0.25 0.00 0.25 0.00 0.00 99.50 -17:09:26 all 0.25 0.00 0.13 0.00 0.00 99.62 -17:09:27 all 0.25 0.00 0.25 0.13 0.00 99.37 -17:09:28 all 0.38 0.00 0.25 0.00 0.13 99.25 -17:09:29 all 1.00 0.00 0.88 0.00 0.00 98.12 -Average: all 0.43 0.00 0.35 0.03 0.03 99.17 -``` - -Check result of a scenario: - -```bash -[perfadm@SIA1000131072 Check]$ gs_check -e inspect -Parsing the check items config file successfully -The below items require root privileges to execute:[CheckBlockdev CheckIOrequestqueue CheckIOConfigure CheckCheckMultiQueue CheckFirewall CheckSshdService CheckSshdConfig CheckCrondService CheckBootItems CheckFilehandle CheckNICModel CheckDropCache] -Please enter root privileges user[root]:root -Please enter password for user[root]: -Please enter password for user[root] on the node[10.244.57.240]: -Check root password connection successfully -Distribute the context file to remote hosts successfully -Start to health check for the cluster. Total Items:57 Nodes:2 - -Checking... [ ] 21/57 -Checking... [=========================] 57/57 -Start to analysis the check result -CheckClusterState...........................OK -The item run on 2 nodes. success: 2 - -CheckDBParams...............................OK -The item run on 1 nodes. success: 1 - -CheckDebugSwitch............................OK -The item run on 2 nodes. success: 2 - -CheckDirPermissions.........................OK -The item run on 2 nodes. success: 2 - -CheckReadonlyMode...........................OK -The item run on 1 nodes. success: 1 - -CheckEnvProfile.............................OK -The item run on 2 nodes. success: 2 (consistent) -The success on all nodes value: -GAUSSHOME /usr1/mogdb/app -LD_LIBRARY_PATH /usr1/mogdb/app/lib -PATH /usr1/mogdb/app/bin - - -CheckBlockdev...............................OK -The item run on 2 nodes. success: 2 - -CheckCurConnCount...........................OK -The item run on 1 nodes. success: 1 - -CheckCursorNum..............................OK -The item run on 1 nodes. success: 1 - -CheckPgxcgroup..............................OK -The item run on 1 nodes. success: 1 - -CheckDiskFormat.............................OK -The item run on 2 nodes. success: 2 - -CheckSpaceUsage.............................OK -The item run on 2 nodes. 
success: 2 - -CheckInodeUsage.............................OK -The item run on 2 nodes. success: 2 - -CheckSwapMemory.............................OK -The item run on 2 nodes. success: 2 - -CheckLogicalBlock...........................OK -The item run on 2 nodes. success: 2 - -CheckIOrequestqueue.....................WARNING -The item run on 2 nodes. warning: 2 -The warning[host240,host157] value: -On device (vdb) 'IO Request' RealValue '256' ExpectedValue '32768' -On device (vda) 'IO Request' RealValue '256' ExpectedValue '32768' - -CheckMaxAsyIOrequests.......................OK -The item run on 2 nodes. success: 2 - -CheckIOConfigure............................OK -The item run on 2 nodes. success: 2 - -CheckMTU....................................OK -The item run on 2 nodes. success: 2 (consistent) -The success on all nodes value: -1500 - -CheckPing...................................OK -The item run on 2 nodes. success: 2 - -CheckRXTX...................................NG -The item run on 2 nodes. ng: 2 -The ng[host240,host157] value: -NetWork[eth0] -RX: 256 -TX: 256 - - -CheckNetWorkDrop............................OK -The item run on 2 nodes. success: 2 - -CheckMultiQueue.............................OK -The item run on 2 nodes. success: 2 - -CheckEncoding...............................OK -The item run on 2 nodes. success: 2 (consistent) -The success on all nodes value: -LANG=en_US.UTF-8 - -CheckFirewall...............................OK -The item run on 2 nodes. success: 2 - -CheckKernelVer..............................OK -The item run on 2 nodes. success: 2 (consistent) -The success on all nodes value: -3.10.0-957.el7.x86_64 - -CheckMaxHandle..............................OK -The item run on 2 nodes. success: 2 - -CheckNTPD...................................OK -host240: NTPD service is running, 2020-06-02 17:00:28 -host157: NTPD service is running, 2020-06-02 17:00:06 - - -CheckOSVer..................................OK -host240: The current OS is centos 7.6 64bit. -host157: The current OS is centos 7.6 64bit. - -CheckSysParams..........................WARNING -The item run on 2 nodes. warning: 2 -The warning[host240,host157] value: -Warning reason: variable 'net.ipv4.tcp_retries1' RealValue '3' ExpectedValue '5'. -Warning reason: variable 'net.ipv4.tcp_syn_retries' RealValue '6' ExpectedValue '5'. - -CheckTHP....................................OK -The item run on 2 nodes. success: 2 - -CheckTimeZone...............................OK -The item run on 2 nodes. success: 2 (consistent) -The success on all nodes value: -+0800 - -CheckCPU....................................OK -The item run on 2 nodes. success: 2 - -CheckSshdService............................OK -The item run on 2 nodes. success: 2 - -Warning reason: UseDNS parameter is not set; expected: no - -CheckCrondService...........................OK -The item run on 2 nodes. success: 2 - -CheckStack..................................OK -The item run on 2 nodes. success: 2 (consistent) -The success on all nodes value: -8192 - -CheckSysPortRange...........................OK -The item run on 2 nodes. success: 2 - -CheckMemInfo................................OK -The item run on 2 nodes. success: 2 (consistent) -The success on all nodes value: -totalMem: 31.260929107666016G - -CheckHyperThread............................OK -The item run on 2 nodes. success: 2 - -CheckTableSpace.............................OK -The item run on 1 nodes. success: 1 - -CheckSysadminUser...........................OK -The item run on 1 nodes. 
success: 1 - - -CheckGUCConsistent..........................OK -All DN instance guc value is consistent. - -CheckMaxProcMemory..........................OK -The item run on 1 nodes. success: 1 - -CheckBootItems..............................OK -The item run on 2 nodes. success: 2 - -CheckHashIndex..............................OK -The item run on 1 nodes. success: 1 - -CheckPgxcRedistb............................OK -The item run on 1 nodes. success: 1 - -CheckNodeGroupName..........................OK -The item run on 1 nodes. success: 1 - -CheckTDDate.................................OK -The item run on 1 nodes. success: 1 - -CheckDilateSysTab...........................OK -The item run on 1 nodes. success: 1 - -CheckKeyProAdj..............................OK -The item run on 2 nodes. success: 2 - -CheckProStartTime.......................WARNING -host157: -STARTED COMMAND -Tue Jun 2 16:57:18 2020 /usr1/dmuser/dmserver/metricdb1/server/bin/mogdb --single_node -D /usr1/dmuser/dmb1/data -p 22204 -Mon Jun 1 16:15:15 2020 /usr1/mogdb/app/bin/mogdb -D /usr1/mogdb/data/dn1 -M standby - - -CheckFilehandle.............................OK -The item run on 2 nodes. success: 2 - -CheckRouting................................OK -The item run on 2 nodes. success: 2 - -CheckNICModel...............................OK -The item run on 2 nodes. success: 2 (consistent) -The success on all nodes value: -version: 1.0.1 -model: Red Hat, Inc. Virtio network device - - -CheckDropCache..........................WARNING -The item run on 2 nodes. warning: 2 -The warning[host240,host157] value: -No DropCache process is running - -CheckMpprcFile..............................NG -The item run on 2 nodes. ng: 2 -The ng[host240,host157] value: -There is no mpprc file - -Analysis the check result successfully -Failed. All check items run completed. Total:57 Success:50 Warning:5 NG:2 -For more information please refer to /usr1/mogdb/tool/script/gspylib/inspection/output/CheckReport_inspect611.tar.gz -``` - -## Exception Handling - -Troubleshoot exceptions detected in the inspection by following instructions in this section. - -**Table 1** Check of MogDB running status - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Check Item Abnormal Status Solution
CheckClusterState (Checks the MogDB status.) MogDB or MogDB instances are not started. Run the following command to start MogDB and instances:

gs_om -t start
The status of MogDB or MogDB instances is abnormal. Check the status of hosts and instances. Troubleshoot this issue based on the status information.
gs_check -i CheckClusterState
CheckDBParams (Checks database parameters.) Database parameters have incorrect values. Use the gs_guc tool to set the parameters to specified values.
CheckDebugSwitch (Checks debug logs.) The log level is incorrect. Use the gs_guc tool to set log_min_messages to specified content.
CheckDirPermissions (Checks directory permissions.) The permission for a directory is incorrect. Change the directory permission to a specified value (750 or 700).
chmod 750 DIR
CheckReadonlyMode (Checks the read-only mode.) The read-only mode is enabled. Verify that the usage of the disk where database nodes are located does not exceed the threshold (85% by default) and no other O&M operations are performed.
gs_check -i CheckDataDiskUsage ps ux
Use the gs_guc tool to disable the read-only mode of MogDB.
gs_guc reload -N all -I all -c 'default_transaction_read_only = off'
CheckEnvProfile (Checks environment variables.) Environment variables are inconsistent. Update the environment variable information.
CheckBlockdev (Checks pre-read blocks.) The size of a pre-read block is not 16384 KB. Use the gs_checkos tool to set the size of the pre-read block to 16384 KB and write the setting into the auto-startup file.
gs_checkos -i B3
CheckCursorNum (Checks the number of cursors.) The number of cursors fails to be checked. Check whether the database is properly connected and whether the MogDB status is normal.
CheckPgxcgroup (Checks the data redistribution status.) There are pgxc_group tables that have not been redistributed. Proceed with the redistribution.
gs_expand、gs_shrink
CheckDiskFormat (Checks disk configurations.) Disk configurations are inconsistent between nodes. Configure disk specifications to be consistent between nodes.
CheckSpaceUsage (Checks the disk space usage.) Disk space is insufficient. Clear or expand the disk for the directory.
CheckInodeUsage (Checks the disk index usage.) Disk indexes are insufficient. Clear or expand the disk for the directory.
CheckSwapMemory (Checks the swap memory.) The swap memory is greater than the physical memory. Reduce or disable the swap memory.
CheckLogicalBlock (Checks logical blocks.) The size of a logical block is not 512 KB. Use the gs_checkos tool to set the size of the logical block to 512 KB and write the setting into the auto-startup file.
gs_checkos -i B4
CheckIOrequestqueue (Checks I/O requests.) The requested I/O is not 32768. Use the gs_checkos tool to set the requested I/O to 32768 and write the setting into the auto-startup file.
gs_checkos -i B4
CheckCurConnCount (Checks the number of current connections.) The number of current connections exceeds 90% of the allowed maximum number of connections. Break idle primary database node connections.
CheckMaxAsyIOrequests (Checks the maximum number of asynchronous requests.) The maximum number of asynchronous requests is less than 104857600 or (Number of database instances on the current node x 1048576). Use the gs_checkos tool to set the maximum number of asynchronous requests to the larger one between 104857600 and (Number of database instances on the current node x 1048576).
gs_checkos -i B4
CheckMTU (Checks MTU values.) MTU values are inconsistent between nodes. Set the MTU value on each node to 1500 or 8192.
ifconfig eth* MTU 1500
CheckIOConfigure (Checks I/O configurations.) The I/O mode is not deadline. Use the gs_checkos tool to set the I/O mode to deadline and write the setting into the auto-startup file.
gs_checkos -i B4
CheckRXTX (Checks the RX/TX value.) The NIC RX/TX value is not 4096. Use the checkos tool to set the NIC RX/TX value to 4096 for MogDB.
gs_checkos -i B5
CheckPing (Checks whether the network connection is normal.) There are MogDB IP addresses that cannot be pinged. Check the network settings, network status, and firewall status between the abnormal IP addresses.
CheckNetWorkDrop (Checks the network packet loss rate.) The network packet loss rate is greater than 1%. Check the network load and status between the corresponding IP addresses.
CheckMultiQueue (Checks the NIC multi-queue function.) Multiqueue is not enabled for the NIC, and NIC interruptions are not bound to different CPU cores. Enable multiqueue for the NIC, and bind NIC interruptions to different CPU cores.
CheckEncoding (Checks the encoding format.) Encoding formats are inconsistent between nodes. Write the same encoding format into /etc/profile for each node.
echo "export LANG=XXX" >> /etc/profile
CheckActQryCount (Checks the archiving mode.) The archiving mode is enabled, and the archiving directory is not under the primary database node directory. Disable archiving mode or set the archiving directory to be under the primary database node directory.
CheckFirewall (Checks the firewall.) The firewall is enabled. Disable the firewall.
systemctl disable firewalld.service
CheckKernelVer (Checks kernel versions.) Kernel versions are inconsistent between nodes.
CheckMaxHandle (Checks the maximum number of file handles.) The maximum number of handles is less than 1000000. Set the soft and hard limits in the 91-nofile.conf or 90-nofile.conf file to 1000000.
gs_checkos -i B2
CheckNTPD (Checks the time synchronization service.) The NTPD service is disabled or the time difference is greater than 1 minute. Enable the NTPD service and set the time to be consistent.
CheckSysParams (Checks OS parameters.) OS parameter settings do not meet requirements. Use the gs_checkos tool or manually set parameters to values meeting requirements.
gs_checkos -i B1
vim /etc/sysctl.conf
CheckTHP (Checks the THP service.) The THP service is disabled. Use the gs_checkos tool to enable the THP service.
gs_checkos -i B6
CheckTimeZone (Checks time zones.) Time zones are inconsistent between nodes. Set time zones to be consistent between nodes.
cp /usr/share/zoneinfo/<region>/<city> /etc/localtime
CheckCPU (Checks the CPU.) CPU usage is high or I/O waiting time is too long. Upgrade CPUs or improve disk performance.
CheckSshdService (Checks the SSHD service.) The SSHD service is disabled. Enable the SSHD service and write the setting into the auto-startup file.
service sshd start
echo "service sshd start" >> initFile
CheckSshdConfig (Checks SSHD configurations.) The SSHD service is incorrectly configured. Reconfigure the SSHD service.
Set PasswordAuthentication=no, MaxStartups=1000, UseDNS=yes, and ClientAliveInterval=10800 (or ClientAliveInterval=0).
Restart the service.
service sshd restart
CheckCrondService (Checks the Crond service.) The Crond service is disabled. Install and enable the Crond service.
CheckStack (Checks the stack size.) The stack size is less than 3072. Use the gs_checkos tool to set the stack size to 3072 and restart any processes that were started with a smaller stack size.
gs_checkos -i B2
CheckSysPortRange (Checks OS port configurations.) The OS IP ports are not within the required range, or the MogDB ports fall within the OS IP port range. Set the OS IP port range to 26000-65535 and set the MogDB ports outside this range.
vim /etc/sysctl.conf
CheckMemInfo (Checks the memory information.) Memory sizes are inconsistent between nodes. Use physical memory of the same specifications between nodes.
CheckHyperThread (Checks the hyper-threading.) The CPU hyper-threading is disabled. Enable the CPU hyper-threading.
CheckTableSpace (Checks tablespaces.) The tablespace path is nested with the MogDB path or nested with the path of another tablespace. Migrate tablespace data to the tablespace with a valid path.
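Several of the fixes above are applied through gs_checkos scenario items (B1-B6). As a quick way to exercise them in one pass, a minimal sketch is shown below; it assumes the item codes used in this table, that the command is run as user omm, and that comma-separated items are accepted:

```bash
# Check OS parameters (item B1) and print per-item details
gs_checkos -i B1 --detail

# Combine several setting items (OS parameters, file handles, I/O) in one run
gs_checkos -i B1,B2,B4 --detail
```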
- -## Querying Status - -### Background - -MogDB allows you to view the status of the entire MogDB. The query result shows whether the database or a single host is running properly. - -### Prerequisites - -The database has started. - -### Procedure - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to query the database status: - - ```bash - $ gs_om -t status --detail - ``` - - Table 1 describes parameters in the query result. - - To query the instance status on a host, add **-h** to the command. For example: - - ```bash - $ gs_om -t status -h plat2 - ``` - - **plat2** indicates the name of the host to be queried. - -### Parameter Description - -**Table 1** Node role description - -| Field | Description | Value | -| :------------ | :----------------------------------------------------------- | :----------------------------------------------------------- | -| cluster_state | The database status, which indicates whether the entire database is running properly. | **Normal**: The database is available and the data has redundancy backup. All the processes are running and the primary/standby relationship is normal.**Unavailable**: The database is unavailable.**Degraded**: The database is available and faulty database nodes and primary database nodes exist. | -| node | Host name. | Specifies the name of the host where the instance is located. If multiple AZs exist, the AZ IDs will be displayed. | -| node_ip | Host IP Address. | Specifies the IP address of the host where the instance is located. | -| instance | Instance ID. | Specifies the instance ID. | -| state | Instance role | **Normal**: a single host instance.**Primary**: The instance is a primary instance.**Standby**: The instance is a standby instance.**Cascade Standby**: The instance is a cascaded standby instance.**Secondary**: The instance is a secondary instance.**Pending**: The instance is in the quorum phase.**Unknown**: The instance status is unknown.**Down**: The instance is down.**Abnormal**: The node is abnormal.**Manually stopped**: The node has been manually stopped. | - -Each role has different states, such as startup and connection. The states are described as follows: - -**Table 2** Node state description - -| State | Description | -| :------------- | :----------------------------------------------------------- | -| Normal | The node starts up normally. | -| Need repair | The node needs to be restored. | -| Starting | The node is starting up. | -| Wait promoting | The node is waiting for upgrade. For example, after the standby node sends an upgrade request to the primary node, the standby node is waiting for the response from the primary node. | -| Promoting | The standby node is being upgraded to the primary node. | -| Demoting | The node is being downgraded, for example, the primary node is being downgraded to the standby node. | -| Building | The standby node fails to be started and needs to be rebuilt. | -| Catchup | The standby node is catching up with the primary node. | -| Coredump | The node program breaks down. | -| Unknown | The node status is unknown. | - -If a node is in **Need repair** state, you need to rebuild the node to restore it. Generally, the reasons for rebuilding a node are as follows: - -**Table 3** Node rebuilding causes - -| State | Description | -| :-------------------- | :----------------------------------------------------------- | -| Normal | The node starts up normally. 
| -| WAL segment removed | WALs of the primary node do not exist, and logs of the standby node are later than those of the primary node. | -| Disconnect | Standby node cannot be connected to the primary node. | -| Version not matched | The binary versions of the primary and standby nodes are inconsistent. | -| Mode not matched | Nodes do not match the primary and standby roles. For example, two standby nodes are connected. | -| System id not matched | The database system IDs of the primary and standby nodes are inconsistent. The system IDs of the primary and standby nodes must be the same. | -| Timeline not matched | The log timelines are inconsistent. | -| Unknown | Unknown cause. | - -## Examples - -View the database status details, including instance status. - -```bash -$ gs_om -t status --detail -[ Cluster State ] - -cluster_state : Normal -redistributing : No -current_az : AZ_ALL - -[ Datanode State ] - - node node_ip port instance state ------------------------------------------------------------------------------------------------------ -1 pekpopgsci00235 10.244.62.204 5432 6001 /opt/mogdb/cluster/data/dn1 P Primary Normal -2 pekpopgsci00238 10.244.61.81 5432 6002 /opt/mogdb/cluster/data/dn1 S Standby Normal -``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/4-checking-database-performance.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/4-checking-database-performance.md deleted file mode 100644 index 2341c20b..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/4-checking-database-performance.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Checking Database Performance -summary: Checking Database Performance -author: Zhang Cuiping -date: 2021-03-04 ---- - -# Checking Database Performance - -## Check Method - -Use the **gs_checkperf** tool provided by MogDB to check hardware performance. - -**Prerequisites** - -- MogDB is running properly. -- Services are running properly on the database. - -**Procedure** - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to check the MogDB database performance: - - ``` - gs_checkperf - ``` - -For details about performance statistical items, see "Table 1 Performance check items" in "Tool Reference > Server Tools > [gs_checkperf](../../reference-guide/tool-reference/server-tools/gs_checkperf.md)". - -**Examples** - -Simple performance statistical result is displayed on the screen as follows: - -``` -gs_checkperf -i pmk -U omm -Cluster statistics information: - Host CPU busy time ratio : 1.43 % - MPPDB CPU time % in busy time : 1.88 % - Shared Buffer Hit ratio : 99.96 % - In-memory sort ratio : 100.00 % - Physical Reads : 4 - Physical Writes : 25 - DB size : 70 MB - Total Physical writes : 25 - Active SQL count : 2 - Session count : 3 -``` - -## Exception Handling - -After you use the **gs_checkperf** tool to check the cluster performance, if the performance is abnormal, troubleshoot the issue by following instructions in this section. - -**Table 1** Cluster-level performance status - -| Abnormal Status | Solution | -| ---------------------------------- | ------------------------------------------------------------ | -| High CPU usage of hosts | 1. Add high-performance CPUs, or replace current CPUs with them.2. Run the **top** command to check which system processes cause high CPU usage, and run the **kill** command to stop unused processes.
`top` | -| High CPU usage of MogDB Kernel | 1. Add high-performance CPUs, or replace current CPUs with them.
2. Run the **top** command to check which database processes cause high CPU usage, and run the **kill** command to stop unused processes.
`top`
3. Use the **gs_expand** tool to add new hosts to lower the CPU usage. | -| Low hit ratio of the shared memory | 1. Expand the memory.
2. Run the following command to check the OS configuration file **/etc/sysctl.conf** and increase the value of **kernel.shmmax**.
`vim /etc/sysctl.conf` | -| Low in-memory sort ratio | Expand the memory. | -| High I/O and disk usage | 1. Replace current disks with high-performance ones.
2. Adjust the data layout to evenly distribute I/O requests to all the physical disks.
3. Run **VACUUM FULL** for the entire database.
`vacuum full;`
4. Clean up the disk space.
5. Reduce the number of concurrent connections. | -| Transaction statistics | Query the **pg_stat_activity** system catalog and disconnect unnecessary connections. (Log in to the database and run the **mogdb=# \d+ pg_stat_activity;** command.) | - -**Table 2** Node-level performance status - -| Abnormal Status | Solution | -| ----------------- | ------------------------------------------------------------ | -| High CPU usage | 1. Add high-performance CPUs, or replace current CPUs with them.
2. Run the **top** command to check which system processes cause high CPU usage, and run the **kill** command to stop unused processes.
`top` | -| High memory usage | Expand or clean up the memory. | -| High I/O usage | 1. Replace current disks with high-performance ones.
2. Clean up the disk space.
3. Use memory read/write to replace as much disk I/O as possible, putting frequently accessed files or data in the memory. | - -**Table 3** Session/process-level performance status - -| Abnormal Status | Solution | -| ------------------------------- | ------------------------------------------------------------ | -| High CPU, memory, and I/O usage | Check which processes cause high CPU, memory, or I/O usage. If they are unnecessary processes, kill them; otherwise, analyze the specific cause of high usage. For example, if SQL statement execution occupies much memory, check whether the SQL statements need optimization. | - -**Table 4** SSD performance status - -| Abnormal Status | Solution | -| -------------------- | ------------------------------------------------------------ | -| SSD read/write fault | Run the following command to check whether SSD is faulty. If yes, analyze the specific cause.
`gs_checkperf -i SSD -U omm` | diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/5-checking-and-deleting-logs.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/5-checking-and-deleting-logs.md deleted file mode 100644 index 92358a20..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/5-checking-and-deleting-logs.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: Checking and Deleting Logs -summary: Checking and Deleting Logs -author: Zhang Cuiping -date: 2021-03-04 ---- - -# Checking and Deleting Logs - -You are advised to check OS logs and database run logs monthly for monitoring system status and troubleshooting, and to delete database run logs monthly for saving disk space. - -## Checking OS Logs - -You are advised to monthly check OS logs to detect and prevent potential OS problems. - -**Procedure** - -Run the following command to check OS log files: - -``` -vim /var/log/messages -``` - -(Pay attention to words like **kernel**, **error**, and **fatal** in logs generated within the last month and handle the problems based on the alarm information.) - -## Checking MogDB Run Logs - -A database can still run when errors occur during the execution of some operations. However, data may be inconsistent before and after the error occurrences. Therefore, you are advised to monthly check MogDB run logs to detect potential problems in time. - -**Prerequisites** - -- The host used for collecting logs is running properly, and the network connection is normal. Database installation users trust each other. -- An OS tool (for example, **gstack**) that the log collection tool requires has been installed. If it is not installed, an error message is displayed, and this collection item is skipped. - -**Procedure** - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to collect database logs: - - ``` - gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" - ``` - - In the command, **20160616 01:01** indicates the start time of the log and **20160616 23:59** indicates the end time of the log. - -3. Based on command output in [2](#2), access the related log collection directory, decompress collected database logs, and check these logs. - - Assume that collected logs are stored in **/opt/mogdb/tmp/gaussdba_mppdb/collector_20160726_105158.tar.gz**. - - ``` - tar -xvzf /opt/mogdb/tmp/gaussdba_mppdb/collector_20160726_105158.tar.gz - cd /opt/mogdb/tmp/gaussdba_mppdb/collector_20160726_105158 - ``` - -**Examples** - -- Run the **gs_collector** command together with parameters **-begin-time** and **-end-time**: - - ```bash - gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" - ``` - - If information similar to the following is displayed, the logs have been archived: - - ``` - Successfully collected files - All results are stored in /tmp/gaussdba_mppdb/collector_20160616_175615.tar.gz. - ``` - -- Run the **gs_collector** command together with parameters **-begin-time**, **-end-time**, and **-h**: - - ```bash - gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" -h plat2 - ``` - - If information similar to the following is displayed, the logs have been archived: - - ``` - Successfully collected files - All results are stored in /tmp/gaussdba_mppdb/collector_20160616_190225.tar.gz. 
- ``` - -- Run the **gs_collector** command together with parameters **-begin-time**, **-end-time**, and **-f**: - - ```bash - gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" -f /opt/software/mogdb/output - ``` - - If information similar to the following is displayed, the logs have been archived: - - ``` - Successfully collected files - All results are stored in /opt/software/mogdb/output/collector_20160616_190511.tar.gz. - ``` - -- Run the **gs_collector** command together with parameters **-begin-time**, **-end-time**, and **-keyword**: - - ```bash - gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" --keyword="os" - ``` - - If information similar to the following is displayed, the logs have been archived: - - ``` - Successfully collected files. - All results are stored in /tmp/gaussdba_mppdb/collector_20160616_190836.tar.gz. - ``` - -- Run the **gs_collector** command together with parameters **-begin-time**, **-end-time**, and **-o**: - - ```bash - gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" -o /opt/software/mogdb/output - ``` - - If information similar to the following is displayed, the logs have been archived: - - ``` - Successfully collected files. - All results are stored in /opt/software/mogdb/output/collector_20160726_113711.tar.gz. - ``` - -- Run the **gs_collector** command together with parameters **-begin-time**, **-end-time**, and **-l** (the file name extension must be .log): - - ```bash - gs_collector --begin-time="20160616 01:01" --end-time="20160616 23:59" -l /opt/software/mogdb/logfile.log - ``` - - If information similar to the following is displayed, the logs have been archived: - - ``` - Successfully collected files. - All results are stored in /opt/software/mogdb/output/collector_20160726_113711.tar.gz. - ``` - -## Cleaning Run Logs - -A large number of run logs will be generated during database running and occupy huge disk space. You are advised to delete expired run logs and retain logs generated within one month. - -**Procedure** - -1. Log in as the OS user **omm** to any host in the MogDB Kernel cluster. - -2. Clean logs. - - a. Back up logs generated over one month ago to other disks. - - b. Access the directory where logs are stored. - - ``` - cd $GAUSSLOG - ``` - - c. Access the corresponding sub-directory and run the following command to delete logs generated one month ago: - - ``` - rm log name - ``` - - The naming convention of a log file is **mogdb-**year*-*month*-*day_**HHMMSS**. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/6-checking-time-consistency.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/6-checking-time-consistency.md deleted file mode 100644 index 52090c92..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/6-checking-time-consistency.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Checking Time Consistency -summary: Checking Time Consistency -author: Zhang Cuiping -date: 2021-03-04 ---- - -# Checking Time Consistency - -Database transaction consistency is guaranteed by a logical clock and is not affected by OS time. However, OS time inconsistency will lead to problems, such as abnormal backend O&M and monitoring functions. Therefore, you are advised to monthly check time consistency among nodes. - -**Procedure** - -1. Log in as the OS user **omm** to any host in the MogDB Kernel cluster. - -2. Create a configuration file for recording each cluster node. 
(You can specify the *mpphosts* file directory randomly. It is recommended that the file be stored in the **/tmp** directory.) - - ```bash - vim /tmp/mpphosts - ``` - - Add the host name of each node. - - ``` - plat1 - plat2 - plat3 - ``` - -3. Save the configuration file. - - ``` - :wq! - ``` - -4. Run the following command and write the time on each node into the **/tmp/sys_ctl-os1.log** file: - - ``` - for ihost in `cat /tmp/mpphosts`; do ssh -n -q $ihost "hostname;date"; done > /tmp/sys_ctl-os1.log - ``` - -5. Check time consistency between the nodes based on the command output. The time difference should not exceed 30s. - - ``` - cat /tmp/sys_ctl-os1.log - plat1 - Thu Feb 9 16:46:38 CST 2017 - plat2 - Thu Feb 9 16:46:49 CST 2017 - plat3 - Thu Feb 9 16:46:14 CST 2017 - ``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/7-checking-the-number-of-application-connections.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/7-checking-the-number-of-application-connections.md deleted file mode 100644 index 798fdc46..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/7-checking-the-number-of-application-connections.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: Checking the Number of Application Connections -summary: Checking the Number of Application Connections -author: Zhang Cuiping -date: 2021-03-04 ---- - -# Checking the Number of Application Connections - -If the number of connections between applications and the database exceeds the maximum value, new connections cannot be established. You are advised to daily check the number of connections, release idle connections in time, or increase the allowed maximum number of connections. - -**Procedure** - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to connect to the database: - - ``` - gsql -d mogdb -p 8000 - ``` - - **mogdb** is the name of the database to be connected, and **8000** is the port number of the database primary node. - - If information similar to the following is displayed, the connection succeeds: - - ``` - gsql ((MogDB x.x.x build 56189e20) compiled at 2022-01-07 18:47:53 commit 0 last mr ) - Non-SSL connection (SSL connection is recommended when requiring high-security) - Type "help" for help. - - mogdb=# - ``` - -3. Run the following SQL statement to check the number of connections: - - ``` - mogdb=# SELECT count(*) FROM (SELECT pg_stat_get_backend_idset() AS backendid) AS s; - ``` - - Information similar to the following is displayed. **2** indicates that two applications are connected to the database. - - ``` - count - ------- - 2 - (1 row) - ``` - -4. View the allowed maximum connections. - - ``` - mogdb=# SHOW max_connections; - ``` - - Information similar to the following is displayed. **200** indicates the currently allowed maximum number of connections. - - ``` - max_connections - ----------------- - 200 - (1 row) - ``` - -## Exception Handling - -If the number of connections in the command output is close to the value of **max_connections** of the database, delete existing connections or change the upper limit based on site requirements. - -1. Run the following SQL statement to view information about connections whose **state** is set to **idle**, and **state_change** column is not updated for a long time. 
- - ``` - mogdb=# SELECT * FROM pg_stat_activity where state='idle' order by state_change; - ``` - - Information similar to the following is displayed: - - ``` - datid | datname | pid | usesysid | usename | application_name | client_addr - | client_hostname | client_port | backend_start | xact_start | quer - y_start | state_change | waiting | enqueue | state | resource_pool - | query - -------+----------+-----------------+----------+----------+------------------+--------------- - -+-----------------+-------------+-------------------------------+------------+-------------- - -----------------+-------------------------------+---------+---------+-------+--------------- - +---------------------------------------------- - 13626 | mogdb | 140390162233104 | 10 | gaussdba | | - | | -1 | 2016-07-15 14:08:59.474118+08 | | 2016-07-15 14 - :09:04.496769+08 | 2016-07-15 14:09:04.496975+08 | f | | idle | default_pool - | select count(group_name) from pgxc_group; - 13626 | mogdb | 140390132872976 | 10 | gaussdba | cn_5002 | 10.180.123.163 - | | 48614 | 2016-07-15 14:11:16.014871+08 | | 2016-07-15 14 - :21:17.346045+08 | 2016-07-15 14:21:17.346095+08 | f | | idle | default_pool - | SET SESSION AUTHORIZATION DEFAULT;RESET ALL; - (2 rows) - ``` - -2. Release idle connections. - - Check each connection and release them after obtaining approval from the users of the connections. Run the following SQL command to release a connection using **pid** obtained in the previous step: - - ``` - mogdb=# SELECT pg_terminate_backend(140390132872976); - ``` - - Information similar to the following is displayed: - - ``` - mogdb=# SELECT pg_terminate_backend(140390132872976); - pg_terminate_backend - ---------------------- - t - (1 row) - ``` - - If no connections can be released, go to the next step. - -3. Increase the maximum number of connections. - - ``` - gs_guc set -D /mogdb/data/dbnode -c "max_connections= 800" - ``` - - **800** is the new maximum value. - -4. Restart database services to make the new settings take effect. - - > **NOTE:** The restart results in operation interruption. Properly plan the restart to avoid affecting users. - - ``` - gs_om -t stop && gs_om -t start - ``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/8-routinely-maintaining-tables.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/8-routinely-maintaining-tables.md deleted file mode 100644 index 53342024..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/8-routinely-maintaining-tables.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Routinely Maintaining Tables -summary: Routinely Maintaining Tables -author: Zhang Cuiping -date: 2021-03-04 ---- - -# Routinely Maintaining Tables - -To ensure proper database running, after insert and delete operations, you need to routinely run **VACUUM FULL** and **ANALYZE** as appropriate for customer scenarios and update statistics to obtain better performance. - -**Related Concepts** - -You need to routinely run **VACUUM**, **VACUUM FULL**, and **ANALYZE** to maintain tables, because: - -- **VACUUM FULL** can be used to reclaim disk space occupied by updated or deleted data and combine small-size data files. -- **VACUUM** can be used to maintain a visualized mapping for each table to track pages that contain arrays visible to other active transactions. A common index scan uses the mapping to obtain the corresponding arrays and check whether the arrays are visible to the current transaction. 
If the arrays cannot be obtained, capture a batch of arrays to check the visibility. Therefore, updating the visualized mapping of a table can accelerate unique index scans. -- Running **VACUUM** can avoid original data loss caused by duplicate transaction IDs when the number of executed transactions exceeds the database threshold. -- **ANALYZE** can be used to collect statistics on tables in databases. The statistics are stored in the system catalog **PG_STATISTIC**. Then the query optimizer uses the statistics to work out the most efficient execution plan. - -**Procedure** - -1. Run the **VACUUM** or **VACUUM FULL** command to reclaim disk space. - - - **VACUUM**: - - Run **VACUUM** for a table. - - ``` - mogdb=# VACUUM customer; - ``` - - ``` - VACUUM - ``` - - This statement can be concurrently executed with database operation commands, including **SELECT**, **INSERT**, **UPDATE**, and **DELETE**; excluding **ALTER TABLE**. - - Run **VACUUM** for the table partition. - - ``` - mogdb=# VACUUM customer_par PARTITION ( P1 ); - ``` - - ``` - VACUUM - ``` - - - **VACUUM FULL**: - - ``` - mogdb=# VACUUM FULL customer; - ``` - - ``` - VACUUM - ``` - - During the command running, exclusive locks need to be added to the table and all other database operations need to be suspended. - -2. Run **ANALYZE** to update statistics. - - ``` - mogdb=# ANALYZE customer; - ``` - - ``` - ANALYZE - ``` - - Run **ANALYZE VERBOSE** to update statistics and display table information. - - ``` - mogdb=# ANALYZE VERBOSE customer; - ``` - - ``` - ANALYZE - ``` - - You can run **VACUUM ANALYZE** at the same time to optimize the query. - - ``` - mogdb=# VACUUM ANALYZE customer; - ``` - - ``` - VACUUM - ``` - - > **NOTE:** **VACUUM** and **ANALYZE** cause a substantial increase in I/O traffic, which may affect other active sessions. Therefore, you are advised to set the cost-based vacuum delay feature by specifying the **vacuum_cost_delay** parameter. For details, see "GUC Parameters > Resource Consumption > Cost-based Vacuum Delay" in the *Developer Guide*. - -3. Delete a table. - - ``` - mogdb=# DROP TABLE customer; - mogdb=# DROP TABLE customer_par; - mogdb=# DROP TABLE part; - ``` - - If the following information is displayed, the tables have been deleted: - - ``` - DROP TABLE - ``` - -**Maintenance Suggestions** - -- Routinely run **VACUUM FULL** for large tables. If the database performance deteriorates, run **VACUUM FULL** for the entire database. If the database performance is stable, you are advised to run **VACUUM FULL** monthly. -- Routinely run **VACUUM FULL** on system catalogs, especially **PG_ATTRIBUTE**. -- Enable automatic vacuum processes (**AUTOVACUUM**) in the system. The processes automatically run the **VACUUM** and **ANALYZE** statements to reclaim the record space marked as the deleted state and update statistics in the table. 
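As a concrete illustration of the last suggestion above, autovacuum is controlled by GUC parameters and can be switched on with gs_guc; this is a minimal sketch, assuming the data directory /mogdb/data/db1 used elsewhere in this guide (adjust the path to your installation):

```bash
# Enable the autovacuum background workers and apply the change without a restart
gs_guc reload -D /mogdb/data/db1 -c "autovacuum = on"

# Optionally allow more concurrent autovacuum workers
gs_guc reload -D /mogdb/data/db1 -c "autovacuum_max_workers = 5"
```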
diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/9-routinely-recreating-an-index.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/9-routinely-recreating-an-index.md deleted file mode 100644 index 5ddb4d73..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/9-routinely-recreating-an-index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Routinely Recreating an Index -summary: Routinely Recreating an Index -author: Zhang Cuiping -date: 2021-03-04 ---- - -# Routinely Recreating an Index - -## **Background** - -When data deletion is repeatedly performed in the database, index keys will be deleted from the index pages, resulting in index bloat. Recreating an index routinely improves query efficiency. - -The database supports B-tree indexes. Recreating a B-tree index routinely helps improve query efficiency. - -- If a large amount of data is deleted, index keys on the index pages will be deleted. But index pages will not be directly deleted, that is the number of index pages will not decrease, then index bloat occurs. Recreating an index helps reclaim wasted space. -- In a newly created index, pages with adjacent logical structures tend to have adjacent physical structures. Therefore, a new index achieves a higher access speed than an index that has been updated for multiple times. - -**Methods** - -Use either of the following two methods to recreate an index: - -- Run the **DROP INDEX** statement to delete the index and then run the **CREATE INDEX** statement to create an index. - - When you delete an index, a temporary exclusive lock is added in the parent table to block related read/write operations. During index creation, the write operation is locked, whereas the read operation is not locked and can use only sequential scans. - -- Run **REINDEX** to recreate an index. - - - When you run the **REINDEX TABLE** statement to recreate an index, an exclusive lock is added to block related read/write operations. - - When you run the **REINDEX INTERNAL TABLE** statement to recreate an index for a **desc** table (such as column-store **cudesc** table), an exclusive lock is added to block related read/write operations on the table. - -**Procedure** - -Assume the ordinary index **areaS_idx** exists in the **area_id** column of the imported table **areaS**. Use either of the following two methods to recreate an index: - -- Run the **DROP INDEX** statement to delete the index and run the **CREATE INDEX** statement to create an index. - - 1. Delete the index. - - ``` - mogdb=# DROP INDEX areaS_idx; - ``` - - If the following information is displayed, the deletion is successful: - - ``` - DROP INDEX - ``` - - 2. Create an index - - ``` - mogdb=# CREATE INDEX areaS_idx ON areaS (area_id); - ``` - - If the following information is displayed, the creation is successful: - - ``` - CREATE INDEX - ``` - -- Run **REINDEX** to recreate an index. - - - Run **REINDEX TABLE** to recreate an index. - - ``` - mogdb=# REINDEX TABLE areaS; - ``` - - If the following information is displayed, the recreating is successful: - - ``` - REINDEX - ``` - - - Run **REINDEX INTERNAL TABLE** to recreate an index for a **desc** table (such as column-store **cudesc** table). 
- - ``` - mogdb=# REINDEX INTERNAL TABLE areaS; - ``` - - If the following information is displayed, the recreating is successful: - - ``` - REINDEX - ``` - -> **NOTE:** Before you recreate an index, you can increase the values of **maintenance_work_mem** and **psort_work_mem** to accelerate the index recreation. diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/exporting-and-viewing-the-wdr.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/exporting-and-viewing-the-wdr.md deleted file mode 100644 index 5c383e5b..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/exporting-and-viewing-the-wdr.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Exporting and Viewing the WDR -summary: Exporting and Viewing the WDR -author: GUO HUAN -date: 2022-10-13 ---- - -# Exporting and Viewing the WDR - -Accessing WDR snapshot data requires the **sysadmin** or **monadmin** permission. Therefore, to generate a WDR, you must use the **root** account or other accounts with the aforementioned permissions. - -1. Run the following command to create a report file: - - ``` - touch /home/om/wdrTestNode.html - ``` - -2. Connect to the Postgres system database. - - ``` - gsql -d postgres -p [*Port number*] -r - ``` - -3. Select two different snapshots in the **snapshot.snapshot** table. If no service restart occurs between the two snapshots, use the two snapshots to generate a report. - - ``` - gsql> select * from snapshot.snapshot order by start_ts desc limit 10; - ``` - -4. Run the following commands to generate a WDR in HTML format on the local PC: - - 1. Run the following commands to set the report format. **\a** indicates that table row and column symbols are not displayed. **\t** indicates that column names are not displayed. **\o** specifies an output file. - - ``` - gsql> \a - gsql> \t - gsql> \o {*Report path*} - ``` - - 2. Run the following command to generate a WDR in HTML format: - - ``` - gsql> select generate_wdr_report(begin_snap_id Oid, end_snap_id Oid, int report_type, int report_scope, int node_name ); - ``` - - Example 1: Generate a cluster-level report. - - ``` - select generate_wdr_report(1, 2, 'all', 'cluster',null); - ``` - - Example 2: Generate a report for a node. - - ``` - select generate_wdr_report(1, 2, 'all', 'node', pgxc_node_str()::cstring); - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Currently, the name of the MogDB node is fixed to **dn_6001_6002_6003**. You can also replace it with the actual node name. - - **Table 1** Parameter description - - | **Parameter** | **Description** | **Value Range** | - | ------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | - | begin_snap_id | ID of a snapshot that starts at a performance period (**snapshot_id** in the **snapshot.snaoshot** table) | - | - | end_snap_id | ID of the end snapshot. By default, the value of **end_snap_id** is greater than the value of **begin_snap_id** (**snapshot_id** in the **snapshot.snaoshot** table). | - | - | report_type | Specifies the type of the generated report. | - summary
- detail
- all: Both **summary** and **detail** types are included. | - | report_scope | Specifies the scope for the report to be generated. | - cluster: cluster
- node: a node in the cluster | - | node_name | - When report_scope is set to single node, set this parameter to the name of the corresponding node.
- When report_scope is set to cluster, this parameter can be omitted or set to NULL. | - | - -5. Run the following command to disable the output options and format the output: - - ``` - \o \a \t - ``` - -6. View the WDR in **/home/om/** as required. - - **Table 2** Main content of the WDR - - | **Item** | **Description** | - | :------------------------------------------------------ | :----------------------------------------------------------- | - | Database Stat (cluster scope) | Database performance statistics: transactions, read and write operations, row activities, write conflicts, and deadlocks | - | Load Profile (cluster scope) | Cluster performance statistics: CPU time, DB time, logical or physical read, I/O performance, login and logout, load strength, and load performance | - | Instance Efficiency Percentages (cluster or node scope) | Cluster-level or node-level cache hit ratio | - | I/O Profile (cluster or node scope) | I/O usage in the cluster or node dimension | - | Top 10 Events by Total Wait Time (node scope) | Most time-consuming event | - | Wait Classes by Total Wait Time (node scope) | Category of the wait time that is most time-consuming | - | Host CPU (node scope) | CPU usage of the host | - | Memory Statistics (node scope) | Kernel memory usage distribution | - | Time Model (node scope) | Time distribution information about the statements on a node | - | Wait Events (node scope) | Statistics on wait events at the node level | - | Cache I/O Stats (cluster or node scope) | I/O statistics on user tables and indexes | - | Utility Status (node scope) | Status information about the replication slot and background checkpoint | - | Object Stats (cluster or node scope) | Performance statistics in the index and table dimensions | - | Configuration settings (node scope) | Node configuration | - | SQL Statistics (cluster or node scope) | SQL statement performance statistics: end-to-end time, row activities, cache hit, CPU consumption, and time consumption | - | SQL Detail (cluster or node scope) | SQL statement text details | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/routine-maintenance.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/routine-maintenance.md deleted file mode 100644 index b7236244..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/routine-maintenance.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Routine Maintenance -summary: Routine Maintenance -author: Guo Huan -date: 2023-05-22 ---- - -# Routine Maintenance - -+ **[Starting and Stopping MogDB](0-starting-and-stopping-mogdb.md)** -+ **[Using the gsql Client for Connection](using-the-gsql-client-for-connection.md)** -+ **[Routine Maintenance Check Items](1-routine-maintenance-check-items.md)** -+ **[Checking OS Parameters](2-checking-os-parameters.md)** -+ **[Checking MogDB Health Status](3-checking-mogdb-health-status.md)** -+ **[Checking Database Performance](4-checking-database-performance.md)** -+ **[Checking and Deleting Logs](5-checking-and-deleting-logs.md)** -+ **[Checking Time Consistency](6-checking-time-consistency.md)** -+ **[Checking the Number of Application Connections](7-checking-the-number-of-application-connections.md)** -+ **[Routinely Maintaining Tables](8-routinely-maintaining-tables.md)** -+ **[Routinely Recreating an Index](9-routinely-recreating-an-index.md)** -+ **[Exporting and Viewing the WDR](exporting-and-viewing-the-wdr.md)** -+ **[Data Security Maintenance 
Suggestions](10-data-security-maintenance-suggestions.md)** -+ **[Slow SQL Diagnosis](slow-sql-diagnosis.md)** -+ **[Log Reference](11-log-reference.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/slow-sql-diagnosis.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/slow-sql-diagnosis.md deleted file mode 100644 index 224b6635..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/slow-sql-diagnosis.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Slow SQL Diagnosis -summary: Slow SQL Diagnosis -author: Guo Huan -date: 2022-04-13 ---- - -# Slow SQL Diagnosis - -## Background - -If the SQL statement execution performance does not meet expectations, you can view the SQL statement execution information to analyze the behavior and diagnose problems that occur during the execution. - -## Prerequisites - -- The database instance is running properly. - -- The GUC parameter **track_stmt_stat_level** is properly set for querying the SQL statement information. - -- Only the system administrator and monitor administrator can perform this operation. - - ```sql - Run the following command to check the execution information about the SQL statements in the database instance: - gsql> select * from dbe_perf.get_global_full_sql_by_timestamp(start_timestamp, end_timestamp); - Run the following command to check the execution information about the slow SQL statements in the database instance: - gsql> select * from dbe_perf.get_global_slow_sql_by_timestamp(start_timestamp, end_timestamp); - Check the execution information about the SQL statement on the current primary node. - gsql> select * from statement_history; - Check the execution information about the SQL statement on the current standby node. - gsql> select * from dbe_perf.standby_statement_history(is_only_slow, start_timestamp, end_timestamp); - ``` diff --git a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/using-the-gsql-client-for-connection.md b/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/using-the-gsql-client-for-connection.md deleted file mode 100644 index 48a07df1..00000000 --- a/product/en/docs-mogdb/v5.2/administrator-guide/routine-maintenance/using-the-gsql-client-for-connection.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: Using the gsql Client for Connection -summary: Using the gsql Client for Connection -author: Zhang Cuiping -date: 2021-04-14 ---- - -# Using the gsql Client for Connection - -## Confirming Connection Information - -You can use a client tool to connect a database through the primary node of the database. Before the connection, obtain the IP address of the primary node of the database and the port number of the server where the primary node of the database is deployed. - -1. Log in to the primary node of the database as the OS user **omm**. - -2. Run the **gs_om -t status --detail** command to query instances in the MogDB cluster. - - ```bash - gs_om -t status --detail - - [ Datanode State ] - - node node_ip instance state - --------------------------------------------------------------------------------- - 1 mogdb-kernel-0005 172.16.0.176 6001 /mogdb/data/db1 P Primary Normal - ``` - - For example, the server IP address where the primary node of the database is deployed is 172.16.0.176. The data path of the primary node of the database is **/mogdb/data/db1**. - -3. Confirm the port number of the primary node of the database. 
- - View the port number in the **postgresql.conf** file in the data path of the primary database node obtained in step 2. The command is as follows: - - ```bash - cat /mogdb/data/db1/postgresql.conf | grep port - - port = 26000 # (change requires restart) - #comm_sctp_port = 1024 # Assigned by installation (change requires restart) - #comm_control_port = 10001 # Assigned by installation (change requires restart) - # supported by the operating system: - # e.g. 'localhost=10.145.130.2 localport=12211 remotehost=10.145.130.3 remoteport=12212, localhost=10.145.133.2 localport=12213 remotehost=10.145.133.3 remoteport=12214' - # e.g. 'localhost=10.145.130.2 localport=12311 remotehost=10.145.130.4 remoteport=12312, localhost=10.145.133.2 localport=12313 remotehost=10.145.133.4 remoteport=12314' - # %r = remote host and port - alarm_report_interval = 10 - support_extended_features=true - ``` - - **26000** in the first line is the port number of the primary database node. - -
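If only the port value itself is needed (for scripting, for example), it can be extracted directly; a minimal sketch, assuming the same data path and the "port = <value>" form shown above:

```bash
# Print only the active port value from postgresql.conf
awk '/^port[ =]/ {print $3}' /mogdb/data/db1/postgresql.conf
```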
- -### Installing the gsql Client - -On the host, upload the client tool package and configure environment variables for the **gsql** client. - -1. Log in to the host where the client resides as any user. - -2. Run the following command to create the **/opt/mogdb/tools** directory: - - ```bash - mkdir /opt/mogdb/tools - ``` - -3. Obtain the file **MogDB-x.x.x-openEuler-64bit-tools.tar.gz** from the software installation package and upload it to the **/opt/mogdb/tools** directory. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - The software package is located where you put it before installation. Set it based on site requirements. - > - The tool package name may vary in different OSs. Select the tool package suitable for your OS. - -4. Run the following commands to decompress the package: - - ```bash - cd /opt/mogdb/tools - tar -zxvf MogDB-x.x.x-openEuler-64bit-tools.tar.gz - ``` - -5. Set environment variables. - - Run the following command to open the **~/.bashrc** file: - - ```bash - vi ~/.bashrc - ``` - - Enter the following content and run **:wq!** to save and exit. - - ```bash - export PATH=/opt/mogdb/tools/bin:$PATH - export LD_LIBRARY_PATH=/opt/mogdb/tools/lib:$LD_LIBRARY_PATH - ``` - -6. Run the following command to make the environment variables take effect: - - ```bash - source ~/.bashrc - ``` - -
- -## Connecting to a Database Using gsql - -
- -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - By default, if a client is idle state after connecting to a database, the client automatically disconnects from the database in the duration specified by **session_timeout**. To disable the timeout setting, set **session_timeout** to **0**. - -
- -### Connecting to a Database Locally - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Connect to a database. - - After the database is installed, a database named **postgres** is generated by default. When connecting to a database for the first time, you can connect to this database. - - Run the following command to connect to the **postgres** database: - - ```bash - gsql -d postgres -p 26000 - ``` - - **postgres** is the name of the database to be connected, and **26000** is the port number of the database primary node. Replace the values as required. - - If information similar to the following is displayed, the connection succeeds: - - ```sql - gsql ((MogDB x.x.x build 56189e20) compiled at 2022-01-07 18:47:53 commit 0 last mr ) - Non-SSL connection (SSL connection is recommended when requiring high-security) - Type "help" for help. - - postgres=# - ``` - - User **omm** is the administrator, and **postgres=#** is displayed. If you log in to and connect to the database as a common user, **postgres=>** is displayed. - - **Non-SSL connection** indicates that the database is not connected in SSL mode. If high security is required, connect to the database in SSL mode. - -3. Exit the database. - - ```sql - postgres=# \q - ``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - When connecting to the database locally as user **omm**, no password is required. This is due to the default setting in the **pg_hba.conf** file that allows the local machine to connect in the **trust** way. -> - For details about the client authentication methods, see the [Client Access Authentication](../../security-guide/security/1-client-access-authentication.md) chapter. - -
- -## Connecting to a Database Remotely - -
- -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - Due to security restrictions, you can not remotely connect to the database as user **omm**. - -
- -### Configuring a Whitelist Using gs_guc (Update pg_hba.conf) - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Configure the client authentication mode and enable the client to connect to the host as user **jack**. User **omm** cannot be used for remote connection. - - Assume you are to allow the client whose IP address is **172.16.0.245** to access the current host. - - ```sql - gs_guc set -N all -I all -h "host all jack 172.16.0.245/24 sha256" - ``` - - **NOTICE:** - - - Before using user **jack**, connect to the database locally and run the following command in the database to create user **jack**: - - ```sql - postgres=# CREATE USER jack PASSWORD 'Test@123'; - ``` - - - **-N all** indicates all hosts in MogDB. - - - **-I all** indicates all instances on the host. - - - **-h** specifies statements that need to be added in the **pg_hba.conf** file. - - - **all** indicates that a client can connect to any database. - - - **jack** indicates the user that accesses the database. - - - **172.16.0.245/24** indicates that only the client whose IP address is **172.16.0.245** can connect to the host. The specified IP address must be different from those used in MogDB. **24** indicates that there are 24 bits whose value is 1 in the subnet mask. That is, the subnet mask is 255.255.255.0. - - - **sha256** indicates that the password of user **jack** is encrypted using the SHA-256 algorithm. - - This command adds a rule to the **pg_hba.conf** file corresponds to the primary node of the database. The rule is used to authenticate clients that access primary node. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > + For details about the client authentication methods, see the [Client Access Authentication](../../security-guide/security/1-client-access-authentication.md) chapter. - -3. Connect to a database. - - After the database is installed, a database named **postgres** is generated by default. When connecting to a database for the first time, you can connect to this database. - - ```bash - gsql -d postgres -h 172.16.0.176 -U jack -p 26000 -W Test@123 - ``` - - **postgres** is the name of the database, **172.16.0.176** is the IP address of the server where the primary node of the database resides, **jack** is the user of the database, **26000** is the port number of the CN, and **Test@123** is the password of user **jack**. diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/adaptive-plan-selection.md b/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/adaptive-plan-selection.md deleted file mode 100644 index c202950f..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/adaptive-plan-selection.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Adaptive Plan Selection -summary: Adaptive Plan Selection -author: zhang cuiping -date: 2022-10-13 ---- - -# Adaptive Plan Selection - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -This feature triggers plan selection based on the base table condition selection rate, and provides cache multi-plan management and adaptive selection for queries that use partial indexes and offsets. In typical scenarios, the query throughput can be improved by several times. - -## Benefits - -Users can maintain multiple cache plans to adapt to different query parameters, improving query execution performance. 
- -## Description - -Adaptive plan selection applies to scenarios where a general cache plan is used for plan execution. Cache plan exploration is performed by using range linear expansion, and plan selection is performed by using range coverage matching. Adaptive plan selection makes up for the performance problem caused by the traditional single cache plan that cannot change according to the query condition parameter, and avoids frequent calling of query optimization. - -## Enhancements - -None - -## Constraints - -- Database services are running properly. -- Users have logged in to the database. -- Users have created a database and data table, and have imported data. - -## Dependencies - -It depends on the plan cache function in the database. - -## Related Pages - -[Adaptive Plan Selection](../../../AI-features/ai4db/abo-optimizer/adaptive-plan-selection/ai4db-adaptive-plan-selection.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/characteristic-description-abo-optimizer.md b/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/characteristic-description-abo-optimizer.md deleted file mode 100644 index b566d486..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/characteristic-description-abo-optimizer.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: ABO Optimizer -summary: ABO Optimizer -author: Guo Huan -date: 2023-05-22 ---- - -# ABO Optimizer - -+ **[Intelligent Cardinality Estimation](intelligent-cardinality-estimation.md)** -+ **[Adaptive Plan Selection](adaptive-plan-selection.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/intelligent-cardinality-estimation.md b/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/intelligent-cardinality-estimation.md deleted file mode 100644 index ced4aebc..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/abo-optimizer/intelligent-cardinality-estimation.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Intelligent Cardinality Estimation -summary: Intelligent Cardinality Estimation -author: zhang cuiping -date: 2022-10-13 ---- - -# Intelligent Cardinality Estimation - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -Intelligent cardinality estimation uses lightweight algorithms in databases to model multi-column data distribution and provides the capability of multi-column equality cardinality estimation. In scenarios where data skew occurs and columns are closely related, more accurate estimation results can be obtained to provide accurate cost reference for the optimizer, improving plan generation accuracy and database query execution efficiency. - -## Benefits - -Users can create intelligent statistics to improve the accuracy of multi-column statistics and improve the query optimization performance. - -## Description - -The intelligent estimation cardinality first uses data samples in the database to model data distribution, and compresses and stores the model in the database. The optimizer triggers intelligent estimation in the execution plan generation phase to estimate the cost more accurately and generate a better plan. - -## Enhancements - -None - -## Constraints - -- The database is running properly and resources are sufficient. 
-- Only the following data types are supported: FLOAT8, Double Precision, FlOAT4, REAL, INT16, BIGINT, INTEGER, VARCHAR, CHARACTER VARYING, CHAR, CHARACTER, and NUMERIC. -- Only query cardinality estimation with no more than 64 columns is supported. -- To ensure system performance, model creation uses only a maximum of 200,000 data samples. If the data is too sparse, the estimation result may be inaccurate. -- To make full use of the limited memory for model access acceleration, you are advised to create a maximum of 30 AI statistics columns. Otherwise, memory replacement may be triggered. -- If data of the variable-length string type is too long, the creation and estimation performance of cardinality estimation model may be affected. -- In the current version, if both MCV and Bayesian networks are created, the cardinality estimation performance is low. Therefore, you are not advised to create MCV and Bayesian networks. - -## Dependencies - -It depends on the multi-column statistics creation syntax and data sampling algorithms in databases. - -## Related Pages - -[Intelligent Cardinality Estimation](../../../AI-features/ai4db/abo-optimizer/intelligent-cardinality-estimation/ai4db-intelligent-cardinality-estimation.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai-capabilities.md b/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai-capabilities.md deleted file mode 100644 index e4e593ed..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai-capabilities.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AI Capabilities -summary: AI Capabilities -author: Guo Huan -date: 2023-05-22 ---- - -# AI Capabilities - -- **[AI4DB: Autonomous Database O&M](ai4db-autonomous-database-o-m/characteristic-description-ai4db.md)** -- **[DB4AI: Database-driven AI](db4ai-database-driven-ai.md)** -- **[ABO Optimizer](abo-optimizer/characteristic-description-abo-optimizer.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/1-database-metric-collection-forecast-and-exception-detection.md b/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/1-database-metric-collection-forecast-and-exception-detection.md deleted file mode 100644 index cdc20900..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/1-database-metric-collection-forecast-and-exception-detection.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Database Metric Collection, Forecast, and Exception Detection -summary: Database Metric Collection, Forecast, and Exception Detection -author: Guo Huan -date: 2022-05-10 ---- - -# Database Metric Collection, Forecast, and Exception Detection - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -This feature serves as an AI tool integrated into MogDB and can be used to collect and forecast database metrics, as well as monitor and diagnose exceptions. It is a component in the DBMind suite. Currently, this feature is compatible with the Prometheus platform to collect database system metrics. Prometheus exporters are provided to collect and process database monitoring metrics. By monitoring the time series data of metrics, you can forecast the future load trend and diagnose problems. In addition, you can perform exception detection. 
- -## Benefits - -- This feature greatly simplifies the work of O&M personnel, frees up substantial labor, and reduces costs for the company. -- You can use the metric collection, monitoring, and forecast functions to detect problems in advance, preventing database exceptions from causing greater loss. - -## Description - -Prometheus is a popular open-source monitoring system in the industry. It is also a time series database. A Prometheus collector is called an exporter and is used to collect metrics from monitored modules. To interconnect with the Prometheus platform, DBMind provides two types of exporters: openGauss-exporter for collecting database metrics and reprocessing-exporter for reprocessing the collected metrics. - -This feature supports forecasting of collected metrics. You can specify key performance indicators (KPIs) to be forecast by modifying configuration files. This helps you identify metric trends and perform O&M operations in a timely manner. For example, you can forecast memory usage to detect memory leaks and forecast disk usage to expand capacity at a proper time. The AI-based exception detection algorithm can detect trend fluctuations in metrics, helping users detect problems in time. - -## Enhancements - -This feature is greatly improved in MogDB 3.0.0 and is compatible with the Prometheus platform. Two exporters are used to connect to Prometheus. - -## Constraints - -- The database is normal, and the data directory has been written into environment variables. -- The Python version must be 3.6 or later. -- The Prometheus monitoring platform is configured and the Prometheus service is started so that monitoring data can be collected. - -## Dependencies - -Prometheus - -## Related Pages - -[Prometheus Exporter](../../../AI-features/ai4db/components-that-support-dbmind/prometheus-exporter/prometheus-exporter.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/2-root-cause-analysis-for-slow-sql-statements.md b/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/2-root-cause-analysis-for-slow-sql-statements.md deleted file mode 100644 index fa9b5fc7..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/2-root-cause-analysis-for-slow-sql-statements.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Root Cause Analysis for Slow SQL Statements -summary: Root Cause Analysis for Slow SQL Statements -author: Guo Huan -date: 2022-05-10 ---- - -# Root Cause Analysis for Slow SQL Statements - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -Slow SQL statements have always been a pain point in database O&M, and effectively diagnosing their root causes is a major challenge. Based on the characteristics of MogDB and the slow SQL diagnosis experience of DBAs on the live network, this tool supports more than 15 root causes of slow SQL statements, outputs multiple root causes ranked by likelihood, and provides specific solutions. - -## Benefits - -This feature provides customers with fast and reliable slow SQL statement discovery and root cause analysis functions, greatly simplifying the work of O&M personnel.
- -## Description - -Based on the Prometheus data collection solution, data required for root cause analysis of slow SQL statements is collected, including system resource information (CPU usage, memory usage, and I/O), load information (QPS), large process information (including external large processes and scheduled database tasks), slow SQL statement text, start and end times of slow SQL statement execution, slow SQL execution plans, temporary file information, and so on. This feature then uses AI algorithms to determine the most likely root causes of the slow SQL statements, and provides suggestions together with confidence levels. - -## Enhancements - -None. - -## Constraints - -- The database is normal, and the client can be connected properly. -- An environment running Python 3.6 or later is available. -- The information about slow SQL statements is obtained from the workload diagnosis report (WDR). In the database WDR, slow SQL statements are marked. The GUC parameter **track_stmt_stat_level** is enabled by default; if it has been disabled, you need to enable it manually. Generally, **track_stmt_stat_level** is set to **'off, L0'**; higher levels will affect performance. Data collection is implemented by the Prometheus solution, so you need to configure the Prometheus data collection platform. This feature focuses on algorithms and obtains metric sequence information from Prometheus. - -## Dependencies - -None. - -## Related Pages - -[Slow Query Diagnosis: Root Cause Analysis for Slow SQL Statements](../../../AI-features/ai4db/ai-sub-functions-of-the-dbmind/slow-query-diagnosis-root-cause-analysis-for-slow-sql-statements/slow-sql-statements.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/3-index-recommendation.md b/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/3-index-recommendation.md deleted file mode 100644 index 807bbd6e..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/3-index-recommendation.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Index Recommendation -summary: Index Recommendation -author: Guo Huan -date: 2022-05-10 ---- - -# Index Recommendation - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -This feature serves as an intelligent database index recommendation tool that covers multiple task levels and application scenarios. It provides the single-query index recommendation function, virtual index function, and workload-level index recommendation function to provide reliable index recommendations for users. - -## Benefits - -This feature provides the quick and reliable index recommendation function, greatly simplifying the work of O&M personnel. - -## Description - -The single-query index recommendation function allows users to directly perform operations in the database. It generates recommended indexes for a single query statement entered by the user based on the semantic information of the query and the statistics of the database. The virtual index function also allows users to directly perform operations in the database. It simulates the creation of a real index, avoiding the time and space overhead of actually building one. Based on the virtual index, users can evaluate the impact of the index on the specified query statement by using the optimizer.
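As an interactive sketch of these first two functions (assuming the openGauss-style in-database APIs `gs_index_advise` and the `hypopg` virtual-index functions that MogDB inherits; table `t1` is hypothetical), both can be exercised directly from a gsql session:

```sql
-- Single-query index recommendation: returns recommended (schema, table, column) tuples.
SELECT * FROM gs_index_advise('SELECT * FROM t1 WHERE a = 100');

-- Virtual index: simulate an index without paying its build cost.
SET enable_hypo_index = on;
SELECT * FROM hypopg_create_index('CREATE INDEX ON t1(a)');
EXPLAIN SELECT * FROM t1 WHERE a = 100;  -- the optimizer may now pick the hypothetical index
SELECT * FROM hypopg_reset_index();      -- discard all hypothetical indexes
```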
The workload-level index recommendation can be used by running scripts outside the database. This feature takes a workload consisting of multiple DML statements as input and generates a batch of indexes that optimize the overall workload execution performance. - -## Enhancements - -None. - -## Constraints - -The database is normal, and the client can be connected properly. - -The gsql tool has been installed by the current user, and the tool path has been added to the **PATH** environment variable. - -An environment running Python 3.6 or later is available. - -## Dependencies - -None. - -## Related Pages - -[Index-advisor: Index Recommendation](../../../AI-features/ai4db/ai-sub-functions-of-the-dbmind/index-advisor-index-recommendation/index-advisor-index-recommendation.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/4-parameter-tuning-and-diagnosis.md b/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/4-parameter-tuning-and-diagnosis.md deleted file mode 100644 index f0f7c255..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/4-parameter-tuning-and-diagnosis.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Parameter Tuning and Diagnosis -summary: Parameter Tuning and Diagnosis -author: Guo Huan -date: 2022-05-10 ---- - -# Parameter Tuning and Diagnosis - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -This feature serves as a parameter tuning tool integrated into databases. It uses AI technologies such as deep reinforcement learning and global search algorithms to obtain optimal database parameter settings without manual intervention. It does not have to be deployed together with the database environment; it can be deployed and run independently, without requiring a database installation environment. - -## Benefits - -This tool can quickly provide the parameter adjustment configuration for the current load in any scenario, reducing the database administrator's manual intervention, improving the O&M effect, and meeting customer expectations. - -## Description - -The tuning program can run in any of the following modes: - -- **recommend**: Log in to the database using the specified username, obtain feature information about the running workload, and generate a parameter recommendation report based on that information. This mode reports improper parameter settings and potential risks in the current database, outputs the behavior and characteristics of the currently running workload, and outputs the recommended parameter settings. In this mode, the database does not need to be restarted; in other modes, the database may need to be restarted repeatedly. -- **train**: Modify parameters and execute the benchmark based on the benchmark information provided by users. The reinforcement learning model is trained through repeated iteration so that you can load the model in **tune** mode for optimization. -- **tune**: Use an optimization algorithm to tune database parameters. Currently, two types of algorithms are supported: deep reinforcement learning and global search (global optimization) algorithms. The deep reinforcement learning mode requires a model that has been generated in **train** mode, whereas the global search algorithm does not need to be trained in advance and can be used directly for search and optimization. - -## Enhancements - -None.
- -## Constraints - -- The database is normal, the client can be properly connected, and data can be imported to the database, so that the tuning program can run the benchmark to measure the optimization effect. -- To use this tool, you need to specify the user who logs in to the database. That user must have sufficient permissions to obtain the required database status information. -- If you log in to the database host as a Linux user, add **$GAUSSHOME/bin** to the PATH environment variable so that you can directly run database O&M tools, such as gsql, gs_guc, and gs\_ctl. -- The recommended Python version is Python 3.6 or later. The required dependencies have been installed in the operating environment, and the tuning program can be started properly. You can install a Python 3.6+ environment independently without setting it as a global environment variable. You are not advised to install the tool as the root user. If you install the tool as the root user and run it as another user, ensure that you have the read permission on the configuration file. -- This tool can run in three modes. In **tune** and **train** modes, you need to configure the benchmark running environment and import data. The tool iteratively runs the benchmark to check whether performance improves after the parameters are modified. -- In **recommend** mode, you are advised to run the command while the database is executing the workload to obtain more accurate real-time workload information. -- By default, this tool provides benchmark running script samples for TPC-C, TPC-H, TPC-DS, and sysbench. If you use these benchmarks to run pressure tests on the database system, you can modify or configure the preceding configuration files. To adapt to your own service scenarios, write the script file that drives your customized benchmark based on the **template.py** file in the **benchmark** directory. - -## Dependencies - -None. - -## Related Pages - -[X-Tuner: Parameter Tuning and Diagnosis](../../../AI-features/ai4db/ai-sub-functions-of-the-dbmind/x-tuner-parameter-optimization-and-diagnosis/x-tuner-parameter-optimization-and-diagnosis.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/5-slow-sql-statement-discovery.md b/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/5-slow-sql-statement-discovery.md deleted file mode 100644 index 1e752152..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/5-slow-sql-statement-discovery.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Slow SQL Statement Discovery -summary: Slow SQL Statement Discovery -author: Guo Huan -date: 2022-05-10 ---- - -# Slow SQL Statement Discovery - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -This feature serves as an SQL statement execution time forecast tool. Using a template-based method, it forecasts the execution time of SQL statements from statement logical similarity and historical execution records, without needing to obtain the SQL execution plan. - -## Benefits - -- This feature does not require users to provide SQL execution plans. Therefore, database performance is not affected. -- Unlike other algorithms in the industry that are limited to OLAP or OLTP, this feature is more widely applicable.
- -## Description - -SQLdiag focuses on the historical SQL statements of the database: it summarizes their execution performance and then uses them to infer the behavior of new statements. Because the execution duration of SQL statements in a database rarely changes much over a short period, SQLdiag can find statements in the historical data that are similar to an incoming statement and predict its execution duration, based on SQL vectorization technology and a template-based method. - -## Enhancements - -None. - -## Constraints - -- The historical logs and the format of the workload to be predicted meet the requirements. You can use the GUC parameter of the database to enable the collection or use the monitoring tool to collect logs. - -To ensure prediction accuracy, the historical statement logs provided by users should be as comprehensive and representative as possible. -- The Python environment has been configured as required. - -## Dependencies - -None. - -## Related Pages - -[SQLdiag: Slow SQL Discovery](../../../AI-features/ai4db/ai-sub-functions-of-the-dbmind/sqldiag-slow-sql-discovery/sqldiag-slow-sql-discovery.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/characteristic-description-ai4db.md b/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/characteristic-description-ai4db.md deleted file mode 100644 index de5c7351..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/ai4db-autonomous-database-o-m/characteristic-description-ai4db.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: AI4DB Autonomous Database O&M -summary: AI4DB Autonomous Database O&M -author: Guo Huan -date: 2023-05-22 ---- - -# AI4DB: Autonomous Database O&M - -+ [Database Metric Collection, Forecast, and Exception Detection](1-database-metric-collection-forecast-and-exception-detection.md) -+ [Root Cause Analysis for Slow SQL Statements](2-root-cause-analysis-for-slow-sql-statements.md) -+ [Index Recommendation](3-index-recommendation.md) -+ [Parameter Tuning and Diagnosis](4-parameter-tuning-and-diagnosis.md) -+ [Slow SQL Statement Discovery](5-slow-sql-statement-discovery.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/db4ai-database-driven-ai.md b/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/db4ai-database-driven-ai.md deleted file mode 100644 index a45df4dd..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/ai-capabilities/db4ai-database-driven-ai.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: DB4AI Database-driven AI -summary: DB4AI Database-driven AI -author: Guo Huan -date: 2022-05-10 ---- - -# DB4AI: Database-driven AI - -## Availability - -This feature is available since MogDB 2.1.0. - -## Introduction - -DB4AI uses database capabilities to drive AI tasks and implement data storage and technology stack isomorphism. By integrating AI algorithms into the database, MogDB supports the native AI computing engine, model management, AI operators, and native AI execution plans, providing users with inclusive AI technologies.
Different from the traditional AI modeling process, DB4AI one-stop modeling eliminates repeated data movement among different platforms, simplifies the development process, and plans the optimal execution path through the database, so that developers can focus on tuning specific services and models. It outcompetes similar products in ease of use and performance. - -## Benefits - -- With this feature, you do not need to write AI model code manually. Instead, you can use out-of-the-box SQL statements to train machine learning models and make forecasts, reducing learning and usage costs. -- Extra overhead caused by fragmented data storage and repeated data migration is avoided. -- Execution is more efficient: in-database model training is several times faster than manual model training. -- Stricter security protection prevents data leakage during AI model training. - -## Description - -MogDB supports the native DB4AI capability. By introducing native AI operators, MogDB simplifies the operation process and fully utilizes the optimization and execution capabilities of the database optimizer and executor to achieve high-performance in-database model training. With a simpler model training and forecast process and higher performance, developers can focus on model tuning and data analysis in a shorter period of time, avoiding fragmented technology stacks and redundant code implementation. - -## Enhancements - -More algorithms are supported in MogDB 3.0.0. - -## Constraints - -- The database is running properly. - -## Dependencies - -None. - -## Related Pages - -[DB4AI Database-driven AI](../../AI-features/db4ai/db4ai.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/1-standard-sql.md b/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/1-standard-sql.md deleted file mode 100644 index d372a0fb..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/1-standard-sql.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Standard SQL -summary: Standard SQL -author: Guo Huan -date: 2022-05-07 ---- - -# Standard SQL - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -SQL is a standard computer language used to control access to databases and manage data in databases. SQL standards are classified into core features and optional features. Most databases do not fully support the SQL standard. - -MogDB supports most of the core features of SQL:2011 and some optional features, providing a unified SQL interface for users. - -## Benefits - -All database vendors can use a unified SQL interface, reducing the costs of learning languages and migrating applications. - -## Description - -For details, see “SQL Syntax” in the *Reference Guide*.
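As a small illustration (table and column names are invented for this sketch), the following query exercises two core features of the SQL standard that MogDB accepts, a common table expression and a window function:

```sql
-- Common table expression (CTE) plus a window function,
-- both core features of the SQL standard.
WITH dept_sales AS (
    SELECT dept_id, SUM(amount) AS total
    FROM   sales
    GROUP  BY dept_id
)
SELECT dept_id,
       total,
       RANK() OVER (ORDER BY total DESC) AS sales_rank
FROM   dept_sales;
```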
- -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - -[SQL Syntax](../../reference-guide/sql-syntax/sql-syntax.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/2-standard-development-interfaces.md b/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/2-standard-development-interfaces.md deleted file mode 100644 index dff0f137..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/2-standard-development-interfaces.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Standard Development Interfaces -summary: Standard Development Interfaces -author: Guo Huan -date: 2022-05-07 ---- - -# Standard Development Interfaces - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Standard ODBC 3.5 and JDBC 4.0 APIs are supported. - -## Benefits - -Standard ODBC and JDBC interfaces are provided to ensure quick migration of user services to MogDB. - -## Description - -Currently, the standard ODBC 3.5 and JDBC 4.0 APIs are supported. The ODBC interface supports SUSE Linux, Windows 32-bit, and Windows 64-bit platforms. The JDBC API supports all platforms. - -## Enhancements - -JDBC can now interconnect with third-party logging frameworks, meeting users' log management and control requirements. - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - -[JDBC Interface Reference](../../developer-guide/dev/2-development-based-on-jdbc/15-JDBC/jdbc-interface-reference.md), [ODBC Interface Reference](../../developer-guide/dev/3-development-based-on-odbc/6-ODBC/odbc-interface-reference.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/3-postgresql-api-compatibility.md b/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/3-postgresql-api-compatibility.md deleted file mode 100644 index b69c9aeb..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/3-postgresql-api-compatibility.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: PostgreSQL API Compatibility -summary: PostgreSQL API Compatibility -author: Guo Huan -date: 2022-05-07 ---- - -# PostgreSQL API Compatibility - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Compatible with PostgreSQL clients and standard APIs. - -## Benefits - -Compatible with PostgreSQL clients and standard APIs, so MogDB can be seamlessly interconnected with PostgreSQL ecosystem tools. - -## Description - -Compatible with PostgreSQL clients and standard APIs.
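For instance (a minimal sketch, assuming a gsql or psql session), PostgreSQL ecosystem tools typically probe the server through the standard version interfaces, which MogDB answers in PostgreSQL-compatible form:

```sql
-- Probes commonly issued by PostgreSQL ecosystem tools on connect.
SELECT version();
SHOW server_version;
```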
- -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - -[Psycopg API Reference](../../developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/psycopg-api-reference.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/ECPG.md b/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/ECPG.md deleted file mode 100644 index e2d75bf2..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/ECPG.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Embedded SQL Preprocessor ECPG -summary: Embedded SQL Preprocessor ECPG -author: Guo Huan -date: 2023-04-04 ---- - -# Embedded SQL Preprocessor ECPG - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -Embedded SQL programs in C language are supported. - -## Benefits - -Embedded SQL programs compiled for other SQL databases can be easily migrated to MogDB, ensuring quick service migration. - -## Description - -An embedded SQL program consists of code written in an ordinary programming language, in this case C, mixed with SQL commands in specially marked sections. To build the program, the source code (*.pgc) is first passed through the embedded SQL preprocessor, which converts it to an ordinary C program (*.c), and afterwards it can be processed by a C compiler. Converted ECPG applications call functions in the libpq library through the embedded SQL library (ecpglib), and communicate with the MogDB server using the normal frontend-backend protocol. Embedded SQL has advantages over other methods of handling SQL commands in C code: - -1. It handles the tedious passing of information to and from variables in the C program. -2. The SQL code in the program is checked during compilation to ensure syntax correctness. -3. SQL embedded in C is specified in the SQL standard and is supported by many other SQL database systems. - -## Enhancements - -None. - -## Constraints - -ECPG supports most of the MogDB SQL syntax. However, the current ECPG lexer and parser cannot process anonymous block statements and package statements, so anonymous block statements and package creation statements cannot be used as embedded SQL statements. - -## Dependencies - -None. - -## Usage - -Use the following commands to compile an ECPG program: - -1. `ecpg testecpg.ecpg -o testecpg.c` - -2. `gcc -I$GAUSSHOME/include/postgresql/server/ -I$GAUSSHOME/include -L$GAUSSHOME/lib -lpq -lecpg -o testecpg testecpg.c` - -## Example - -```c -#include <stdio.h> -#include <stdlib.h> - -/* error handlers for the whole program */ -EXEC SQL WHENEVER NOT FOUND DO BREAK; - - -int main(int argc, char **argv) -{ - EXEC SQL BEGIN DECLARE SECTION; - int v_id, v_name_ind; - char v_name[32]; - char *url="tcp:postgresql://127.0.0.1:5432/postgres"; - char *username="ecpg"; - char *password="Test@123"; - EXEC SQL END DECLARE SECTION; - - EXEC SQL DECLARE c CURSOR FOR - SELECT id, name - FROM test_ecpg_tab - ORDER BY 1; - - /* connect to the database */ - EXEC SQL CONNECT TO :url USER :username USING :password; - - /* open a cursor */ - EXEC SQL OPEN c; - - /* loop will be left if the cursor is done */ - for(;;) - { - /* get the next result row */ - EXEC SQL FETCH NEXT FROM c INTO :v_id, :v_name :v_name_ind; - - printf( - "id = %d, name = %s\n", - v_id, - v_name_ind ?
"(null)" : v_name - ); - } - - EXEC SQL CLOSE c; - EXEC SQL COMMIT; - EXEC SQL DISCONNECT; - - return 0; -} -``` - -1. Create a database user - - ```sql - create user ecpg identified by 'Test@123'; - ``` - -2. Create a test table - - ```sql - drop table if exists ecpg.test_ecpg_tab; - create table ecpg.test_ecpg_tab as select id , ' name '||id name from generate_series(1,20) id; - ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/MogDB-MySQL-compatibility.md b/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/MogDB-MySQL-compatibility.md deleted file mode 100644 index 0b7d7e49..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/MogDB-MySQL-compatibility.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: MogDB-MySQL Compatibility -summary: MogDB-MySQL Compatibility -author: Zhang Cuiping -date: 2022-06-21 ---- - -# MogDB-MySQL Compatibility - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -This feature mainly enhances the compatibility of MogDB with MySQL in the following three aspects. At the same time, the `insert` syntax is supported, and `insert into` can be abbreviated as `insert`. - -User lock allows users to add custom locks through SQL, which allows multiple programs to complete the lock-related interaction process, making the client access from any location to get a consistent lock view. - -When data is inserted into a table to be created, the current time is inserted by default. During data update, if the update time is not specified, the time when the data is updated is displayed by default. - -Session-level SQL mode can be set, allowing change in running, global change, and intra-session change. - -## Benefits - -By setting user locks, data, data structures or certain strings are protected from interfering with each other between sessions, ensuring consistency and security of information. It solves the problem of recording the timestamp of users' operation when their business data is written and modified. By setting SQL mode, it can solve the compatibility between the legacy problems of earlier versions and later versions. - -## Related Pages - -[Dolphin Extension](../../developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-extension.md), [CREATE TABLE](../../reference-guide/sql-syntax/CREATE-TABLE.md), [ALTER TABLE](../../reference-guide/sql-syntax/ALTER-TABLE.md), [INSERT](../../reference-guide/sql-syntax/INSERT.md), [Advisory Lock Functions](../../reference-guide/functions-and-operators/system-management-functions/advisory-lock-functions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/MogDB-Oracle-compatibility.md b/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/MogDB-Oracle-compatibility.md deleted file mode 100644 index c259c2d0..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/MogDB-Oracle-compatibility.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: MogDB-Oracle Compatibility -summary: MogDB-Oracle Compatibility -author: Zhang Cuiping -date: 2022-06-17 ---- - -# MogDB-Oracle Compatibility - -## Availability - -This feature is available since MogDB 3.0.0. 
- -## Introduction - -MogDB is compatible with Oracle functions and package functions through the whale extension. - -On the function side, it mainly adds instrb, nls_charset_id, nls_charset_name, nls_lower, nls_upper, ora_hash, remainder, replace, show, show_parameter, to_timestamp, to_yminterval, tz_offset, nullif, ratio_to_report, etc. - -Packages are generally used only in stored procedures; following Oracle package rules, the new packages are placed under the corresponding schema. The supported Oracle management packages are dbms_random, dbms_output, dbms_lock, dbms_application_info, dbms_metadata, dbms_job, and dbms_utility. - -For more information about the functions and the packages, see [whale](../../developer-guide/extension/whale.md). - -## Benefits - -The whale extension enhances MogDB's built-in functions and thereby its compatibility with Oracle. - -## Related Pages - -[whale](../../developer-guide/extension/whale.md), [Character Processing Functions and Operators](../../reference-guide/functions-and-operators/character-processing-functions-and-operators.md), [Mathematical Functions and Operators](../../reference-guide/functions-and-operators/mathematical-functions-and-operators.md), [Date and Time Processing Functions and Operators](../../reference-guide/functions-and-operators/date-and-time-processing-functions-and-operators.md), [HLL Functions and Operators](../../reference-guide/functions-and-operators/hll-functions-and-operators.md), [Window Functions](../../reference-guide/functions-and-operators/window-functions.md), [System Information Functions](../../reference-guide/functions-and-operators/system-information-functions/system-information-functions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/application-development-interfaces.md b/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/application-development-interfaces.md deleted file mode 100644 index 4301d737..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/application-development-interfaces/application-development-interfaces.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Application Development Interfaces -summary: Application Development Interfaces -author: Guo Huan -date: 2023-05-22 ---- - -# Application Development Interfaces - -+ **[Standard SQL](1-standard-sql.md)** -+ **[Standard Development Interfaces](2-standard-development-interfaces.md)** -+ **[PostgreSQL API Compatibility](3-postgresql-api-compatibility.md)** -+ **[MogDB-Oracle Compatibility](MogDB-Oracle-compatibility.md)** -+ **[MogDB-MySQL Compatibility](MogDB-MySQL-compatibility.md)** -+ **[Embedded SQL Preprocessor ECPG](ECPG.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/characteristic-description-overview.md b/product/en/docs-mogdb/v5.2/characteristic-description/characteristic-description-overview.md deleted file mode 100644 index fb3fca5a..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/characteristic-description-overview.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Characteristic Description -summary: Characteristic Description -author: Guo Huan -date: 2022-09-01 ---- - -# Characteristic Description - -MogDB 5.2 has the following characteristics.
- -+ High Performance - + [CBO Optimizer](./high-performance/1-cbo-optimizer.md) - + [LLVM](./high-performance/2-llvm.md) - + [Vectorized Engine](./high-performance/3-vectorized-engine.md) - + [Hybrid Row-Column Store](./high-performance/4-hybrid-row-column-store.md) - + [Adaptive Compression](./high-performance/5-adaptive-compression.md) - + [SQL Bypass](./high-performance/sql-bypass.md) - + [Kunpeng NUMA Architecture Optimization](./high-performance/7-kunpeng-numa-architecture-optimization.md) - + [High Concurrency of Thread Pools](./high-performance/8-high-concurrency-of-thread-pools.md) - + [SMP for Parallel Execution](./high-performance/9-smp-for-parallel-execution.md) - + [Xlog no Lock Flush](./high-performance/10-xlog-no-lock-flush.md) - + [Parallel Page-based Redo For Ustore](./high-performance/11-parallel-page-based-redo-for-ustore.md) - + [Row-Store Execution to Vectorized Execution](./high-performance/12-row-store-execution-to-vectorized-execution.md) - + [Astore Row Level Compression](./high-performance/astore-row-level-compression.md) - + [BTree Index Compression](./high-performance/btree-index-compression.md) - + [Tracing SQL Function](./high-performance/tracing-SQL-function.md) - + [Parallel Index Scan](./high-performance/parallel-index-scan.md) - + [Enhancement of Tracing Backend Key Thread](./high-performance/enhancement-of-tracing-backend-key-thread.md) - + [Ordering Operator Optimization](./high-performance/ordering-operator-optimization.md) - + [OCK-accelerated Data Transmission](./high-performance/ock-accelerated-data-transmission.md) - + [OCK SCRLock Accelerate Distributed Lock](./high-performance/ock-scrlock-accelerate-distributed-lock.md) -+ High Availability (HA) - + [Primary/Standby](./high-availability/1-primary-standby.md) - + [Logical Replication](./high-availability/2-logical-replication.md) - + [Logical Backup](./high-availability/4-logical-backup.md) - + [Physical Backup](./high-availability/5-physical-backup.md) - + [Automatic Job Retry upon Failure](./high-availability/6-automatic-job-retry-upon-failure.md) - + [Ultimate RTO](./high-availability/7-ultimate-rto.md) - + [High Availability Based on the Paxos Protocol](./high-availability/high-availability-based-on-the-paxos-protocol.md) - + [Cascaded Standby Server](./high-availability/8-cascaded-standby-server.md) - + [Delayed Replay](./high-availability/9-delayed-replay.md) - + [Adding or Deleting a Standby Server](./high-availability/10-adding-or-deleting-a-standby-server.md) - + [Delaying Entering the Maximum Availability Mode](./high-availability/11-delaying-entering-the-maximum-availability-mode.md) - + [Parallel Logical Decoding](./high-availability/12-parallel-logical-decoding.md) - + [DCF](./high-availability/13-dcf.md) - + [CM](./high-availability/14-cm.md) - + [Global SysCache](./high-availability/15-global-syscache.md) - + [Using a Standby Node to Build a Standby Node](./high-availability/16-using-a-standby-node-to-build-a-standby-node.md) - + [Two-City Three-DC DR](./high-availability/17-two-city-three-dc-dr.md) - + [CM Cluster Management Component Supporting Two Node Deployment](./high-availability/cm-cluster-management-component-supporting-two-node-deployment.md) -+ Maintainability - + [Workload Diagnosis Report (WDR)](./maintainability/2-workload-diagnosis-report.md) - + [Slow SQL Diagnosis](./maintainability/3-slow-sql-diagnosis.md) - + [Session Performance Diagnosis](./maintainability/4-session-performance-diagnosis.md) - + [System KPI-aided
Diagnosis](./maintainability/5-system-kpi-aided-diagnosis.md) - + [Fault Diagnosis](./maintainability/fault-diagnosis.md) - + [Extension-Splitting](./maintainability/extension-splitting.md) - + [Built-in Stack Tool](./maintainability/built-in-stack-tool.md) - + [SQL PATCH](./maintainability/sql-patch.md) -+ Compatibility - + [Add %rowtype Attribute To The View](./compatibility/add-rowtype-attribute-to-the-view.md) - + [Aggregate Functions Distinct Performance Optimization](./compatibility/aggregate-functions-distinct-performance-optimization.md) - + [Aggregate Functions Support Keep Clause](./compatibility/aggregate-functions-support-keep-clause.md) - + [Aggregate Functions Support Scenario Extensions](./compatibility/aggregate-functions-support-scenario-extensions.md) - + [Compatible With MySQL Alias Support For Single Quotes](./compatibility/compatible-with-mysql-alias-support-for-single-quotes.md) - + [current_date/current_time Keywords As Field Name](./compatibility/current_date-current_time-keywords-as-field-name.md) - + [Custom Type Array](./compatibility/custom-type-array.md) - + [For Update Support Outer Join](./compatibility/for-update-supports-outer-join.md) - + [MogDB Supports Insert All](./compatibility/mogdb-supports-insert-all.md) - + [Oracle DBLink Syntax Compatibility](./compatibility/oracle-dblink-syntax-compatibility.md) - + [Remove Type Conversion Hint When Creating PACKAGE/FUNCTION/PROCEDURE](./compatibility/remove-type-conversion-hint-when-creating-package-function-procedure.md) - + [Support Bypass Method When Merge Into Hit Index](./compatibility/support-bypass-method-when-merge-into-hit-index.md) - + [Support For Adding Nocopy Attributes To Procedure And Function Parameters](./compatibility/support-for-adding-nocopy-attributes-to-procedure-and-function-parameters.md) - + [Support For Passing The Count Attribute Of An Array As A Parameter Of The Array Extend](./compatibility/support-passing-the-count-attribute.md) - + [Support Q Quote Escape Character](./compatibility/support-q-quote-escape-character.md) - + [Support Subtracting Two Date Types To Return Numeric Type](./compatibility/support-subtracting-two-date-types-to-return-numeric-type.md) - + [Support table()](./compatibility/support-table-function.md) - + [Support To Keep The Same Name After The End With Oracle](./compatibility/support-to-keep-the-same-name-after-the-end-with-oracle.md) - + [Support Where Current Of](./compatibility/support-where-current-of.md) - + [Support For Constants In Package As Default Values](./compatibility/support-for-constants-in-package-as-default-values.md) - + [Support PLPGSQL subtype](./compatibility/support-plpgsql-subtype.md) - + [Support Synonym Calls Without Parentheses For Function Without Parameters](./compatibility/support-synonym-calls-without-parentheses-for-function-without-parameters.md) - + [Support For dbms_utility.format_error_backtrace](./compatibility/format-error-backtrace.md) -+ Database Security - + [Access Control Model](./database-security/1-access-control-model.md) - + [Separation of Control and Access Permissions](./database-security/2-separation-of-control-and-access-permissions.md) - + [Database Encryption Authentication](./database-security/3-database-encryption-authentication.md) - + [Data Encryption and Storage](./database-security/4-data-encryption-and-storage.md) - + [Database Audit](./database-security/5-database-audit.md) - + [Network Communication Security](./database-security/6-network-communication-security.md) - + [Resource 
Label](./database-security/7-resource-label.md) - + [Unified Audit](./database-security/8-unified-audit.md) - + [Dynamic Data Anonymization](./database-security/9-dynamic-data-anonymization.md) - + [Row-Level Access Control](./database-security/10-row-level-access-control.md) - + [Password Strength Verification](./database-security/11-password-strength-verification.md) - + [Equality Query in a Fully-encrypted Database](./database-security/12-equality-query-in-a-fully-encrypted-database.md) - + [Ledger Database Mechanism](./database-security/13-ledger-database-mechanism.md) - + [Transparent Data Encryption](./database-security/14-transparent-data-encryption.md) -+ Enterprise-Level Features - + [Support for Functions and Stored Procedures](./enterprise-level-features/1-support-for-functions-and-stored-procedures.md) - + [SQL Hints](./enterprise-level-features/2-sql-hints.md) - + [Full-Text Indexing](./enterprise-level-features/3-full-text-indexing.md) - + [Copy Interface for Error Tolerance](./enterprise-level-features/4-copy-interface-for-error-tolerance.md) - + [Partitioning](./enterprise-level-features/5-partitioning.md) - + [Support for Advanced Analysis Functions](./enterprise-level-features/6-support-for-advanced-analysis-functions.md) - + [Materialized View](./enterprise-level-features/7-materialized-view.md) - + [HyperLogLog](./enterprise-level-features/8-hyperloglog.md) - + [Creating an Index Online](./enterprise-level-features/9-creating-an-index-online.md) - + [Autonomous Transaction](./enterprise-level-features/10-autonomous-transaction.md) - + [Global Temporary Table](./enterprise-level-features/11-global-temporary-table.md) - + [Pseudocolumn ROWNUM](./enterprise-level-features/12-pseudocolumn-rownum.md) - + [Stored Procedure Debugging](./enterprise-level-features/13-stored-procedure-debugging.md) - + [JDBC Client Load Balancing and Read/Write Isolation](./enterprise-level-features/14-jdbc-client-load-balancing-and-readwrite-isolation.md) - + [In-place Update Storage Engine](./enterprise-level-features/15-in-place-update-storage-engine.md) - + [Publication-Subscription](./enterprise-level-features/16-publication-subscription.md) - + [Foreign Key Lock Enhancement](./enterprise-level-features/17-foreign-key-lock-enhancement.md) - + [Data Compression in OLTP Scenarios](./enterprise-level-features/18-data-compression-in-oltp-scenarios.md) - + [Transaction Async Submit](./enterprise-level-features/19-transaction-async-submit.md) - + [Index Creation Parallel Control](./enterprise-level-features/23-index-creation-parallel-control.md) - + [Dynamic Partition Pruning](./enterprise-level-features/21-dynamic-partition-pruning.md) - + [COPY Import Optimization](./enterprise-level-features/20-copy-import-optimization.md) - + [SQL Running Status Observation](./enterprise-level-features/22-sql-running-status-observation.md) - + [BRIN Index](./enterprise-level-features/24-brin-index.md) - + [BLOOM Index](./enterprise-level-features/25-bloom-index.md) - + [Event Trigger](./enterprise-level-features/event-trigger.md) -+ Application Development Interfaces - + [Standard SQL](./application-development-interfaces/1-standard-sql.md) - + [Standard Development Interfaces](./application-development-interfaces/2-standard-development-interfaces.md) - + [PostgreSQL API Compatibility](./application-development-interfaces/3-postgresql-api-compatibility.md) - + [MogDB-Oracle Compatibility](./application-development-interfaces/MogDB-Oracle-compatibility.md) - + [MogDB-MySQL 
Compatibility](./application-development-interfaces/MogDB-MySQL-compatibility.md) - + [Embedded SQL Preprocessor ECPG](./application-development-interfaces/ECPG.md) -+ AI Capabilities - + AI4DB: Autonomous Database O&M - + [Database Metric Collection, Forecast, and Exception Detection](./ai-capabilities/ai4db-autonomous-database-o-m/1-database-metric-collection-forecast-and-exception-detection.md) - + [Root Cause Analysis for Slow SQL Statements](./ai-capabilities/ai4db-autonomous-database-o-m/2-root-cause-analysis-for-slow-sql-statements.md) - + [Index Recommendation](./ai-capabilities/ai4db-autonomous-database-o-m/3-index-recommendation.md) - + [Parameter Tuning and Diagnosis](./ai-capabilities/ai4db-autonomous-database-o-m/4-parameter-tuning-and-diagnosis.md) - + [Slow SQL Statement Discovery](./ai-capabilities/ai4db-autonomous-database-o-m/5-slow-sql-statement-discovery.md) - + [DB4AI: Database-driven AI](./ai-capabilities/db4ai-database-driven-ai.md) - + ABO Optimizer - + [Intelligent Cardinality Estimation](./ai-capabilities/abo-optimizer/intelligent-cardinality-estimation.md) - + [Adaptive Plan Selection](./ai-capabilities/abo-optimizer/adaptive-plan-selection.md) -+ Middleware - + [Distributed Database Capability](./middleware/distributed-database-capability.md) - + [Deploying a Distributed Database Using Kubernetes](./middleware/deploying-a-distributed-database-using-kubernetes.md) - + [Distributed Analysis Capabilities](./middleware/distributed-analysis-capabilities.md) -+ Workload Management - + [High-Latency Escape at the Infrastructure Layer](./workload-management/high-latency-escape-at-the-infrastructure-layer.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/characteristic-description.md b/product/en/docs-mogdb/v5.2/characteristic-description/characteristic-description.md deleted file mode 100644 index 2492d1be..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/characteristic-description.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Characteristic Description -summary: Characteristic Description -author: Guo Huan -date: 2023-05-22 ---- - -# Characteristic Description - -- **[Overview](characteristic-description-overview.md)** -- **[High Performance](high-performance/high-performance.md)** -- **[High Availability](high-availability/high-availability.md)** -- **[Maintainability](maintainability/maintainability.md)** -- **[Compatibility](compatibility/compatibility.md)** -- **[Database Security](database-security/database-security.md)** -- **[Enterprise-Level Features](enterprise-level-features/enterprise-level-features.md)** -- **[Application Development Interfaces](application-development-interfaces/application-development-interfaces.md)** -- **[AI Capabilities](ai-capabilities/ai-capabilities.md)** -- **[Middleware](middleware/middleware.md)** -- **[Workload Management](workload-management/workload-management.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/add-rowtype-attribute-to-the-view.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/add-rowtype-attribute-to-the-view.md deleted file mode 100644 index f9a8ebad..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/add-rowtype-attribute-to-the-view.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Add %rowtype Attribute To The View -summary: Add %rowtype Attribute To The View -author: Guo Huan -date: 2023-06-14 ---- - -# Add %rowtype 
Attribute To The View - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -This feature adds the %rowtype attribute for views, so that it can be used as a parameter or variable type. When you use %ROWTYPE to declare a variable that matches a view, the variable has the same structure as a row of the view. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Syntax Description - -```sql -CREATE PROCEDURE pro_name (input IN view_name%rowtype) IS -BEGIN -... -END; -``` - -## Example - -```sql -MogDB=# DROP SCHEMA IF EXISTS test_view_sch; -NOTICE: schema "test_view_sch" does not exist, skipping - -MogDB=# CREATE SCHEMA test_view_sch; - -MogDB=# CREATE TABLE test_view_sch.tb_test (c1 INT, c2 varchar2); - -MogDB=# CREATE VIEW test_view_sch.v_terst AS SELECT c1, c2 FROM test_view_sch.tb_test; - -MogDB=# CREATE OR REPLACE PROCEDURE sp_p1 (i_proname IN test_view_sch.v_terst%rowtype) IS -BEGIN - RAISE NOTICE 'i_proname.c1 is %, i_proname.c2 is %', i_proname.c1, i_proname.c2; -END; -/ -CREATE PROCEDURE - -MogDB=# DECLARE -var1 test_view_sch.v_terst%rowtype := (1111, 'xxxx'); -BEGIN -sp_p1(i_proname => var1); -END; -/ -NOTICE: i_proname.c1 is 1111, i_proname.c2 is xxxx -ANONYMOUS BLOCK EXECUTE - -MogDB=# DROP SCHEMA test_view_sch CASCADE; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to table test_view_sch.tb_test -drop cascades to view test_view_sch.v_terst -drop cascades to function sp_p1(test_view_sch.v_terst) -DROP SCHEMA -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-distinct-performance-optimization.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-distinct-performance-optimization.md deleted file mode 100644 index 71e5795a..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-distinct-performance-optimization.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Aggregate Functions Distinct Performance Optimization -summary: Aggregate Functions Distinct Performance Optimization -author: Guo Huan -date: 2023-06-05 ---- - -# Aggregate Functions Distinct Performance Optimization - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -In practice, the DISTINCT keyword is often used inside aggregate functions to deduplicate data before aggregation. This feature exploits the characteristics of the DISTINCT clause to optimize execution performance. - -## Benefits - -Improves the performance of query statements that use DISTINCT inside aggregate functions, with an improvement of up to 3 times or more in the test scenarios. - -## Description - -In the original implementation of DISTINCT, before the aggregation calculation, the scanned data is cached and sorted by the DISTINCT columns, and only the first row of each group is then used in the aggregate operation, realizing a filter-then-aggregate process.
The implementation above deduplicates data by sorting, and the sort operation itself has a large performance impact. If the output does not need to be sorted and the goal is purely deduplication, HashAgg can deduplicate the data quickly, with far better performance than sorting. Based on this, the feature implements the DISTINCT operation with a two-layer Agg plan: the lower layer first deduplicates the data with HashAgg, and the upper Agg operator then aggregates the deduplicated data, improving execution performance. - -## Constraints - -- DISTINCT performance has already been optimized for parallel scenarios; this optimization applies only to non-parallel scenarios. -- Scenarios where DISTINCT applies to multiple columns are not supported. - -## Example - -```sql -MogDB=# CREATE TABLE tb_distinct(a INT, b INT, c INT, d INT, e INT); -CREATE TABLE -MogDB=# EXPLAIN VERBOSE SELECT a, COUNT(DISTINCT b) FROM tb_distinct GROUP BY a; - QUERY PLAN ----------------------------------------------------------------------------------- - HashAggregate (cost=41.59..43.59 rows=200 width=24) - Output: a, count(b) - Group By Key: tb_distinct.a - -> HashAggregate (cost=38.59..40.59 rows=200 width=16) - Output: a, b - Group By Key: tb_distinct.a, tb_distinct.b - -> Seq Scan on public.tb_distinct (cost=0.00..26.34 rows=1634 width=8) - Output: a, b -(8 rows) -``` diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-support-keep-clause.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-support-keep-clause.md deleted file mode 100644 index 8c077ffb..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-support-keep-clause.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Aggregate Functions Support Keep Clause -summary: Aggregate Functions Support Keep Clause -author: Guo Huan -date: 2023-06-02 ---- - -# Aggregate Functions Support Keep Clause - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -KEEP is an important analytic clause in Oracle, used in combination with DENSE_RANK and similar in structure to OVER. Its main use scenario is as follows: within each group, sort by one field and then take the maximum or minimum value of a specified field. This feature is compatible with Oracle's KEEP syntax. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Description - -The KEEP syntax is used together with DENSE_RANK FIRST/LAST. It sorts the rows of each group by a given field, takes the first or last set of rows in that order, and then computes the maximum or minimum of a specified field over those rows. - -MogDB keeps the KEEP syntax rules and execution results consistent with Oracle: execution does not crash, and no errors inconsistent with Oracle's are raised. The only difference is that window functions containing a KEEP clause are not yet supported; if a query statement contains both a KEEP clause and an OVER clause, it exits with an error. - -## Constraints - -- The vectorized aggregate operator is not supported.
-- Window functions are not supported. - -## Syntax Description - -Oracle's KEEP syntax is defined as follows: - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/Agg函数支持Keep子句.png) - -The syntax rules of MogDB are basically the same as those of Oracle described above, the only difference being that MogDB does not support window functions that contain KEEP clauses. - -## Example - -```sql --- Create table tab_keep -DROP TABLE IF EXISTS tab_keep; -CREATE TABLE tab_keep (a INT, b INT, c INT, d INT, e INT, f INT, g INT); --- Insert data -INSERT INTO tab_keep VALUES (1, 1, 1, 1, 10, 100, 1000); -INSERT INTO tab_keep VALUES (1, 1, 1, 1, 10, 100, 2000); -INSERT INTO tab_keep VALUES (1, 1, 1, 1, 10, 200, 3000); -INSERT INTO tab_keep VALUES (1, 1, 1, 1, 20, 200, 4000); -INSERT INTO tab_keep VALUES (1, 1, 1, 2, 20, 300, 5000); -INSERT INTO tab_keep VALUES (1, 1, 2, 2, 20, 300, 6000); -INSERT INTO tab_keep VALUES (1, 2, 2, 2, 30, 400, 7000); -INSERT INTO tab_keep VALUES (1, 2, 2, 2, 30, 400, 8000); -INSERT INTO tab_keep VALUES (1, 2, 2, 3, 30, 500, 9000); -INSERT INTO tab_keep VALUES (1, 2, 2, 3, 40, 500, 10000); -INSERT INTO tab_keep VALUES (1, 2, 3, 3, 40, 600, 12000); -INSERT INTO tab_keep VALUES (1, 2, 3, 3, 40, 600, 13000); -INSERT INTO tab_keep VALUES (2, 3, 4, 4, 40, 400, 4000); -INSERT INTO tab_keep VALUES (2, 3, 4, 4, 40, 400, 5000); -INSERT INTO tab_keep VALUES (2, 3, 4, 4, 40, 500, 6000); -INSERT INTO tab_keep VALUES (2, 3, 4, 4, 50, 500, 7000); -INSERT INTO tab_keep VALUES (2, 3, 4, 5, 50, 600, 8000); -INSERT INTO tab_keep VALUES (2, 3, 5, 5, 50, 600, 9000); -INSERT INTO tab_keep VALUES (2, 4, 5, 5, 60, 700, 10000); -INSERT INTO tab_keep VALUES (2, 4, 5, 5, 60, 700, 11000); -INSERT INTO tab_keep VALUES (2, 4, 5, 6, 60, 800, 12000); -INSERT INTO tab_keep VALUES (2, 4, 5, 6, 70, 800, 13000); -INSERT INTO tab_keep VALUES (2, 4, 6, 6, 70, 900, 14000); -INSERT INTO tab_keep VALUES (2, 4, 6, 6, 70, 900, 15000); - --- Test KEEP(DENSE_RANK FIRST) -SELECT a, MAX(g) KEEP(DENSE_RANK FIRST ORDER BY b) FROM tab_keep GROUP BY a ORDER BY 1, 2; -SELECT a, MIN(g) KEEP(DENSE_RANK FIRST ORDER BY b) FROM tab_keep GROUP BY a ORDER BY 1, 2; - --- Test KEEP(DENSE_RANK LAST) -SELECT a, MAX(g) KEEP(DENSE_RANK LAST ORDER BY b) FROM tab_keep GROUP BY a ORDER BY 1, 2; -SELECT a, MIN(g) KEEP(DENSE_RANK LAST ORDER BY b) FROM tab_keep GROUP BY a ORDER BY 1, 2; - --- GROUP BY clause with GROUPING SETS, ROLLUP, CUBE grouping sets -SELECT a, b, c, MAX(g) KEEP(DENSE_RANK FIRST ORDER BY d) FROM tab_keep GROUP BY GROUPING SETS((a),(b), (c), (a, b), (a, b, c)) ORDER BY a, b, c; -SELECT a, b, c, MIN(g) KEEP(DENSE_RANK FIRST ORDER BY d) FROM tab_keep GROUP BY GROUPING SETS((a),(b), (c), (a, b), (a, b, c)) ORDER BY a, b, c; -SELECT a, b, c, MIN(g) KEEP(DENSE_RANK FIRST ORDER BY d) FROM tab_keep GROUP BY ROLLUP(a, b, c) ORDER BY a, b, c; -SELECT a, b, c, MIN(g) KEEP(DENSE_RANK FIRST ORDER BY d) FROM tab_keep GROUP BY CUBE(a, b, c) ORDER BY a, b, c; - --- Contains multiple aggregate functions -SELECT a, b, c, MAX(g) KEEP(DENSE_RANK FIRST ORDER BY d), - MAX(f) KEEP(DENSE_RANK FIRST ORDER BY d), - MIN(g) KEEP(DENSE_RANK FIRST ORDER BY d), - MIN(f) KEEP(DENSE_RANK FIRST ORDER BY d) - FROM tab_keep GROUP BY GROUPING SETS((a),(b), (c), (a, b), (a, b, c)) ORDER BY a, b, c; - --- Order by clause contains nulls first -SELECT a, MAX(c) KEEP(DENSE_RANK FIRST ORDER BY b nulls FIRST) FROM tab_keep GROUP BY a ORDER BY 1, 2; -SELECT a, MAX(c) KEEP(DENSE_RANK LAST ORDER BY b nulls FIRST) FROM tab_keep GROUP BY a 
ORDER BY 1, 2; - --- Order by clause contains nulls last -SELECT a, MAX(c) KEEP(DENSE_RANK FIRST ORDER BY b nulls LAST) FROM tab_keep GROUP BY a ORDER BY 1, 2; -SELECT a, MAX(c) KEEP(DENSE_RANK LAST ORDER BY b nulls LAST) FROM tab_keep GROUP BY a ORDER BY 1, 2; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-support-scenario-extensions.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-support-scenario-extensions.md deleted file mode 100644 index 5d3dc57f..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/aggregate-functions-support-scenario-extensions.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Aggregate Functions Support Scenario Extensions -summary: Aggregate Functions Support Scenario Extensions -author: Guo Huan -date: 2023-06-06 ---- - -# Aggregate Functions Support Scenario Extensions - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -Supports aggregate functions with an ORDER BY clause in queries that have no GROUP BY clause. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Description - -For a SQL query containing an aggregate function, every column in the ORDER BY clause previously had to appear in the GROUP BY clause; otherwise the query reported an error and exited. As a result, a query with an ORDER BY clause but no GROUP BY clause failed this check. This feature extends support to queries in this scenario. - -## Example - -```sql --- Ordinary Aggregate Functions -CREATE TABLE tt01(a INT, b INT); -SELECT SUM(a) FROM tt01 ORDER BY a; --success -SELECT SUM(a) FROM tt01 ORDER BY b; --success -SELECT SUM(a) FROM tt01 GROUP BY a ORDER BY a; --success -SELECT SUM(a) FROM tt01 GROUP BY b ORDER BY a; --failed -SELECT a, SUM(a) FROM tt01; --failed -SELECT b, SUM(a) FROM tt01; --failed -SELECT SUM(a) FROM tt01; --success - --- Listagg Functions -CREATE TABLE tt02(deptno INT, name VARCHAR); -SELECT listagg(name, ',') within GROUP(ORDER BY name) FROM tt02 ORDER BY name; --success -SELECT listagg(name, ',') within GROUP(ORDER BY name) FROM tt02 ORDER BY deptno; --success -SELECT listagg(name, ',') within GROUP(ORDER BY name) FROM tt02 GROUP BY deptno ORDER BY deptno; --success -SELECT listagg(name, ',') within GROUP(ORDER BY name) FROM tt02 GROUP BY deptno ORDER BY name; --failed -SELECT name, listagg(name, ',') within GROUP(ORDER BY name) FROM tt02; --failed -SELECT deptno, listagg(name, ',') within GROUP(ORDER BY name) FROM tt02; --failed -SELECT listagg(name, ',') within GROUP(ORDER BY name) FROM tt02; --success -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/compatibility.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/compatibility.md deleted file mode 100644 index e4bd5c3d..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/compatibility.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Compatibility -summary: Compatibility -author: Guo Huan -date: 2023-06-20 ---- - -# Compatibility - -- **[Add %rowtype Attribute To The View](add-rowtype-attribute-to-the-view.md)** -- **[Aggregate Functions Distinct Performance Optimization](aggregate-functions-distinct-performance-optimization.md)** -- **[Aggregate Functions Support Keep Clause](aggregate-functions-support-keep-clause.md)** --
**[Aggregate Functions Support Scenario Extensions](aggregate-functions-support-scenario-extensions.md)** -- **[Compatible With MySQL Alias Support For Single Quotes](compatible-with-mysql-alias-support-for-single-quotes.md)** -- **[current_date/current_time Keywords As Field Name](current_date-current_time-keywords-as-field-name.md)** -- **[Custom Type Array](custom-type-array.md)** -- **[For Update Support Outer Join](for-update-supports-outer-join.md)** -- **[MogDB Supports Insert All](mogdb-supports-insert-all.md)** -- **[Oracle DBLink Syntax Compatibility](oracle-dblink-syntax-compatibility.md)** -- **[Remove Type Conversion Hint When Creating PACKAGE/FUNCTION/PROCEDURE](remove-type-conversion-hint-when-creating-package-function-procedure.md)** -- **[Support Bypass Method When Merge Into Hit Index](support-bypass-method-when-merge-into-hit-index.md)** -- **[Support For Adding Nocopy Attributes To Procedure And Function Parameters](support-for-adding-nocopy-attributes-to-procedure-and-function-parameters.md)** -- **[Support For Passing The Count Attribute Of An Array As A Parameter Of The Array Extend](support-passing-the-count-attribute.md)** -- **[Support Q Quote Escape Character](support-q-quote-escape-character.md)** -- **[Support Subtracting Two Date Types To Return Numeric Type](support-subtracting-two-date-types-to-return-numeric-type.md)** -- **[Support table()](support-table-function.md)** -- **[Support To Keep The Same Name After The End With Oracle](support-to-keep-the-same-name-after-the-end-with-oracle.md)** -- **[Support Where Current Of](support-where-current-of.md)** -- **[Support For Constants In Package As Default Values](support-for-constants-in-package-as-default-values.md)** -- **[Support PLPGSQL subtype](support-plpgsql-subtype.md)** -- **[Support Synonym Calls Without Parentheses For Function Without Parameters](support-synonym-calls-without-parentheses-for-function-without-parameters.md)** -- **[Support For dbms_utility.format_error_backtrace](format-error-backtrace.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/compatible-with-mysql-alias-support-for-single-quotes.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/compatible-with-mysql-alias-support-for-single-quotes.md deleted file mode 100644 index 79ea598f..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/compatible-with-mysql-alias-support-for-single-quotes.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Compatible With MySQL Alias Support For Single Quotes -summary: Compatible With MySQL Alias Support For Single Quotes -author: Guo Huan -date: 2023-06-12 ---- - -# Compatible With MySQL Alias Support For Single Quotes - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -Provides MySQL-compatible support for single-quoted aliases (column aliases only). - -## Benefits - -Enhance MogDB compatibility with MySQL to reduce application migration costs. - -## Description - -The alias after `AS` may be single-quoted (Name AS 'alias'), and a single-quoted alias may also directly follow the name (Name 'alias'). - -## Constraints - -- The BINARY keyword is not supported as a column name.
-- The a_expr qual_op syntax is not supported, i.e., postfix operators. - -## Syntax Description - -- In `{expression [ [ AS ] output_name ]}`, output_name supports the single-quote syntax -- In `{expression [ output_name ]}`, output_name supports the single-quote syntax - -## Example - -```sql -td_db=# SELECT a+1 AS 'quote1', b 'quote2' FROM test_quote; - quote1 | quote2 ---------+-------- - 2 | 2 - 4 | 4 -(2 rows) - -td_db=# SELECT datetime '2023-6-1' 'name1'; - name1 --------------------- - 2023-06-01 00:00:00 -(1 row) - -td_db=# SELECT date '2023-6-1' 'name1'; - name1 ------------- - 2023-06-01 -(1 row) - -td_db=# SELECT a AS 'name1', b 'name2' FROM wxq; - name1 | name2 --------+------- - 1 | yi - 2 | er -(2 rows) - -td_db=# SELECT b+1 'quote2' FROM test_quote; - quote2 --------- - 3 - 5 -(2 rows) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/current_date-current_time-keywords-as-field-name.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/current_date-current_time-keywords-as-field-name.md deleted file mode 100644 index 5f810d72..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/current_date-current_time-keywords-as-field-name.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: current_date/current_time Keywords As Field Name -summary: current_date/current_time Keywords As Field Name -author: Guo Huan -date: 2023-06-09 ---- - -# current_date/current_time Keywords As Field Name - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -Supports using current_date/current_time as table field names in operations. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Description - -current_date and current_time are system functions that return the current system date and time; as RESERVED_KEYWORD keywords, they normally cannot be used as table field names. This feature allows current_date/current_time to be used as table field names for compatibility with application development. - -## Constraints - -- Currently, only CREATE TABLE supports current_date/current_time as field names. -- Other DML operations such as SELECT/DELETE/UPDATE are not supported directly because of the ambiguity with the current_date system function; the field name must be enclosed in double quotes in these operations. -- In check constraints, current_date/current_time is still interpreted as a system function rather than a field; it is recognized as a field only when double-quoted. - -## Example - -```sql --- CREATE TABLE -MogDB=# CREATE TABLE test_date(current_date date); -CREATE TABLE -MogDB=# CREATE TABLE test_time(current_time time); -CREATE TABLE - --- Create a test table and insert data -MogDB=# CREATE TABLE test(current_date date); -CREATE TABLE -MogDB=# INSERT INTO test VALUES ('2022-12-15'); -INSERT 0 1 -MogDB=# SELECT * FROM test; - current_date ---------------------- - 2022-12-15 00:00:00 -(1 row) - --- The SELECT operation returns the current date or time as a system function. Field operations require double quotes. -MogDB=# SELECT current_date FROM test; - date ------------- - 2022-12-16 -(1 row) - --- Note: with double quotes, the field value is displayed. -MogDB=# SELECT "current_date" FROM test; - current_date ---------------------- - 2022-12-15 00:00:00 -(1 row) - --- UPDATE (the field name as the UPDATE target is unambiguous) -MogDB=# UPDATE test SET current_date = '2022-12-14'; -UPDATE 1 - --- The DELETE operation defaults to being a system function operation, and field operations require double quotes. -MogDB=# DELETE FROM test WHERE CURRENT_DATE = '2022-12-14'; -DELETE 0 -MogDB=# SELECT * FROM test; - current_date ---------------------- - 2022-12-14 00:00:00 -(1 row) -MogDB=# DELETE FROM test WHERE "current_date" = '2022-12-14'; -DELETE 1 -MogDB=# SELECT * FROM test; - current_date --------------- -(0 rows) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/custom-type-array.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/custom-type-array.md deleted file mode 100644 index 1a7b74cd..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/custom-type-array.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Custom Type Array -summary: Custom Type Array -author: Guo Huan -date: 2023-06-13 ---- - -# Custom Type Array - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -This feature supports the creation of custom type arrays. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Description - -This feature supports creating arrays of custom types using the VARRAY/ARRAY and TABLE OF syntax. - -## Constraints - -- The element type of the array must be a pre-existing type and cannot be an array. -- The array size is ignored. -- Custom type arrays do not support nested types. -- Custom type arrays do not support column storage. -- Fields of a custom type array do not support GIN, BRIN, or BLOOM indexes. -- Fields of a custom type array cannot be used as hash partition keys. -- Fields of a custom type array cannot be used as range partition keys. -- Fields of a custom type array cannot be used as list partition keys. -- Fields of a custom type array do not support array nesting. -- Fields of a custom type array cannot be used as encrypted fields. - -## Syntax Description - -```ebnf+diagram -CreateType ::= CREATE TYPE name { AS | IS } { VARRAY | ARRAY } ( integer_value ) OF data_type; -``` - -This syntax is equivalent to the following syntax (integer_value is ignored): - -```ebnf+diagram -CreateType ::= CREATE TYPE name { AS | IS } TABLE OF data_type; -``` - -**Parameter Description**: - -- **integer_value** - - An unsigned 32-bit integer in the range 0-2147483647, such as 1, 100, or 300. - -## Example - -```sql -CREATE TYPE t_array IS VARRAY(10000) OF VARCHAR2(1000); -``` - -## Related Pages - -[CREATE TYPE](../../reference-guide/sql-syntax/CREATE-TYPE.md), [ALTER TYPE](../../reference-guide/sql-syntax/ALTER-TYPE.md), [DROP TYPE](../../reference-guide/sql-syntax/DROP-TYPE.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/for-update-supports-outer-join.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/for-update-supports-outer-join.md deleted file mode 100644 index 7b8578f5..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/for-update-supports-outer-join.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: For Update Support Outer Join -summary: For Update Support Outer Join -author: Guo Huan -date: 2023-06-06 ---- - -# For Update Support Outer Join - -## Availability - -This feature is available since MogDB 5.0.0.
- -## Introduction - -This feature supports locking rows of the nullable-side table in an outer join (left join, right join, full join); rows whose nullable side is NULL are not locked. - -## Benefits - -Supports FOR UPDATE locking on outer joins. - -## Description - -Usually, a SELECT statement does not lock data, so the query has no impact on other DML and DDL operations. However, daily use often involves business scenarios that require exclusive access to data, such as train-ticket booking: the screen shows that a ticket is available, but if the query does not lock the data exclusively, the data may have been modified by another client by the time the ticket is actually issued, and no ticket can be issued. In such cases, the data must be locked during the query so that it is held exclusively and cannot be modified by others. - -FOR UPDATE is a kind of row-level lock. Once a user locks a row, that user can query and update the locked row, while other users can only query it and cannot modify it until the lock on the row is released. The `SELECT ... FOR UPDATE` statement is therefore commonly used to lock rows manually in a query. This feature extends FOR UPDATE to lock the nullable side of an outer join. - -## Example - -```sql --- create tables -CREATE TABLE tt01 (a INT, b INT); -CREATE TABLE tt02 (a INT, b INT); - --- 1) Both tables in an inner join are locked; the lock succeeds, and no other transaction can modify the rows until the transaction ends. --- Transaction 1: -SELECT * FROM tt01 INNER JOIN tt02 ON tt01.a = tt02.a FOR UPDATE; --- Transaction 2: -UPDATE tt02 SET b = 200 WHERE a = 2; - --- 2) Both tables in a left join are locked; the lock succeeds, and no other transaction can modify the rows until the transaction ends. --- Transaction 1: -SELECT * FROM tt01 LEFT JOIN tt02 ON tt01.a = tt02.a FOR UPDATE; --- Transaction 2: -UPDATE tt02 SET b = 200 WHERE a = 2; - --- 3) Both tables in a right join are locked; the lock succeeds, and no other transaction can modify the rows until the transaction ends. --- Transaction 1: -SELECT * FROM tt01 RIGHT JOIN tt02 ON tt01.a = tt02.a FOR UPDATE; --- Transaction 2: -UPDATE tt01 SET b = 200 WHERE a = 2; - --- 4) Both tables in a full join are locked; the lock succeeds, and no other transaction can modify the rows until the transaction ends. --- Transaction 1: -SELECT * FROM tt01 FULL JOIN tt02 ON tt01.a = tt02.a FOR UPDATE; --- Transaction 2: -UPDATE tt01 SET b = 200 WHERE a = 2; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/format-error-backtrace.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/format-error-backtrace.md deleted file mode 100644 index 257514c3..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/format-error-backtrace.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: Support For dbms_utility.format_error_backtrace -summary: Support For dbms_utility.format_error_backtrace -author: Guo Huan -date: 2023-07-07 ---- - -# Support For dbms_utility.format_error_backtrace - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -dbms_utility.format_error_backtrace is used to locate the exact source of a procedure exception; it outputs a formatted call stack of the program with line numbers, tracing back to the line where the error was first thrown.
Therefore, this interface can only be called on procedure exceptions. - -This interface is implemented in the whale plugin, so the whale plugin must be created first. If you need to print the stack information, you need to use dbms_output.put_line. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Constraints - -- dbms_utility.format_error_backtrace can only be used in the exception section of a procedure and does not support functions. - -- If you need to print the contents of dbms_utility.format_error_backtrace, you need to use the dbms_output.put_line interface. - - The following SQL statements need to be executed before using the dbms_output.put_line interface: - - ```sql - select dbms_output.enable(***); - -- Enable the dbms_output package. - - set serveroutput to on; - -- Enable printing of output. - ``` - -- The line numbers printed by format_error_backtrace are the line numbers within the stored procedure at each level of the stack, not line numbers across all expanded stored procedures. - -- format_error_backtrace can be used as a parameter default value in a procedure. - -## Example - -```sql --- The whale extension needs to be created first - -MogDB=# set whale.serveroutput to on; -SET - -MogDB=# select dbms_output.enable(10000); - enable --------- - -(1 row) -``` - -```sql --- dbms_utility.format_error_backtrace records exception stack information - -MogDB=# Create or replace procedure proc1 is -MogDB$# Begin -MogDB$# Dbms_output.put_line('running proc1'); -MogDB$# Raise no_data_found; -MogDB$# End; -MogDB$# / -CREATE PROCEDURE -MogDB=# create or replace procedure proc2 is -MogDB$# begin -MogDB$# dbms_output.put_line('calling proc1'); -MogDB$# dbms_output.put_line('---------------'); -MogDB$# proc1; -MogDB$# end; -MogDB$# / -CREATE PROCEDURE -MogDB=# -MogDB=# create or replace procedure proc3 is -MogDB$# begin -MogDB$# dbms_output.put_line('calling proc2'); -MogDB$# proc2; -MogDB$# exception -MogDB$# when no_data_found -MogDB$# then -MogDB$# dbms_output.put_line('error stack at top level'); -MogDB$# dbms_output.put_line(dbms_utility.format_error_backtrace); -MogDB$# end; -MogDB$# / -CREATE PROCEDURE - -MogDB=# begin -MogDB$# dbms_output.put_line('proc3->proc2->proc1 backtrace'); -MogDB$# proc3; -MogDB$# end; -MogDB$# / -proc3->proc2->proc1 backtrace -calling proc2 -calling proc1 ---------------- -running proc1 -error stack at top level -P0002: at "public.proc1", line 4 -P0002: at "public.proc2", line 5 -P0002: at "public.proc3", line 4 - -ANONYMOUS BLOCK EXECUTE -MogDB=# -``` - -```sql --- dbms_utility.format_error_backtrace can be used as a parameter default - -MogDB=# create or replace procedure proc_test_a(i_err_bt varchar2 default dbms_utility.format_error_backtrace,i_sqlerm varchar2 default 'ddd') is -MogDB$# begin -MogDB$# dbms_output.put_line('proc_test_a'); -MogDB$# dbms_output.put_line(i_err_bt ||i_sqlerm); -MogDB$# end; -MogDB$# / -CREATE PROCEDURE -MogDB=# - -MogDB=# declare -MogDB-# a int; -MogDB-# begin -MogDB$# a := 'abc'; -MogDB$# exception -MogDB$# when others then -MogDB$# proc_test_a; -MogDB$# end; -MogDB$# / -proc_test_a -ddd -ANONYMOUS BLOCK EXECUTE -MogDB=# -``` - -## Related Pages - -[whale](../../developer-guide/extension/whale.md) diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/mogdb-supports-insert-all.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/mogdb-supports-insert-all.md deleted file
mode 100644 index 486be156..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/mogdb-supports-insert-all.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: MogDB Supports Insert All -summary: MogDB Supports Insert All -author: Guo Huan -date: 2023-06-02 ---- - -# MogDB Supports Insert All - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -Oracle 9i introduced the INSERT ALL/FIRST syntax, which extends the original INSERT statement from inserting into a single table to inserting into multiple tables at once, and which routes each row of data to a table according to judgment conditions, so that several INSERT statements can be merged into one and repeated scans of the data are avoided. INSERT ALL and INSERT FIRST share essentially the same syntax; the only difference is the matching behavior. For each row of data, INSERT FIRST inserts only into the tables of the first WHEN condition that holds and does not evaluate the remaining conditions, while INSERT ALL evaluates every WHEN condition for each row and inserts into the tables of all conditions that hold. This feature is compatible with Oracle's INSERT ALL/FIRST functionality. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Description - -In daily production, a batch of data often needs to be inserted into different tables according to different conditions, for example, personal information collected nationwide inserted into per-province tables according to each person's origin. With ordinary INSERT statements, each province must scan and filter all the data to find its own records and insert them into its own table, which requires scanning the full data set many times. - -MogDB is compatible with Oracle's INSERT ALL/FIRST functionality, extending the INSERT statement from inserting into a single table to inserting into multiple tables at once and routing rows to tables by judgment conditions, so that multiple INSERT statements can be combined into one and repeated scans of the data are avoided. The execution results are consistent with Oracle: there is no core dump during execution and no exception or error report inconsistent with Oracle; the only difference is that MogDB does not support the error_log_clause syntax. - -## Constraints - -- The corresponding error_log_clause syntax in Oracle is not supported. - -## Syntax Description - -Oracle's Insert syntax is defined as follows: - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/MogDB支持Insert-All特性-1.png) - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/MogDB支持Insert-All特性-2.png) - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/MogDB支持Insert-All特性-3.png) - -MogDB's syntax rules are basically the same as Oracle's shown above; the only difference is that MogDB does not support error_log_clause.
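Since the syntax diagrams above are provided only as images, the following is a minimal text sketch of the statement shape, distilled from the example in the next section; all table names, column lists, and conditions are placeholders, and this is not a formal grammar:

```sql
-- Sketch of the multi-table INSERT shape (placeholders only):
INSERT { ALL | FIRST }
    WHEN condition_1 THEN
        INTO table_1 (column_list) VALUES (expression_list)
        [ INTO table_2 (column_list) VALUES (expression_list) ] ...
    [ WHEN condition_2 THEN ... ]
    [ ELSE
        INTO table_n (column_list) VALUES (expression_list) ... ]
subquery;
```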
- -In addition, MogDB supports EXPLAIN ANALYZE for viewing the exact number of rows inserted into each table, as shown below: - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/MogDB支持Insert-All特性-4.png) - -## Example - -```sql --- Create tables and insert test data -CREATE TABLE tt02 (a INT, b INT, a1 INT, b1 INT); -INSERT INTO tt02 -VALUES (generate_series(1, 10), generate_series(11, 20), generate_series(21, 30), generate_series(31, 40)); -CREATE TABLE tt02_1 (a INT); -CREATE TABLE tt02_2 (a INT, b INT); -CREATE TABLE tt02_3 (a1 INT, b1 INT); -CREATE TABLE tt02_4 (a INT, b INT, a1 INT, b1 INT); - --- use case: INSERT ALL -INSERT ALL - WHEN a1 < 25 THEN - INTO tt02_1(a) VALUES(a) - INTO tt02_2(a, b) VALUES(a, b) - WHEN a1 < 30 THEN - INTO tt02_3(a1, b1) VALUES(a, a1) - INTO tt02_4(a, b1) VALUES(b, a1) - ELSE - INTO tt02_4(a, b) VALUES(a, b) - INTO tt02_4(a, b) VALUES(a, b) -SELECT a, b, a1 FROM tt02; - --- result -MogDB=# SELECT * FROM tt02_1 ORDER BY a; - a --- - 1 - 2 - 3 - 4 -(4 rows) - -MogDB=# SELECT * FROM tt02_2 ORDER BY a, b; - a | b ----+---- - 1 | 11 - 2 | 12 - 3 | 13 - 4 | 14 -(4 rows) - -MogDB=# SELECT * FROM tt02_3 ORDER BY a1, b1; - a1 | b1 -----+---- - 1 | 21 - 2 | 22 - 3 | 23 - 4 | 24 - 5 | 25 - 6 | 26 - 7 | 27 - 8 | 28 - 9 | 29 -(9 rows) - -MogDB=# SELECT * FROM tt02_4 ORDER BY a, b, a1, b1; - a | b | a1 | b1 -----+----+----+---- - 10 | 20 | | - 10 | 20 | | - 11 | | | 21 - 12 | | | 22 - 13 | | | 23 - 14 | | | 24 - 15 | | | 25 - 16 | | | 26 - 17 | | | 27 - 18 | | | 28 - 19 | | | 29 -(11 rows) - --- use case: INSERT FIRST -TRUNCATE TABLE tt02_1; -TRUNCATE TABLE tt02_2; -TRUNCATE TABLE tt02_3; -TRUNCATE TABLE tt02_4; -INSERT FIRST - WHEN a1 < 25 THEN - INTO tt02_1(a) VALUES (a) - INTO tt02_2(a, b) VALUES (a, b) - WHEN a1 < 30 THEN - INTO tt02_3(a1, b1) VALUES (a, a1) - INTO tt02_4(a, b1) VALUES (b, a1) - ELSE - INTO tt02_4(a, b) VALUES (a, b) - INTO tt02_4(a, b) VALUES (a, b) -SELECT a, b, a1 FROM tt02; - --- result -MogDB=# SELECT * FROM tt02_1 ORDER BY a; - a --- - 1 - 2 - 3 - 4 -(4 rows) - -MogDB=# SELECT * FROM tt02_2 ORDER BY a, b; - a | b ----+---- - 1 | 11 - 2 | 12 - 3 | 13 - 4 | 14 -(4 rows) - -MogDB=# SELECT * FROM tt02_3 ORDER BY a1, b1; - a1 | b1 -----+---- - 5 | 25 - 6 | 26 - 7 | 27 - 8 | 28 - 9 | 29 -(5 rows) - -MogDB=# SELECT * FROM tt02_4 ORDER BY a, b, a1, b1; - a | b | a1 | b1 -----+----+----+---- - 10 | 20 | | - 10 | 20 | | - 15 | | | 25 - 16 | | | 26 - 17 | | | 27 - 18 | | | 28 - 19 | | | 29 -(7 rows) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/oracle-dblink-syntax-compatibility.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/oracle-dblink-syntax-compatibility.md deleted file mode 100644 index ae3d75d2..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/oracle-dblink-syntax-compatibility.md +++ /dev/null @@ -1,242 +0,0 @@ ---- -title: Oracle DBLink Syntax Compatibility -summary: Oracle DBLink Syntax Compatibility -author: zhang cuiping -date: 2023-07-07 ---- - -# Oracle DBLink Syntax Compatibility - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -DBLink (database link) is a one-way connection between databases, usually used to connect to an external database and perform DML operations on external data.
This feature supports the Oracle DBLink syntax: through the oracle_fdw plugin, you can use the `@` symbol to access tables in an Oracle database directly from a MogDB database. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Description - -This feature supports the Oracle DBLink syntax for accessing tables in an Oracle database using the `@` symbol. - -Supports SELECT, INSERT, UPDATE, DELETE, and EXPLAIN operations via DBLink. - -## Constraints - -- Only SELECT, INSERT, UPDATE, DELETE, and EXPLAIN operations are supported. - -## Syntax Description - -- Connecting to an Oracle database via DBLink - - ``` - create server <dblink_name> foreign data wrapper oracle_fdw options(dbserver '<IP:PORT>/db_name'); - ``` - - - dblink_name: a customized DBLink name. - - IP:PORT: the IP address and port number of the server where the Oracle database resides. - - db_name: Oracle database name. - -- Creating user mappings through DBLink - - ``` - create user mapping for <mogdb_user_name> server <dblink_name> options(user '<oracle_user_name>', password 'oracle_password'); - ``` - - - mogdb_user_name: the MogDB database user name. - - dblink_name: a customized DBLink name. - - oracle_user_name: the user name of the Oracle database. - - oracle_password: the password of the Oracle database user. - -- Querying tables via DBLink - - ``` - SELECT * FROM <oracle_table_name>@<dblink_name>; - ``` - -- Inserting data via DBLink - - ``` - INSERT INTO <oracle_table_name>@<dblink_name> VALUES (...); - ``` - -- Updating data via DBLink - - ``` - UPDATE <oracle_table_name>@<dblink_name> SET ... WHERE ...; - ``` - -- Deleting data via DBLink - - ``` - DELETE FROM <oracle_table_name>@<dblink_name> WHERE ...; - ``` - -- Viewing the execution plan - - ``` - explain SELECT * FROM <oracle_table_name>@<dblink_name>; - ``` - -> **Note**: -> -> - oracle_table_name: the table name of the Oracle database. -> - dblink_name: a customized DBLink name. - -## Example - -### Environment Preparation - -**MogDB** - -- The MogDB database has been installed. -- The oracle_fdw plugin is installed. See [oracle_fdw](../../developer-guide/extension/foreign-data-wrapper/1-oracle_fdw.md) for details. - -**Oracle** - -1. Log in to the database. - -2. Create a table. - - ``` - CREATE TABLE scott.EMPLOYEE ( - ID INT PRIMARY KEY, - NAME VARCHAR2(50) NOT NULL, - SALARY NUMBER(10,2) - ); - ``` - -3. Insert test data. - - ``` - INSERT INTO scott.EMPLOYEE (ID, NAME, SALARY) VALUES (1001, 'Mike', 5000); - INSERT INTO scott.EMPLOYEE (ID, NAME, SALARY) VALUES (1002, 'JACK', 6000); - ``` - -### Steps - -1. Log in to the MogDB database, using the database postgres, port number 27000 as an example. - - ```bash - [omm5@localhost oracle_file]$ gsql -d postgres -p 27000 -r - gsql ((MogDB 5.0.0 build 503a9ef7) compiled at 2023-06-26 16:30:36 commit 0 last mr 1804 ) - Non-SSL connection (SSL connection is recommended when requiring high-security) - Type "help" for help. - - MogDB=# - ``` - -2. Create a user and grant the user sysadmin permissions, using the username mymogdb50 as an example. - - ```sql - MogDB=# create user mymogdb50 identified by 'Enmo@123'; - NOTICE: The encrypted password contains MD5 ciphertext, which is not secure. - CREATE ROLE - MogDB=# alter user mymogdb50 sysadmin; - ALTER ROLE - ``` - -3. Exit the database. - - ``` - MogDB=# \q - [omm5@localhost oracle_file]$ - ``` - -4. Create a user mapping key file for the user. - - ```bash - [omm5@localhost oracle_file]$ gs_guc generate -S 'xxxx@123' -D $GAUSSHOME/bin -o usermapping -U mymogdb50 - The gs_guc run with the following arguments: [gs_guc -S ******** -D /data/mogdb500/app/bin -o usermapping -U mymogdb50 generate ].
- gs_guc generate -S *** -U *** - ``` - - **Note**: -S specifies a custom key, e.g. `xxxx@123`. - -5. Log in to the MogDB database as user mymogdb50. - - ```bash - [omm5@localhost oracle_file]$ gsql -d postgres -p 27000 -r -U mymogdb50 -W 'Enmo@123' - gsql ((MogDB 5.0.0 build 503a9ef7) compiled at 2023-06-26 16:30:36 commit 0 last mr 1804 ) - Non-SSL connection (SSL connection is recommended when requiring high-security) - Type "help" for help. - - MogDB=> - ``` - -6. Create a DBLink connection from the MogDB database to the Oracle database, using the IP address 121.36.15.2, port number 55446, and database name pdb_test as an example. - - ```sql - MogDB=> CREATE SERVER db_link_to_pdb_test_15_2 FOREIGN DATA WRAPPER oracle_fdw OPTIONS(dbserver '121.36.15.2:55446/pdb_test'); - CREATE SERVER - ``` - -7. Create a user mapping. Create a mapping for MogDB database user mymogdb50 and Oracle database user scott. - - ```sql - MogDB=> create user mapping for mymogdb50 server db_link_to_pdb_test_15_2 options(user 'scott',password 'xxx123'); - CREATE USER MAPPING - ``` - - **Note**: The username `scott` and the password `xxx123` need to be replaced according to the actual environment. - -8. Query Oracle tables in the MogDB database via DBLink. - - ```sql - MogDB=> SELECT * FROM scott.EMPLOYEE@db_link_to_pdb_test_15_2; - id | name | salary - ------+------+--------- - 1001 | Mike | 5000.00 - 1002 | JACK | 6000.00 - (2 rows) - - MogDB=> SELECT * FROM scott.EMPLOYEE@db_link_to_pdb_test_15_2 WHERE SALARY > 400; - id | name | salary - ------+------+--------- - 1001 | Mike | 5000.00 - 1002 | JACK | 6000.00 - (2 rows) - ``` - -9. Perform update, insert, delete, and query operations on Oracle database tables in the MogDB database through DBLink, using the scott.EMPLOYEE table as an example. - - ```sql - MogDB=> UPDATE scott.EMPLOYEE@db_link_to_pdb_test_15_2 SET SALARY = 5500 WHERE ID = 1001; - UPDATE 1 - - MogDB=> insert into scott.EMPLOYEE@db_link_to_pdb_test_15_2 values (1003, 'JANE', 7000); - INSERT 0 1 - - MogDB=> DELETE FROM scott.EMPLOYEE@db_link_to_pdb_test_15_2 WHERE SALARY =6000; - DELETE 1 - - MogDB=> select * from scott.EMPLOYEE@db_link_to_pdb_test_15_2; - id | name | salary - ------+------+--------- - 1003 | JANE | 7000.00 - 1001 | MIKE | 5500.00 - (2 rows) - ``` - -10. View the execution plan for a query on an Oracle database table in the MogDB database through DBLink.
- - ```sql - MogDB=> explain select * from scott.EMPLOYEE@db_link_to_pdb_test_15_2; - QUERY PLAN - ------------------------------------------------------------------------------------------------ - Foreign Scan on "scott.employee@db_link_to_pdb_test_15_2" employee (cost=10000.00..20000.00 rows=1000 width=78) - Oracle query: SELECT /*66abc20a4a7895b75898e391381f9de8*/ r1."ID",r1."NAME",r1."SALARY" FROM scott.employee r1 - (2 rows) - ``` - -## Related Pages - -[oracle_fdw](../../developer-guide/extension/foreign-data-wrapper/1-oracle_fdw.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/remove-type-conversion-hint-when-creating-package-function-procedure.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/remove-type-conversion-hint-when-creating-package-function-procedure.md deleted file mode 100644 index 2a3deee0..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/remove-type-conversion-hint-when-creating-package-function-procedure.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Remove Type Conversion Hint When Creating PACKAGE/FUNCTION/PROCEDURE -summary: Remove Type Conversion Hint When Creating PACKAGE/FUNCTION/PROCEDURE -author: Guo Huan -date: 2023-06-16 ---- - -# Remove Type Conversion Hint When Creating PACKAGE/FUNCTION/PROCEDURE - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -When creating a FUNCTION, a PROCEDURE, or a FUNCTION/PROCEDURE in a PACKAGE, if a parameter type uses source_type%type, the NOTICE log for the type conversion is removed. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Syntax Description - -```sql -CREATE FUNCTION f_test(input1 source_type%TYPE ....) -$$ -... -END;$$ -``` - -## Example - -```sql -MogDB=# CREATE OR REPLACE FUNCTION f_processed_in_out_plpgsgl2(out out1 t_processed.val%TYPE, out out2 t_processed.val%TYPE, in1 t_processed.val%TYPE) -MogDB-# AS $$ -MogDB$# BEGIN -MogDB$# SELECT val,val2 INTO out1, out2 FROM t_processed WHERE val = in1 LIMIT 1; -MogDB$# END;$$ -MogDB-# LANGUAGE plpgsql; -CREATE FUNCTION -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-bypass-method-when-merge-into-hit-index.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-bypass-method-when-merge-into-hit-index.md deleted file mode 100644 index b6ad0339..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-bypass-method-when-merge-into-hit-index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Support Bypass Method When Merge Into Hit Index -summary: Support Bypass Method When Merge Into Hit Index -author: Guo Huan -date: 2023-06-14 ---- - -# Support Bypass Method When Merge Into Hit Index - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -This feature supports the Bypass execution method when MERGE INTO hits an index, improving MERGE INTO performance. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Constraints - -- The reference specified by USING must explicitly return a deterministic single record (e.g. the sys_dummy view).
- -- The ON condition must hit the target table index in its entirety (in the same way that IndexScan supports Bypass) and may only use deterministic values (e.g., constants or bound variables). - -- For partitioned tables, the ON condition is supported only when it produces an execution plan that resolves deterministically to a single partition. - -- The MATCHED and NOT MATCHED branches cannot reference columns of tables (or views) referenced by USING. - -- The UPDATE branch cannot modify partition key fields. - -## Syntax Description - -```sql -MERGE INTO target_table [ [ AS ] target_alias ] -USING data_source ON join_condition -when_clause; -when_clause is: -{ WHEN MATCHED THEN merge_update | - WHEN NOT MATCHED THEN merge_insert } -``` - -## Example - -```sql --- Prepare data -create table tab_bypass(id integer, name text); -insert into tab_bypass select n,'name'||n from generate_series(1,100000) as foo(n); -create index on tab_bypass(id); -analyze tab_bypass; - --- Check for "[Bypass]" in the output. -explain (costs off) -merge into tab_bypass using sys_dummy on(id=1) -when matched then update set name=name||'matched' -when not matched then insert (id,name) values(1,'name01'); --- A sample output message is as follows - QUERY PLAN ------------------------------------------------- - [Bypass] - Merge on tab_bypass - -> Nested Loop Left Join - -> Result - -> Index Scan using tab_bypass_id_idx on tab_bypass - Index Cond: (id = 1) - - --- Using prepare -prepare mystmt(integer) as -merge into tab_bypass using sys_dummy on(id=$1) -when matched then update set name=name||'matched' -when not matched then insert (id,name) values($1,'name'||$1); - --- Check for "[Bypass]" in the output. -explain (costs off) -execute mystmt(10); --- A sample output message is as follows - QUERY PLAN ------------------------------------------------- - [Bypass] - Merge on tab_bypass - -> Nested Loop Left Join - -> Result - -> Index Scan using tab_bypass_id_idx on tab_bypass - Index Cond: (id = $1) - --- Clear resources -deallocate mystmt; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-for-adding-nocopy-attributes-to-procedure-and-function-parameters.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-for-adding-nocopy-attributes-to-procedure-and-function-parameters.md deleted file mode 100644 index a6bdff78..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-for-adding-nocopy-attributes-to-procedure-and-function-parameters.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Support For Adding Nocopy Attributes To Procedure And Function Parameters -summary: Support For Adding Nocopy Attributes To Procedure And Function Parameters -author: Guo Huan -date: 2023-06-07 ---- - -# Support For Adding Nocopy Attributes To Procedure And Function Parameters - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -Supports the NOCOPY attribute in the syntax for creating stored procedures and functions, including stored procedures and functions inside packages. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Description - -This feature adds the NOCOPY keyword to the CREATE FUNCTION and CREATE PROCEDURE syntax, and to the CREATE PACKAGE syntax when the package contains a function or procedure. - -## Constraints - -- Syntax compatibility only; the keyword has no functional effect.
- -## Syntax Description - -``` -CreateFunction ::= CREATE [ OR REPLACE ] FUNCTION function_name - [ ( [ { argname [ argmode ] [NOCOPY] argtype [ { DEFAULT | ':=' | = } expression ]} [, ...] ] ) ] - [ RETURNS rettype [ DETERMINISTIC ] | RETURNS TABLE ( { column_name column_type } [, '...'] )] - LANGUAGE lang_name - [ - {IMMUTABLE | STABLE | VOLATILE } - | {SHIPPABLE | NOT SHIPPABLE} - | WINDOW - | [ NOT ] LEAKPROOF - | {CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT } - | {[ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER | AUTHID DEFINER | AUTHID CURRENT_USER} - | {fenced | not fenced} - | {PACKAGE} - | COST execution_cost - | ROWS result_rows - | SET configuration_parameter { {TO | =} value | FROM CURRENT } - - ]['...'] - { - AS 'definition' - } -``` - -``` -CreateProcedure ::= CREATE [ OR REPLACE ] PROCEDURE procedure_name - [ ( {[ argmode ] [ argname ] [NOCOPY] argtype [ { DEFAULT | ':=' | = } expression ]}[, '...']) ] - [ - { IMMUTABLE | STABLE | VOLATILE } - | { SHIPPABLE | NOT SHIPPABLE } - | {PACKAGE} - | [ NOT ] LEAKPROOF - | { CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT } - | {[ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER | AUTHID DEFINER | AUTHID CURRENT_USER} - | COST execution_cost - | SET configuration_parameter { TO value | = value | FROM CURRENT } - ][ '...' ] - { IS | AS } -plsql_body -``` - -## Example - -```sql -CREATE FUNCTION FUN_NOCOPY_TEST (a IN nocopy INT) RETURNS INT AS -$$ -SELECT a + 1;$$ -LANGUAGE SQL; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-for-constants-in-package-as-default-values.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-for-constants-in-package-as-default-values.md deleted file mode 100644 index 7c086b78..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-for-constants-in-package-as-default-values.md +++ /dev/null @@ -1,143 +0,0 @@ ---- -title: Support For Constants In Package As Default Values -summary: Support For Constants In Package As Default Values -author: Guo Huan -date: 2023-06-14 ---- - -# Support For Constants In Package As Default Values - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -This feature supports package constants as default values for function or procedure input parameters. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Constraints - -The current MogDB implementation can only read PACKAGE constants and assign them to function input parameters. Following Oracle's behavior: if a FUNCTION/PROCEDURE input parameter directly references a PACKAGE variable and no type conversion is needed, then modifying the value of the PACKAGE variable affects the value of the input parameter. - -If the input parameter references an expression over a PACKAGE variable, or type conversion is required, then modifying the value of the PACKAGE variable does not affect the value of the input parameter. - -## Syntax Description - -A PACKAGE constant is referenced as the default value of a function or procedure input parameter. - -```SQL -FUNCTION test_func(input p1 DEFAULT pkg_name.aaa) -``` - -The GUC parameter proc_inparam_immutable is added, defaulting to true. - -If the current database is in A compatibility mode and proc_inparam_immutable is true, only IN parameters may use default values, and input parameters cannot be modified inside the FUNCTION/PROCEDURE, which is consistent with Oracle. Modifying the value of a PACKAGE variable then affects function input parameters in a way consistent with Oracle. - -According to the PG_CAST system table, a list of the required type conversions can be derived. - -With the following statement, you can query the list of source_type and target_type pairs that require type conversion. - -```sql -SELECT pt1.typname AS source_type, pt2.typname AS target_type FROM pg_cast pc, pg_type pt1, pg_type pt2 WHERE pc.castsource = pt1.oid AND pc.casttarget = pt2.oid ORDER BY source_type; -``` - -## Example - -```sql -CREATE TABLE tlog( -id number, -llevel number -); -CREATE OR REPLACE package pkg_logparam IS - default_level constant tlog.llevel%type :=70; --lerror -END pkg_logparam; -/ -CREATE OR REPLACE package pkg_mplog AS - FUNCTION getLevelInText(pLevel tlog.llevel%type DEFAULT pkg_logparam.default_level) RETURN varchar; -END pkg_mplog; -/ -CREATE OR REPLACE package body pkg_mplog AS - FUNCTION getLevelInText(pLevel tlog.llevel%type DEFAULT pkg_logparam.default_level) RETURN varchar - IS - BEGIN - RETURN pLevel; - END; -END pkg_mplog; -/ -SELECT pkg_mplog.getLevelInText(); - getlevelintext ------------------ - 70 -(1 row) - -CREATE OR REPLACE package test_pkg_paraml IS - v1 varchar2(4000) := 'old'; -- package public variable - v3 constant varchar2(4000) := 'old'; -- package public constant - PROCEDURE proc(p1 varchar2 DEFAULT v1, - p3 varchar2 := v3 - ); -END; - -CREATE OR REPLACE package body test_pkg_paraml IS - v5 varchar2(4000) := 'old'; -- package private variable - v7 constant varchar2(4000) := 'old'; -- package private constant - PROCEDURE proc2(p1 varchar2 default v1, - p3 varchar2 default v3, - p5 varchar2 default v5, - p7 varchar2 default v7) IS - BEGIN - raise notice 'v1:%, p1:%', v1, p1; - raise notice 'v3:%, p3:%', v3, p3; - raise notice 'v5:%, p5:%', v5, p5; - raise notice 'v7:%, p7:%', v7, p7; - raise notice 'Modify v5 to new, v1 has been modified previously'; - v5 :='new'; - raise notice 'v1:%, p1:%', v1, p1; - raise notice 'v5:%, p5:%', v5, p5; - END; - PROCEDURE proc(p1 varchar2 DEFAULT v1, p3 varchar2 DEFAULT v3) IS - BEGIN - raise notice '--- proc begin'; - raise notice 'v1:%, p1:%', v1, p1; - raise notice 'v3:%, p3:%', v3, p3; - raise notice 'Modify v1 to new'; - v1 := 'new'; - raise notice 'v1:%, p1:%', v1, p1; - raise notice '--- proc1 begin'; - raise notice '--- proc1 begin'; - raise notice '--- proc end'; - END; -END; - -BEGIN - raise notice '---------------------'; - test_pkg_paraml.proc; - raise notice '---------------------'; - test_pkg_paraml.proc('0', '0'); -END; -/ -NOTICE: --------------------- -NOTICE: --- proc begin -NOTICE: v1:old, p1:old -NOTICE: v3:old, p3:old -NOTICE: Modify v1 to new -NOTICE: v1:new, p1:old -NOTICE: --- proc1
begin -NOTICE: --- proc1 begin -NOTICE: --- proc end -NOTICE: --------------------- -NOTICE: --- proc begin -NOTICE: v1:new, p1:0 -NOTICE: v3:old, p3:0 -NOTICE: Modify v1 to new -NOTICE: v1:new, p1:0 -NOTICE: --- proc1 begin -NOTICE: --- proc1 begin -NOTICE: --- proc end -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-passing-the-count-attribute.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-passing-the-count-attribute.md deleted file mode 100644 index 70ab61af..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-passing-the-count-attribute.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Support For Passing The Count Attribute Of An Array As A Parameter Of The Array Extend -summary: Support For Passing The Count Attribute Of An Array As A Parameter Of The Array Extend -author: Guo Huan -date: 2023-06-14 ---- - -# Support For Passing The Count Attribute Of An Array As A Parameter Of The Array Extend - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -This feature supports passing the count attribute of an array as the parameter of the extend method that extends an array in a procedure. - -Arrays already have an extend method, but it previously did not accept "array.count" as a parameter. This feature only adds support for passing the "array.count" attribute as a parameter and does not change any existing behavior. - -The extend method of an array still does not support being called with two arguments. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Constraints - -The call must conform to the "array_var.extend(other_array_var.count)" syntax. The two arrays may be the same variable. - -## Syntax Description - -``` -array_var.extend(array_var.count); -``` - -## Example - -```sql -CREATE PROCEDURE proc_test_typearray_pro IS -type t1 IS record (a int); -type t1_t IS TABLE of t1; -v1 t1_t; -v2 t1_t; -BEGIN - v1[1].a:=1; - v1[2].a:=2; - v2.extend(v1.count); - raise notice '%',v2.count; -END; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-plpgsql-subtype.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-plpgsql-subtype.md deleted file mode 100644 index ab5869ac..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-plpgsql-subtype.md +++ /dev/null @@ -1,188 +0,0 @@ ---- -title: Support PLPGSQL subtype -summary: Support PLPGSQL subtype -author: Guo Huan -date: 2023-06-19 ---- - -# Support PLPGSQL subtype - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -In Oracle PL/SQL, a subtype is a user-defined type based on an existing type. This feature supports the same syntax and usage in PLPGSQL. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Constraints - -The `RANGE low_value .. high_value` syntax is not currently supported. - -## Syntax Description - -```sql -SUBTYPE subtype_name IS base_type - { precision [, scale ] } [ NOT NULL ] -``` - -base_type can be any existing type or user-defined type, e.g. CHAR, DATE, etc.
Arrays and set types in PG syntax are not currently supported as the base type. - -Using another subtype as the base type is not currently supported; only non-subtype base types are supported. - -## Example - -```sql --- Scenario 1: Define subtype inside an anonymous block and use it -DECLARE -subtype varchar2_10 IS varchar2(10); -xx varchar2_10; -BEGIN -xx:='10'; -raise notice '%',xx; -END; -NOTICE: 10 -ANONYMOUS BLOCK EXECUTE - -DECLARE -subtype varchar2_10 IS varchar2(10); -type tyt_varchar2_10 IS TABLE of varchar2_10; -xx tyt_varchar2_10; -BEGIN -xx(1):='10'; -xx(2):='x'; -raise notice '%',xx; -END; -NOTICE: {10,x} -ANONYMOUS BLOCK EXECUTE - --- Scenario 2: Define the subtype inside a procedure and use it -CREATE PROCEDURE test_subtype_proc IS -subtype varchar2_10 IS varchar2(10); -type tyt_varchar2_10 IS TABLE of varchar2_10; -xx tyt_varchar2_10; -aa varchar2_10; -BEGIN -xx(1):='10'; -xx(2):='x'; -aa:='abc'; -raise notice '%-%',xx,aa; -END; - -BEGIN -test_subtype_proc; -END; -NOTICE: {10,x}-abc -ANONYMOUS BLOCK EXECUTE - --- Scenario 3: Define subtype inside a function and use it -CREATE FUNCTION test_subtype_func RETURN varchar2 IS -subtype varchar2_10 IS varchar2(10); -type tyt_varchar2_10 IS TABLE of varchar2_10; -xx tyt_varchar2_10; -aa varchar2_10; -BEGIN -xx(1):='10'; -xx(2):='x'; -aa:='abc'; -RETURN xx(1)||'-'||xx(2)||'-'||aa; -END; - -SELECT test_subtype_func; - test_subtype_func -------------------- - 10-x-abc -(1 row) - --- Scenario 4: Define subtype inside a package -CREATE PACKAGE test_subtype_pkg IS -subtype varchar2_10 IS varchar2(10); -type tyt_varchar2_10 IS TABLE of varchar2_10; -END test_subtype_pkg; - --- Scenario 5: Define subtype inside a package and use it as a parameter type of a procedure in the package -CREATE PACKAGE test_subtype_pkg1 IS -subtype varchar2_10 IS varchar2(10); -type tyt_varchar2_10 IS TABLE of varchar2_10; -PROCEDURE test_proc(a varchar2_10); -END test_subtype_pkg1; - -CREATE PACKAGE test_subtype_pkg2 IS -subtype varchar2_10 IS varchar2(10); -type tyt_varchar2_10 IS TABLE of varchar2_10; -PROCEDURE test_proc(a test_subtype_pkg2.varchar2_10); -END test_subtype_pkg2; - -CREATE PACKAGE test_subtype_pkg3 IS -subtype varchar2_10 IS varchar2(10); -type tyt_varchar2_10 IS TABLE of varchar2_10; -PROCEDURE test_proc; -END test_subtype_pkg3; -CREATE PACKAGE body test_subtype_pkg3 IS -PROCEDURE test_proc IS -xx varchar2_10; -BEGIN -xx:=3; -END; -END test_subtype_pkg3; - --- Scenario 6: Define subtype in a package and call it in an external anonymous block -CREATE PACKAGE test_subtype_pkg4 IS -subtype varchar2_10 IS varchar2(10); -type tyt_varchar2_10 IS TABLE of varchar2_10; -END test_subtype_pkg4; - -DECLARE -xx test_subtype_pkg4.varchar2_10; -BEGIN -xx:='10'; -raise notice '%',xx; -END; - -DECLARE -xx test_subtype_pkg4.tyt_varchar2_10; -BEGIN -xx(1):='10'; -xx(2):='x'; -raise notice '%',xx; -END; - --- Scenario 7: subtype variable with a default value -DECLARE -SUBTYPE empno IS VARCHAR2(10); -verb empno := 'run'; -BEGIN -null; -END; - --- Scenario 8: Use subtype in the parameter of a procedure in another package -CREATE OR REPLACE package test_subtype_pkg5 IS -subtype varchar2_10 IS varchar2(10); -type tyt_varchar2_10 IS TABLE of varchar2_10; -END test_subtype_pkg5; - -CREATE OR REPLACE package tst_pro IS -PROCEDURE tst_pro(a test_subtype_pkg5.varchar2_10); -END tst_pro; - -CREATE OR REPLACE package body tst_pro IS -PROCEDURE tst_pro(a test_subtype_pkg5.varchar2_10) IS -BEGIN -null; -END; -END tst_pro; - --- Scenario 9: subtype as an element of record -CREATE OR REPLACE package
pki_type as -subtype tstr IS varchar(60); -subtype tstatus IS varchar(3); -type t_data IS record( -cust_no tstr, -cust_flag tstatus); -type t_data_array IS TABLE of t_data INDEX BY binary_integer; -END pki_type; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-q-quote-escape-character.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-q-quote-escape-character.md deleted file mode 100644 index cbc97a31..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-q-quote-escape-character.md +++ /dev/null @@ -1,296 +0,0 @@ ---- -title: Support Q Quote Escape Character -summary: Support Q Quote Escape Character -author: Guo Huan -date: 2023-06-14 ---- - -# Support Q Quote Escape Character - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -This feature is compatible with Oracle's use of `q'` for escaping single quotes globally within an individual string, e.g., `SELECT q'{I'm fine}';` - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Constraints - -- Due to JDBC's local character checking, executing such statements through JDBC is currently not supported. - -## Syntax Description - -The syntax is one letter q (case insensitive), one single quote, one delimiter, the original string text, one delimiter, and one single quote. - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/q-quote.png) - -The `q'` escape character is usually followed by a delimiter such as `! [] {} () <>`; you can also use `\`, letters, digits, or characters such as =, +, -, *, &, $, %, #. Spaces and tab characters cannot be used. - -The delimiter must be an English letter, a digit, or a special character. If the enclosed string contains the delimiter character, make sure that occurrence is not immediately followed by a single quote. - -The range of delimiters includes special characters, English letters, and digits. - -Special characters: {} ' : | [] \ <> ? = + - _ () * & ^ % $ # @ ! ~ ` . /, as well as commas, double quotes ("), and single quotes ('). - -For example, `SELECT q'{I'm }' fine}'` uses `{` as the delimiter, but the enclosed string contains `}` followed by a single quote, which is not supported by this syntax. - -## Example - -```sql -SELECT q'{I'm fine}'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q''I'm fine''; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'[I'm fine]'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'\I'm fine\'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'<I'm fine>'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'?I'm fine?'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'aI'm finea'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'2I'm fine2'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'=I'm fine='; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'-I'm fine-'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'(I'm fine)'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'*I'm fine*'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'&I'm fine&'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'^I'm fine^'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'%I'm fine%'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'$I'm fine$'; - ?column?
---------- - I'm fine -(1 row) - -SELECT q'#I'm fine#'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'@I'm fine@'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'!I'm fine!'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'~I'm fine~'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'`I'm fine`'; - ?column? ----------- - I'm fine -(1 row) - -SELECT q'{'s is good}'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'''s is good''; - ?column? ------------ - 's is good -(1 row) - -SELECT q'['s is good]'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'\'s is good\'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'<'s is good>'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'?'s is good?'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'a's is gooda'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'2's is good2'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'='s is good='; - ?column? ------------ - 's is good -(1 row) - -SELECT q'-'s is good-'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'('s is good)'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'*'s is good*'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'&'s is good&'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'^'s is good^'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'%'s is good%'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'$'s is good$'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'#'s is good#'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'@'s is good@'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'!'s is good!'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'~'s is good~'; - ?column? ------------ - 's is good -(1 row) - -SELECT q'`'s is good`'; - ?column? ------------ - 's is good -(1 row) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-subtracting-two-date-types-to-return-numeric-type.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-subtracting-two-date-types-to-return-numeric-type.md deleted file mode 100644 index e3ae8cc1..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-subtracting-two-date-types-to-return-numeric-type.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Support Subtracting Two Date Types To Return Numeric Type -summary: Support Subtracting Two Date Types To Return Numeric Type -author: Guo Huan -date: 2023-06-06 ---- - -# Support Subtracting Two Date Types To Return Numeric Type - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -This feature supports an operator that subtracts two date types and returns a numeric type representing the difference in days between the two dates. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Description - -In Oracle, subtracting two date fields yields a number representing the difference in days between the two dates; if the dates contain hour, minute, and second information, the time-of-day part is converted to a decimal fraction of a day (for example, 12:12:01 is 43921 seconds, or about 0.5083 days).
- -MogDB provides a compatible adaptation for this scenario. It is implemented in the whale schema and allows two date values to be subtracted to return a number. Because of the underlying calculation method, MogDB does not constrain the precision of the numeric return value, which can have up to 24 decimal places. - -## Syntax Description - -1. New operator: - - - Left operand type: date - - Right operand type: date - - Return type: numeric - - Function: Subtract two dates to return a numeric result indicating the number of days between the two dates. - -2. New Boolean GUC parameter enable_date_operator_sub_oracle - - on: indicates that the numeric operator is used to subtract two dates. - - off: indicates that the original MogDB processing is used, and the two dates are subtracted by the interval operator. - -## Example - -```sql -SET enable_date_operator_sub_oracle = on; - -SELECT '2022-08-09 12:12:01'::date - '2021-08-08'::date; - ?column? --------------------------- - 366.50834490740740740741 -(1 row) - -SET enable_date_operator_sub_oracle = off; - -SELECT '2022-08-09 12:12:01'::date - '2021-08-08'::date; - ?column? -------------------- - 366 days 12:12:01 -(1 row) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-synonym-calls-without-parentheses-for-function-without-parameters.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-synonym-calls-without-parentheses-for-function-without-parameters.md deleted file mode 100644 index c5ef5488..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-synonym-calls-without-parentheses-for-function-without-parameters.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Support Synonym Calls Without Parentheses For Function Without Parameters -summary: Support Synonym Calls Without Parentheses For Function Without Parameters -author: Guo Huan -date: 2023-06-19 ---- - -# Support Synonym Calls Without Parentheses For Function Without Parameters - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -This feature allows a parameterless function to be called without parentheses through a synonym created for it. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Example - -```sql -DROP SCHEMA IF EXISTS stone; -NOTICE:  SCHEMA "stone" does NOT exist, skipping -CREATE SCHEMA stone; -CREATE OR REPLACE FUNCTION stone.f_test RETURN number AS -BEGIN -  RETURN 1; -END; -/ -DROP synonym IF EXISTS syn_f_test; -NOTICE:  synonym "syn_f_test" does NOT exist, skipping -CREATE synonym syn_f_test FOR stone.f_test; --- Calling the function directly works -SELECT stone.f_test; - f_test --------- -      1 -(1 row) - --- Using the synonym with parentheses works -SELECT syn_f_test(); - syn_f_test ------------- -          1 -(1 row) - --- Using the synonym without parentheses also works
-SELECT syn_f_test; - syn_f_test ------------- -          1 -(1 row) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-table-function.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-table-function.md deleted file mode 100644 index 4841fc17..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-table-function.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Support table() -summary: Support table() -author: Guo Huan -date: 2023-06-14 ---- - -# Support table() - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -The table() function returns the data of a collection type (including collections with multiple columns) as a result set, so it can be queried just like an ordinary table. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Constraints - -- If the collection returned by table() is a single column with no field name, the column in the query result is named COLUMN_VALUE. - -## Syntax Description - -```sql -SELECT * FROM TABLE(array['a','b']); -``` - -```sql -DECLARE -BEGIN -FOR rec IN (SELECT * FROM TABLE(xxx)) LOOP -... -END LOOP; -END; -``` - -## Example - -```sql -SELECT * FROM TABLE(array[1, 2, 3]); - column_value -------------- - 1 - 2 - 3 -(3 rows) - -SELECT TABLE(array[1, 2, 3, null, 4, null, null, 5, 6]); - column_value -------------- - 1 - 2 - 3 - - 4 - - - 5 - 6 -(9 rows) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-to-keep-the-same-name-after-the-end-with-oracle.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-to-keep-the-same-name-after-the-end-with-oracle.md deleted file mode 100644 index a3445238..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-to-keep-the-same-name-after-the-end-with-oracle.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Support To Keep The Same Name After The End With Oracle -summary: Support To Keep The Same Name After The End With Oracle -author: Guo Huan -date: 2023-06-14 ---- - -# Support To Keep The Same Name After The End With Oracle - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -This feature makes the name after END of a PROCEDURE/FUNCTION/PACKAGE consistent with Oracle: the object name can be used, or it can be left empty. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Description - -In Oracle, the END of a PROCEDURE/FUNCTION/PACKAGE can be followed by the object name or left empty; however, prior to MogDB 5.0, the END of a PROCEDURE/FUNCTION could not be followed by a name, while the END of a PACKAGE had to be followed by a name. This caused a large amount of rewriting when manually migrating code. - -Since MogDB 5.0.0, the name after END of a PROCEDURE/FUNCTION/PACKAGE is consistent with Oracle: the object name can be used, or it can be left empty. In CREATE PACKAGE BODY, END can likewise be followed by the package_name or left empty.
- -## Syntax Description - -The name after END of a PROCEDURE/FUNCTION/PACKAGE is consistent with Oracle - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/end后的name和oracle保持一致1.png) - -Because this applies to every kind of block, whether function, procedure, package, or anonymous block, the syntax is not described separately in the creation syntax of procedures and functions but is described uniformly here. The name after END is always optional; once present, it must match the object name. - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/end后的name和oracle保持一致2.png) - -## Example - -```sql --- Scenario 1: an anonymous block END followed by no name -BEGIN -raise notice 'ok'; -END; - --- Scenario 2: an anonymous block END followed by a name -BEGIN -raise notice 'ok'; -END test; - --- Scenario 3: if the name after END of a stored procedure does not match, the mismatch is checked and an error is reported -CREATE OR REPLACE PROCEDURE test_proc_end IS -BEGIN -raise notice '%','ok'; -END test; - --- Scenario 4: a stored procedure END followed by the correct name -CREATE OR REPLACE PROCEDURE test_proc_end IS -BEGIN -raise notice '%','ok'; -END test_proc_end; - --- Scenario 5: a procedure END without a name inside a package -CREATE OR REPLACE package test_pkg_end IS -PROCEDURE aa; -END; - --- Scenario 6: a package body END followed by no name -CREATE OR REPLACE package body test_pkg_end IS -PROCEDURE aa IS -BEGIN -raise notice 'ok'; -END; -END; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-where-current-of.md b/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-where-current-of.md deleted file mode 100644 index f8283b2e..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/compatibility/support-where-current-of.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: Support Where Current Of -summary: Support Where Current Of -author: Guo Huan -date: 2023-06-12 ---- - -# Support Where Current Of - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -When a cursor is positioned on a table row, that row can be updated or deleted using the cursor to identify it. - -## Benefits - -Enhance MogDB compatibility with Oracle to reduce application migration costs. - -## Description - -The cursor query must be a basic, simple single-table query statement. Complex statements may report errors, especially grouping statements, as well as multi-table joins and correlated subqueries. This feature supports using "CURRENT OF cursor" as the WHERE condition of an UPDATE or DELETE statement. - -## Constraints - -- The CURRENT OF condition cannot be combined with any other condition in the WHERE clause. -- In Oracle compatibility mode, the cursor must specify "for update". -- When the cursor contains a join, Oracle may be unable to delete or update the data (without reporting an error), whereas MogDB can modify it normally.
- -## Syntax Description - -``` -CURRENT OF cursor_name -``` - -## Example - -```sql --- Prepare data -CREATE TABLE uctest(f1 int, f2 text); -INSERT INTO uctest VALUES (1, 'one'), (2, 'two'), (3, 'three'); - --- DELETE WHERE CURRENT OF -START TRANSACTION; -CURSOR c1 FOR SELECT * FROM uctest FOR UPDATE; -FETCH 2 FROM c1; -DELETE FROM uctest WHERE CURRENT OF c1; -COMMIT; - --- UPDATE WHERE CURRENT OF -START TRANSACTION; -CURSOR c1 FOR SELECT * FROM uctest FOR UPDATE; -FETCH c1; -UPDATE uctest SET f1 = 8 WHERE CURRENT OF c1; -COMMIT; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/1-access-control-model.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/1-access-control-model.md deleted file mode 100644 index 179c7926..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/1-access-control-model.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Access Control Model -summary: Access Control Model -author: Guo Huan -date: 2022-05-07 ---- - -# Access Control Model - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -The access control model can be used to manage users' access permissions and grant them the minimum permissions required for completing a task. - -## Benefits - -You can create users and grant permissions to them as needed to minimize risks. - -## Description - -The database provides a role-based access control model and an access control model based on the separation of duties. In the role-based access control model, database roles are classified into system administrator, monitoring administrator, O&M administrator, security policy administrator, and common user. The security administrator creates roles or user groups and grants permissions to roles. The monitoring administrator views the monitoring views or functions in **dbe_perf** mode. The O&M administrator uses the Roach tool to back up and restore the database. The security policy administrator creates resource labels, anonymization policies, and unified audit policies. A user who is assigned a role has the role's permissions. - -In the access control model based on the separation of duties, database roles are classified into system administrator, security administrator, audit administrator, monitoring administrator, O&M administrator, security policy administrator, and common user. The security administrator creates users, the system administrator grants permissions to users, and the audit administrator audits all user behavior. - -By default, the role-based access control model is used. To switch to another mode, set the GUC parameter **enableSeparationOfDuty** to **on**. - -## Enhancements - -None - -## Constraints - -- The permissions of the system administrator are controlled by the GUC parameter **enableSeparationOfDuty**. - -- The database needs to be restarted when the separation of duties is enabled, disabled, or switched. In addition, improper user permissions in the new model cannot be automatically identified. The database administrator needs to manually identify and rectify the fault.
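A minimal sketch of the role-based model follows; the user names, passwords, and the target table are illustrative, and the MONADMIN/POLADMIN attributes are assumed to be supported by your version:

```sql
-- Create a monitoring administrator and a security policy administrator (illustrative names).
CREATE USER mon_admin WITH MONADMIN PASSWORD 'Mon@123456';
CREATE USER pol_admin WITH POLADMIN PASSWORD 'Pol@123456';

-- Grant a common user only the minimum permissions required for its task
-- (public.orders is an assumed, pre-existing table).
CREATE USER app_user PASSWORD 'App@123456';
GRANT SELECT ON public.orders TO app_user;
```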
- -## Dependencies - -None - -## Related Pages - -[Operation Audit](../../reference-guide/guc-parameters/auditing/operation-audit.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/10-row-level-access-control.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/10-row-level-access-control.md deleted file mode 100644 index 9f2fe222..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/10-row-level-access-control.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Row-Level Access Control -summary: Row-Level Access Control -author: Guo Huan -date: 2022-05-07 ---- - -# Row-Level Access Control - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -The row-level access control feature enables database access control to be accurate to each row of data tables. When different users perform the same SQL query operation, the read results may be different. - -## Benefits - -Different users performing the same SQL query see only the rows they are authorized to read, which protects row-level data from unauthorized access. - -## Description - -You can create a row-level access control policy for a data table. The policy defines an expression that takes effect only for specific database users and SQL operations. When a database user accesses the data table, if a SQL statement meets the specified row-level access control policy of the data table, the expressions that meet the specified condition will be combined by using **AND** or **OR** based on the attribute type (**PERMISSIVE** or **RESTRICTIVE**) and applied to the execution plan in the query optimization phase. - -Row-level access control is used to control the visibility of row-level data in tables. By predefining filters for data tables, the expressions that meet the specified condition can be applied to execution plans in the query optimization phase, which will affect the final execution result. Currently, the SQL statements that can be affected include **SELECT**, **UPDATE**, and **DELETE**. - -## Enhancements - -None - -## Constraints - -- Row-level access control policies can be applied only to **SELECT**, **UPDATE**, and **DELETE** operations and cannot be applied to **INSERT** and **MERGE** operations. -- Row-level access control policies can be defined for row-store tables, row-store partitioned tables, column-store tables, column-store partitioned tables, replication tables, unlogged tables, and hash tables. Row-level access control policies cannot be defined for foreign tables and temporary tables. -- Row-level access control policies cannot be defined for views. -- A maximum of 100 row-level access control policies can be defined for a table. -- Initial users and system administrators are not affected by row-level access control policies. -- If a dynamic data anonymization policy is configured for a table that has the row-level access control policies defined, grant the trigger permission of the table to other users with caution to prevent other users from using the trigger to bypass the anonymization policy.
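As a hedged sketch (openGauss-style DDL; the table, column, and policy names are illustrative), a policy that lets each user read only their own rows could look like this:

```sql
-- Illustrative table: each row records its owner.
CREATE TABLE public.doc (id int, owner name, content text);
ALTER TABLE public.doc ENABLE ROW LEVEL SECURITY;

-- PERMISSIVE policy: SELECT, UPDATE, and DELETE see only rows whose owner is the current user.
CREATE ROW LEVEL SECURITY POLICY doc_rls ON public.doc
    AS PERMISSIVE FOR ALL TO public
    USING (owner = CURRENT_USER);
```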
- -## Dependencies - -None - -## Related Pages - -[Row-Level Access Control](../../security-guide/security/2-managing-users-and-their-permissions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/11-password-strength-verification.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/11-password-strength-verification.md deleted file mode 100644 index 0e8ce16e..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/11-password-strength-verification.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Password Strength Verification -summary: Password Strength Verification -author: Guo Huan -date: 2022-05-07 ---- - -# Password Strength Verification - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -Verifies the password strength when users access the database. - -## Benefits - -Users cannot set low-strength passwords, which enhances customer data security. - -## Description - -You need to specify a password when initializing a database, creating a user, or modifying a user. The password must meet the strength requirements. Otherwise, the system prompts you to enter the password again. Password complexity requirements: - -- Minimum number of uppercase letters (A-Z) (**password_min_uppercase**) -- Minimum number of lowercase letters (a-z) (**password_min_lowercase**) -- Minimum number of digits (0-9) (**password_min_digital**) -- Minimum number of special characters (**password_min_special**) -- Minimum password length (**password_min_length**) -- Maximum password length (**password_max_length**) -- A password must contain at least three types of the characters (uppercase letters, lowercase letters, digits, and special characters). -- A password is case insensitive and cannot be the username or the username spelled backwards. -- A new password cannot be the current password or the current password spelled backwards. -- It cannot be a weak password. - - Weak passwords are passwords that are easy to crack. The definition of weak passwords may vary with users or user groups. Users can define their own weak passwords. - -If parameter **password_policy** is set to **1**, the default password complexity rule is used to check passwords. - -Passwords in the weak password dictionary are stored in the **gs_global_config** system catalog (the record whose name field is **weak_password** is the stored weak password). When a user is created or modified, the password set by the user is compared with the password stored in the weak password dictionary. If the password is matched, a message is displayed, indicating that the password is weak and the password fails to be set. - -The weak password dictionary is empty by default. You can add or delete weak passwords using the following syntax: - -```sql -CREATE WEAK PASSWORD DICTIONARY WITH VALUES ('password1'), ('password2'); - -DROP WEAK PASSWORD DICTIONARY; -``` - -In the preceding statement, **password1** and **password2** are weak passwords prepared by users. After the statement is executed successfully, the passwords are saved to the weak password system catalog. - -When a user attempts to run the CREATE WEAK PASSWORD DICTIONARY statement to insert a weak password that already exists in the table, only one weak password is retained in the table. - -The DROP WEAK PASSWORD DICTIONARY statement clears weak passwords in the entire system catalog.
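A quick sketch for inspecting the current settings (the queries use only the parameters and catalog described above; output depends on your configuration):

```sql
-- Check the password complexity parameters described above.
SELECT name, setting FROM pg_settings WHERE name LIKE 'password%';

-- List the weak passwords currently stored in the dictionary.
SELECT * FROM gs_global_config WHERE name = 'weak_password';
```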
- -The gs_global_config system catalog does not have a unique index. You are not advised to use the COPY FROM statement to copy the same data to the gs_global_config system catalog. - -To audit weak password operations, set the third bit of the value of the **audit_system_object** parameter to **1**. - -## Enhancements - -In MogDB, the weak password dictionary function is implemented. - -## Constraints - -- Initial users, system administrators, and security administrators can view, add, and delete weak password dictionaries. -- Common users can view but cannot add or delete weak password dictionaries. - -## Dependencies - -None - -## Related Pages - -[CREATE WEAK PASSWORD DICTIONARY](../../reference-guide/sql-syntax/CREATE-WEAK-PASSWORD-DICTIONARY.md) diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/12-equality-query-in-a-fully-encrypted-database.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/12-equality-query-in-a-fully-encrypted-database.md deleted file mode 100644 index cf14afe5..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/12-equality-query-in-a-fully-encrypted-database.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Equality Query in a Fully-encrypted Database -summary: Equality Query in a Fully-encrypted Database -author: Guo Huan -date: 2022-05-07 ---- - -# Equality Query in a Fully-encrypted Database - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The encrypted database aims to protect privacy throughout the data lifecycle. In this way, data is always in ciphertext during transmission, computing, and storage regardless of the service scenario and environment. After the data owner encrypts data on the client and sends the encrypted data to the server, no attacker can obtain valuable information even if the attacker steals user data by exploiting system vulnerabilities. In this way, data privacy is protected. - -## Benefits - -The entire service data flow is in ciphertext during data processing, so the following can be implemented by using a fully-encrypted database: - -1. Protect data privacy and security throughout the lifecycle on the cloud. Attackers cannot obtain valid information from the database server regardless of the data status. -2. Help cloud service providers obtain third-party trust. Users, including service administrators and O&M administrators in enterprise service scenarios and application developers in consumer cloud services, can keep keys by themselves so that users with high permissions cannot obtain valid data. -3. Enable cloud databases to better comply with personal privacy protection laws and regulations with the help of the fully-encrypted database. - -## Description - -From the perspective of users, the encrypted equality query functions are divided into three parts, which are implemented by the newly added KeyTool and the enhanced MogDB gsql client. - -First, this feature provides the client key management function. Users can use KeyTool to generate, destroy, and update CMKs, and import and export keys. With the import and export functions of KeyTool, CMKs can be transmitted between different clients. In addition, the KeyTool implements key management on a single client. By configuring management files, you can store and update keys. - -In addition, this feature provides the key creation and encrypted table creation functions. 
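Before the syntax details below, a hedged end-to-end sketch (openGauss-style DDL; the key store, key path, algorithm names, and table definition are illustrative assumptions):

```sql
-- Register a client master key (CMK); key store and path are illustrative.
CREATE CLIENT MASTER KEY cmk1 WITH (KEY_STORE = localkms, KEY_PATH = "key1", ALGORITHM = RSA_2048);

-- Create a column encryption key (CEK) protected by the CMK.
CREATE COLUMN ENCRYPTION KEY cek1 WITH VALUES (CLIENT_MASTER_KEY = cmk1, ALGORITHM = AEAD_AES_256_CBC_HMAC_SHA256);

-- DETERMINISTIC encryption on the sensitive column allows equality query on ciphertext.
CREATE TABLE creditcard_info (
    id_number   int,
    name        text,
    credit_card varchar(19) ENCRYPTED WITH (COLUMN_ENCRYPTION_KEY = cek1, ENCRYPTION_TYPE = DETERMINISTIC)
);

-- Run from an encryption-enabled client; the literal is encrypted before it leaves the client.
SELECT name FROM creditcard_info WHERE credit_card = '4111111111111111';
```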
The SQL syntax CREATE CLIENT MASTER KEY and CREATE COLUMN ENCRYPTION KEY are added to record and manage CMK and CEK metadata in the database. The CMK and CEK information is recorded in the new system catalog. The CREATE TABLE syntax is extended to specify a column encryption key and encryption algorithm for each sensitive information column in a table, facilitating subsequent ciphertext data storage. - -This feature supports the encrypted equality query function, which is the core of the entire feature. Although users are unaware of the ciphertext query, the query of sensitive data is restricted by the specifications of the current encrypted equality query. - -From the overall perspective, this feature is used to store and manage data based on sensitive data protection requirements and implement query tasks based on ciphertext data. - -## Enhancements - -None. - -## Constraints - -- Data is encrypted at the column level, and encryption policies cannot be differentiated by row level. -- Except the RENAME operation, the ALTER TABLE syntax cannot be used to change columns in an encrypted table (including the conversion between encrypted and unencrypted columns). The ADD and DROP operations can be used to add and delete encrypted columns, respectively. -- The CHECK(COLUMN IS NOT NULL) syntax can be used, but most check constraint syntax cannot be set for encrypted columns. -- When **support_extended_features** is set to **off**, primary key and unique cannot be used for encrypted columns. When **support_extended_features** is set to **on**, only primary key and unique can be used for encrypted columns. -- Different data types cannot be implicitly converted. -- The set operation cannot be performed between ciphertexts of different data types. -- Range partitioning cannot be created for encrypted columns. -- Only the repeat and empty_blob() functions can be used on encrypted columns. -- The current version supports only gsql and JDBC (deployed on a Linux OS) clients. Other clients such as ODBC do not support encrypted equality query. -- Data can only be imported to the encrypted table by running **copy from stdin**, **\copy**, or **insert into values (…)** on the client. -- Copying an encrypted table to a file is not supported. -- Except equality query, ciphertext queries such as sorting, range query, and fuzzy query are not supported. -- The encrypted syntax of stored procedures for some functions is supported. For details about the constraints, see “Encrypted Functions and Stored Procedures” in the *Developer Guide*. -- Non-encrypted table data cannot be inserted into encrypted table data using the **INSERT INTO… SELECT…** or **MERGE INTO** syntax. -- For a request in connection state, the CEK information change on the server can be detected only after the cache update operation is triggered (for example, the user is changed or the encrypted column fails to be decrypted) and the connection is re-established. -- Encrypted equality query is not supported on columns encrypted using the random encryption algorithm. -- An error is reported if the two attribute conditions used for comparison in the encrypted equality query use different data encryption keys. -- Encrypted equality query is not supported in time series tables and foreign tables. The ustore storage engine is not supported.
-- If the database service configuration (such as the pg_settings system catalog, permission, key, and encrypted column) is changed, you need to re-establish a JDBC connection to make the configuration take effect. -- Multiple SQL statements cannot be executed at the same time. This constraint does not apply to the scenario where the INSERT INTO statement is executed in multiple batches. -- The encrypted database does not encrypt zero-length empty strings. -- Deterministic encryption is prone to frequency attacks. Therefore, it is not recommended that deterministic encryption be used in scenarios where the plaintext frequency distribution is obviously skewed. -- Encrypted equality query supports the following data types: - -| Category | Type | Description | | -------------------- | ------------------ | ------------------------------------------------------------ | | Integer types | tinyint/tinyint(n) | Tiny integer, which is the same as int1. | | | smallint | Small integer, which is the same as int2. | | | int4 | Common integer. | | | binary_integer | Oracle compatibility type. Generally, the value is an integer. | | | bigint/bigint(n) | Big integer, which is the same as int8. | | Numeric data types | numeric(p,s) | A number with the precision **p**. | | | number | Oracle compatibility type, which is the same as numeric(p,s). | | Floating point types | float4 | Single-precision floating point. | | | float8 | Double-precision floating point. | | | double precision | Double-precision floating point. | | Character data types | char/char(n) | Fixed-length character string. If the length is insufficient, add spaces. The default precision is **1**. | | | varchar(n) | Variable-length character string, where **n** indicates the maximum number of bytes. | | | text | Text type. | | | varchar2(n) | Oracle compatibility type, which is the same as varchar(n). | | | clob | Character large object. | | Binary data types | bytea | Variable-length binary string. | | | blob | Binary large object. | - -## Dependencies - -None. - -## Related Pages - -[Setting Encrypted Equality Query](../../security-guide/security/4-setting-encrypted-equality-query.md) diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/13-ledger-database-mechanism.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/13-ledger-database-mechanism.md deleted file mode 100644 index 95289c7a..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/13-ledger-database-mechanism.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Ledger Database Mechanism -summary: Ledger Database Mechanism -author: Guo Huan -date: 2022-05-07 ---- - -# Ledger Database Mechanism - -## Availability - -This feature is available since MogDB 2.1.0. - -## Introduction - -The ledger database feature includes adding the verification information to a tamper-proof table specified by a user and recording the user's data operation history. The consistency between the data and operation history is checked to ensure that the user data cannot be maliciously tampered with. When a user performs DML operations on a tamper-proof table, the system adds a small amount of additional row-level verification information to the table and records the SQL statements and data change history. The feature provides a verification API for users to check whether the data in the tamper-proof table is consistent with the operation information recorded by the system.
- -## Benefits - -The ledger database provides user data operation records, historical data change records, and an easy-to-use consistency verification API to help users check whether sensitive information in the database is maliciously tampered with at any time, effectively improving the tamper-proof capability of the database. - -## Description - -The ledger database uses the ledger schema to isolate common tables from tamper-proof user tables. If a row-store table created in the ledger schema has the tamper-proof attribute, it is a tamper-proof user table. When data is inserted into a tamper-proof user table, the system automatically generates a small amount of row-level verification information. When a user executes DML, the system records user operations in the global blockchain table (GS_GLOBAL_CHAIN) and records data changes in the historical table corresponding to the user table. The data in operation records, data change records, and the user table must be the same. The ledger database provides a high-performance verification API for users to verify data consistency. If the consistency verification fails, the data may be tampered with. In this case, contact the audit administrator to trace the operation history. - -## Enhancements - -None. - -## Constraints - -- In a tamper-proof schema, row-store tables are tamper-proof, whereas temporary tables, unlogged tables, column-store tables, and time series tables are not. -- The structure of the tamper-proof user table cannot be modified. The tamper-proof tables cannot be truncated. The tamper-proof user table cannot be switched to a common schema. The non-tamper-proof table cannot be switched to the tamper-proof schema. -- If the tamper-proof table is a partitioned table, operations such as exchange partition, drop partition, and truncate partition are not supported. -- Functions and triggers cannot be used to modify data in a tamper-proof user table. -- When a tamper-proof user table is created, the column named **hash** cannot exist. -- Common users can call the tampering verification API to verify only tables that they have the permission to query. -- Only the audit administrator and initial user can query the global blockchain table and tables in BLOCKCHAIN schema. Common users cannot access these tables, and no user can modify them. -- According to the naming rules of historical tables, if the name of the schema or table to be created ends or starts with an underscore (_), the name of the corresponding historical table may conflict with that of an existing table. In this case, you need to rename the table. -- Currently, the hash digest of user row-level data in the ledger database is used only to ensure data consistency. It cannot prevent attackers from directly tampering with data files. - -## Dependencies - -None.
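For reference, a minimal usage sketch (the WITH BLOCKCHAIN clause follows the openGauss-style ledger syntax; the verification function name and signature are assumptions):

```sql
-- Create a tamper-proof schema; row-store tables created in it are tamper-proof user tables.
CREATE SCHEMA ledgernsp WITH BLOCKCHAIN;
CREATE TABLE ledgernsp.usertable (id int, name text);
INSERT INTO ledgernsp.usertable VALUES (1, 'alex');

-- Verify consistency between the user table and its history table (assumed API).
SELECT pg_catalog.ledger_hist_check('ledgernsp', 'usertable');
```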
- -## Related Pages - -[Setting a Ledger Database](../../security-guide/security/5-setting-a-ledger-database.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/14-transparent-data-encryption.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/14-transparent-data-encryption.md deleted file mode 100644 index 795577e1..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/14-transparent-data-encryption.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Transparent Data Encryption -summary: Transparent Data Encryption -author: Guo Huan -date: 2022-05-07 ---- - -# Transparent Data Encryption - -## Availability - -This feature is available since MogDB 2.1.0. - -## Introduction - -Transparent data encryption (TDE) encrypts data when the database writes the data to the storage medium and automatically decrypts the data when reading the data from the storage medium. This prevents attackers from reading data in the data file without database authentication, solving the static data leakage problem. This function is almost transparent to the application layer. You can determine whether to enable the transparent data encryption function as required. - -## Benefits - -To prevent attackers from reading data files without authentication, you can use the transparent data encryption function to encrypt data files in the database. This ensures that users can read decrypted data only after starting and connecting to the database properly. - -## Description - -The three-layer key structure is used to implement the key management mechanism, including the root key (RK), cluster master key (CMK), and data encryption key (DEK). CMKs are encrypted and protected by RKs, and DEKs are encrypted and protected by CMKs. DEKs are used to encrypt and decrypt user data. Each table corresponds to a DEK. - -Table-level encryption is supported. When creating a table, you can specify whether to encrypt the table and the encryption algorithm to be used. The encryption algorithm can be AES_128_CTR or SM4_CTR, which cannot be changed once specified. If an encrypted table is created, the database automatically applies for a DEK for the table and saves the encryption algorithm, key ciphertext, and corresponding CMK ID in the **reloptions** column of the pg_class system catalog in keyword=value format. - -You can switch an encrypted table to a non-encrypted table or switch a non-encrypted table to an encrypted table. If the encryption function is not enabled when a table is created, the table cannot be switched to an encrypted table. - -For encrypted tables, DEK rotation is supported. After the key rotation, the data encrypted using the old key is decrypted using the old key, and the newly written data is encrypted using the new key. The encryption algorithm is not changed during key rotation. - -## Enhancements - -None. - -## Constraints - -The current version interconnects with HUAWEI CLOUD KMS to support table-level key storage and row-store table encryption. The specifications are as follows: - -- Encryption of a row-store table stored as a heap is supported. -- Column-store encryption, materialized view encryption, and ustore storage engine encryption are not supported. -- Indexes, sequences, Xlogs, MOTs, and system catalogs cannot be encrypted. -- You can specify an encryption algorithm when creating a table. Once specified, the encryption algorithm cannot be changed. 
If **enable_tde** is set to **on** but the encryption algorithm **encrypt_algo** is not specified when a table is created, the AES_128_CTR encryption algorithm is used by default. -- If the encryption function is not enabled or the encryption algorithm is not specified when a table is created, the table cannot be switched to an encrypted table. -- For a table that has been assigned an encryption key, switching between the encrypted and unencrypted states of the table does not change the key or encryption algorithm. -- Data key rotation is supported only when the table encryption function is enabled. -- Cross-region primary/standby synchronization of multiple copies in a single cluster is not supported. Cross-region scaling of a single cluster is not supported. Cross-region backup and restoration, cluster DR, and data migration are not supported. -- In hybrid cloud scenarios, if the HUAWEI CLOUD KMS and management plane functions are used, transparent data encryption is supported. Other KMS services are not supported if their APIs are incompatible. -- The query performance of encrypted tables is lower than that of non-encrypted tables. If high performance is required, exercise caution when enabling the encryption function. - -## Dependencies - -The key management service is provided by the external KMS. The current version can interconnect with HUAWEI CLOUD KMS. - -## Related Pages - -[Configuring TDE](../../security-guide/security/6-transparent-data-encryption.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/2-separation-of-control-and-access-permissions.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/2-separation-of-control-and-access-permissions.md deleted file mode 100644 index 85fe2a32..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/2-separation-of-control-and-access-permissions.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Separation of Control and Access Permissions -summary: Separation of Control and Access Permissions -author: Guo Huan -date: 2022-05-07 ---- - -# Separation of Control and Access Permissions - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -The control permissions and the access permissions can be separated. - -## Benefits - -The control permissions of database administrators for tables need to be isolated from their access permissions to improve the data security of common users. - -## Description - -If multiple business departments use different database users to perform service operations and a database maintenance department at the same level uses database administrators to perform O&M operations, the business departments may require that database administrators can only perform control operations (**DROP**, **ALTER**, and **TRUNCATE**) and cannot perform access operations (**INSERT**, **DELETE**, **UPDATE**, **SELECT**, and **COPY**) without authorization. That is, the control permissions of database administrators for tables need to be isolated from their access permissions to improve the data security of common users. - -In separation-of-duties mode, a database administrator does not have permissions for the tables in schemas of other users. In this case, database administrators have neither control permissions nor access permissions. This does not meet the requirements of the business departments mentioned above. Therefore, MogDB provides private users to solve the problem.
That is, create private users with the **INDEPENDENT** attribute in non-separation-of-duties mode. Users with the CREATEROLE permission or the system administrator permission can create private users or change the attributes of common users to private users. Common users can also change their own attributes to private users. - -```sql -MogDB=# CREATE USER user_independent WITH INDEPENDENT IDENTIFIED BY "1234@abc"; -``` - -System administrators can manage (**DROP**, **ALTER**, and **TRUNCATE**) table objects of private users but cannot access (**INSERT**, **DELETE**, **SELECT**, **UPDATE**, **COPY**, **GRANT**, **REVOKE**, and **ALTER OWNER**) the objects before being authorized. - -## Enhancements - -None - -## Constraints - -For a table owned by a private user, grant the trigger permission of the table to other users with caution to prevent other users from using the trigger to view the data of the private user. - -If permissions related to private user tables are granted to non-private users, the system administrator will obtain the same permissions. - -## Dependencies - -None - -## Related Pages - -[CREATE USER](../../reference-guide/sql-syntax/CREATE-USER.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/3-database-encryption-authentication.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/3-database-encryption-authentication.md deleted file mode 100644 index d7f30ab4..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/3-database-encryption-authentication.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Database Encryption Authentication -summary: Database Encryption Authentication -author: Guo Huan -date: 2022-05-07 ---- - -# Database Encryption Authentication - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The client/server (C/S) mode-based client connection authentication mechanism is provided. - -## Benefits - -The unidirectional, irreversible hash encryption algorithm PBKDF2 is used for encryption and authentication, effectively defending against rainbow table attacks. - -## Description - -MogDB uses a basic client connection authentication mechanism. After a client initiates a connection request, the server verifies the information and sends the information required for authentication to the client based on the verification result. The authentication information includes the salt, token, and server signature. The client responds to the request and sends the authentication information to the server. The server calls the authentication module to authenticate the client authentication information. The user password is encrypted and stored in the memory. During the entire authentication process, passwords are encrypted for storage and transmission. When the user logs in to the system next time, the hash value is calculated and compared with the key value stored on the server to verify the correctness. - -## Enhancements - -The message processing flow in the unified encryption and authentication process effectively prevents attackers from cracking the username or password by capturing packets.
- -## Constraints - -None - -## Dependencies - -None - -## Related Pages - -[Client Access Authentication](../../security-guide/security/1-client-access-authentication.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/4-data-encryption-and-storage.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/4-data-encryption-and-storage.md deleted file mode 100644 index cde3aa4f..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/4-data-encryption-and-storage.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Data Encryption and Storage -summary: Data Encryption and Storage -author: Guo Huan -date: 2022-05-07 ---- - -# Data Encryption and Storage - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Imported data is encrypted before being stored. - -## Benefits - -You can use encrypted import interfaces to encrypt sensitive information and store it in a table. - -## Description - -MogDB provides the encryption functions **gs_encrypt_aes128()** and **gs_encrypt()**, and the decryption functions **gs_decrypt_aes128()** and **gs_decrypt()**. Before you import data to a certain column in a table, you can use these functions to encrypt the data. The functions can be called using statements in the following format: - -```shell -gs_encrypt_aes128(column, key), gs_encrypt(decryptstr, keystr, decrypttype) -``` - -In the preceding command, **key** indicates the initial password specified by the user, which is used to derive the encryption key. To encrypt an entire table, you need to write an encryption function for each column. - -If a user with the required permission wants to view specific data, the user can decrypt the required columns using the decryption function interface **gs_decrypt_aes128(column, key)**. To invoke the interface, run the following command: - -```shell -gs_decrypt_aes128(column, key), gs_decrypt(decryptstr, keystr, decrypttype) -``` - -## Enhancements - -None. - -## Constraints - -None. - -## Dependencies - -None. - -## Related Pages - -[Security Functions](../../reference-guide/functions-and-operators/security-functions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/5-database-audit.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/5-database-audit.md deleted file mode 100644 index 21297b85..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/5-database-audit.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Database Audit -summary: Database Audit -author: Guo Huan -date: 2022-05-07 ---- - -# Database Audit - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -Audit logs record database startup and stopping, connections, and the DDL, DML, and DCL operations performed by users. - -## Benefits - -The audit log mechanism enhances the database capability of tracing unauthorized operations and collecting evidence. - -## Description - -Database security is essential for a database system. MogDB writes all user operations in the database into audit logs. Database security administrators can use the audit logs to reproduce a series of events that cause faults in the database and identify unauthorized users, unauthorized operations, and the time when these operations are performed.
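As a usage sketch, audit records for a time window can be retrieved with the openGauss-style query interface (treat the exact function signature and column list as assumptions):

```sql
-- Query audit logs generated between two points in time.
SELECT time, type, result, username, detail_info
FROM pg_query_audit('2024-10-01 08:00:00', '2024-10-01 18:00:00');
```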
- -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - -[Configuring Database Audit](../../security-guide/security/3-configuring-database-audit.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/6-network-communication-security.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/6-network-communication-security.md deleted file mode 100644 index 8daa5c01..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/6-network-communication-security.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Network Communication Security -summary: Network Communication Security -author: Guo Huan -date: 2022-05-07 ---- - -# Network Communication Security - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -To secure the transmission of sensitive data on the Internet, MogDB encrypts communication between the server and the client using the Secure Socket Layer (SSL) protocol. - -## Benefits - -The communication between your client and the server can be secured. - -## Description - -MogDB supports the SSL protocol. The SSL protocol is an application-layer communication protocol with high security, which is mainly used for secure web transmission. SSL contains a record layer and a handshake layer. The record-layer protocol determines the encapsulation format of the transport-layer data. The handshake protocol uses X.509 for authentication. The SSL protocol uses asymmetric encryption algorithms to authenticate the identities of communicating parties, and then the two parties exchange symmetric keys as communication keys. The SSL protocol effectively ensures the confidentiality and reliability of the communication between two applications and prevents the communication between a client and a server from being eavesdropped by attackers. - -MogDB also supports the TLS 1.2 protocol. TLS 1.2 is a transport-layer communication protocol with high security. It consists of the TLS Record and TLS Handshake protocols. Each protocol suite has information in multiple formats. The TLS protocol is independent of application-layer protocols. Upper-layer protocols can run transparently on top of the TLS protocol. The TLS protocol ensures the data confidentiality and integrity for both communication parties. - -## Enhancements - -Checking the strength of certificate signature algorithms: For low-strength signature algorithms, alarms are reported, reminding you to replace the certificate with another certificate containing a high-strength signature algorithm. - -Checking the certificate validity period: If a certificate is about to expire in less than seven days, an alarm is reported, reminding you to replace the certificate on the client. - -Checking certificate permissions: The certificate permissions are verified at the connection setup stage. - -## Constraints - -The formal certificates and keys for servers and clients shall be obtained from the Certificate Authority (CA). Assume the private key and certificate for a server are **server.key** and **server.crt**, the private key and certificate for the client are **client.key** and **client.crt**, and the CA root certificate is **cacert.pem**. - -You need to enable the SSL protocol and configure the certificate and connection mode.
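For example, the server-side SSL configuration can be confirmed from SQL (a sketch; it only reads standard GUC parameters):

```sql
-- Inspect SSL-related server parameters.
SELECT name, setting FROM pg_settings WHERE name LIKE 'ssl%';
SHOW ssl;
```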
- -## Dependencies - -OpenSSL - -## Related Pages - -[Managing SSL Certificates](../../security-guide/security/1-client-access-authentication.md#managing-ssl-certificates) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/7-resource-label.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/7-resource-label.md deleted file mode 100644 index c17280d3..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/7-resource-label.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Resource Label -summary: Resource Label -author: Guo Huan -date: 2022-05-07 ---- - -# Resource Label - -## Availability - -This feature is available as of MogDB 1.1.0. - -## Introduction - -Database resources refer to database objects, such as databases, schemas, tables, columns, views, and triggers. The more database objects there are, the more complex the classified management of database resources becomes. The resource label mechanism is a technology that classifies and labels database resources with certain features to implement resource classification management. After adding labels to some resources in a database, administrators can perform operations such as data audit or anonymization using the labels to implement security management on labeled database resources. - -## Benefits - -Proper resource labels can be used to effectively classify data objects, improve management efficiency, and simplify security policy configuration. To perform unified audit or data anonymization on a group of database resources, the administrator can allocate a resource label to these resources first. The label indicates that the database resources have a certain feature or require unified configuration of a certain policy. The administrator can directly perform operations on the resource label, which greatly reduces policy configuration complexity and information redundancy and improves management efficiency. - -## Description - -The resource label mechanism selectively classifies resources in the current database. Administrators can use the following SQL syntax to create a resource label and add the label to a group of database resources: - -```sql -CREATE RESOURCE LABEL schm_lb ADD SCHEMA(schema_for_label); -CREATE RESOURCE LABEL tb_lb ADD TABLE(schema_for_label.table_for_label); -CREATE RESOURCE LABEL col_lb ADD COLUMN(schema_for_label.table_for_label.column_for_label); -CREATE RESOURCE LABEL multi_lb ADD SCHEMA(schema_for_label), TABLE(table_for_label); -``` - -**schema_for_label**, **table_for_label**, and **column_for_label** indicate the schema, table, and column to be labeled, respectively. The **schm_lb** label is added to schema **schema_for_label**, **tb_lb** is added to table **table_for_label**, **col_lb** is added to column **column_for_label**, and **multi_lb** is added to schema **schema_for_label** and table **table_for_label**. You can perform unified audit or dynamic data anonymization using the configured resource labels, that is, manage all labeled database resources. - -Currently, resource labels support the following database resource types: schema, table, column, view, and function. - -## Enhancements - -None - -## Constraints - -- Resource labels can be created only by a user with the **POLADMIN** and **SYSADMIN** attributes or an initial user. -- Resource labels cannot be created for temporary tables. -- Columns in the same basic table can belong to only one resource label.
- -## Dependencies - -None - -## Related Pages - -[CREATE RESOURCE LABEL](../../reference-guide/sql-syntax/CREATE-RESOURCE-LABEL.md), [ALTER RESOURCE LABEL](../../reference-guide/sql-syntax/ALTER-RESOURCE-LABEL.md), [DROP RESOURCE LABEL](../../reference-guide/sql-syntax/DROP-RESOURCE-LABEL.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/8-unified-audit.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/8-unified-audit.md deleted file mode 100644 index bbbfebcc..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/8-unified-audit.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Unified Audit -summary: Unified Audit -author: Guo Huan -date: 2022-05-07 ---- - -# Unified Audit - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The audit mechanism is a security management solution that can effectively counter repudiation by attackers. The larger the audit scope is, the more operations can be monitored and the more audit logs are generated, affecting the actual audit efficiency. The unified audit mechanism is a technology that implements efficient security audit management by customizing audit policies. After the administrator defines the audit object and audit behaviors, if the task executed by a user is associated with an audit policy, the corresponding audit behavior is generated and the audit log is recorded. Customized audit policies can cover common user management activities, as well as DDL and DML operations, meeting routine audit requirements. - -## Benefits - -Audit is indispensable for routine security management. When a traditional audit mechanism is used to audit an operation, such as **SELECT**, a large number of audit logs are generated, increasing the I/O of the entire system and affecting the system performance and audit efficiency of administrators. The unified audit mechanism allows you to customize policies for generating audit logs. For example, only the operation that database account **A** queries table **a** is audited. Customized audit greatly reduces the number of generated audit logs, ensuring that the target behaviors are audited while reducing the impact on system performance. In addition, customized audit policies can improve the audit efficiency of administrators. - -## Description - -The unified audit mechanism customizes audit behaviors based on resource labels and classifies the supported audit behaviors into the **ACCESS** and **PRIVILEGES** classes. The SQL syntax for creating a complete audit policy is as follows: - -```sql -CREATE RESOURCE LABEL auditlabel add table(table_for_audit1, table_for_audit2); -CREATE AUDIT POLICY audit_select_policy ACCESS SELECT ON LABEL(auditlabel) FILTER ON ROLES(usera); -CREATE AUDIT POLICY audit_admin_policy PRIVILEGES ALTER, DROP ON LABEL(auditlabel) FILTER ON IP(local); -``` - -**auditlabel** indicates the resource label in the current audit, which contains two table objects. **audit_select_policy** defines the audit policy for user **usera** to audit the **SELECT** operation on the objects with the **auditlabel** label, regardless of the access source. **audit_admin_policy** defines a local audit policy for **ALTER** and **DROP** operations on the objects with the **auditlabel** label, regardless of the user. If **ACCESS** and **PRIVILEGES** are not specified, all DDL and DML operations on objects with a resource label are audited.
If no audit objects are specified, operations on all objects are audited. The addition, deletion, and modification of unified audit policies are also recorded in unified audit logs. - -Currently, unified audit supports the following audit behaviors: - -| **SQL Type** | Supported operations and object types | -| ------------ | ------------------------------------------------------------ | -| DDL | Operations: ALL, ALTER, ANALYZE, COMMENT, CREATE, DROP, GRANT, and REVOKE
SET SHOW
Objects: DATABASE, SCHEMA, FUNCTION, TRIGGER, TABLE, SEQUENCE, FOREIGN_SERVER, FOREIGN_TABLE, TABLESPACE, ROLE/USER, INDEX, VIEW, and DATA_SOURCE | -| DML | Operations: ALL, COPY, DEALLOCATE, DELETE, EXECUTE, REINDEX, INSERT, PREPARE, SELECT, TRUNCATE, and UPDATE | - -## Enhancements - -None. - -## Constraints - -- The unified audit policy must be created by a user with the **POLADMIN** or **SYSADMIN** attribute, or by the initial user. Common users do not have the permission to access the security policy system catalog and system view. - -- The syntax of a unified audit policy applies to either DDL or DML operations. DDL operations and DML operations are mutually exclusive in an audit policy. A maximum of 98 unified audit policies can be configured. - -- Unified audit monitors the SQL statements executed by users on the clients, but does not record the internal SQL statements of databases. - -- In the same audit policy, the same resource tag can be bound to different audit behaviors, and the same behavior can be bound to different resource tags. The ALL operation type includes all operations supported by DDL or DML. - -- A resource label can be associated with different unified audit policies. Unified audit outputs audit information in sequence based on the policies matched by SQL statements. - -- Audit logs of unified audit policies are recorded separately. Currently, no visualized query interfaces are provided. Audit logs depend on the OS service Rsyslog and are archived through the service configuration. - -- In cloud service scenarios, logs need to be stored in the OBS. In hybrid cloud scenarios, you can deploy Elasticsearch to collect CN logs and perform visualized processing. - -- It is recommended that **APP** in **FILTER** be set to applications in the same trusted domain. Since a client may be forged, a security mechanism must be formed on the client when **APP** is used to reduce misuse risks. Generally, you are not advised to set **APP**. If it is set, pay attention to the risk of client spoofing. - -- Taking an IPv4 address as an example, the following formats are supported: - - | IP Address Format | Example | - | -------------------- | ------------------------ | - | Single IP address | 127.0.0.1 | - | IP address with mask | 127.0.0.1\|255.255.255.0 | - | CIDR IP address | 127.0.0.1⁄24 | - | IP address segment | 127.0.0.1-127.0.0.5 | - -## Dependencies - -None. - -## Related Pages - -[CREATE RESOURCE LABEL](../../reference-guide/sql-syntax/CREATE-RESOURCE-LABEL.md), [CREATE AUDIT POLICY](../../reference-guide/sql-syntax/CREATE-AUDIT-POLICY.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/9-dynamic-data-anonymization.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/9-dynamic-data-anonymization.md deleted file mode 100644 index 20a64302..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/9-dynamic-data-anonymization.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Dynamic Data Masking -summary: Dynamic Data Masking -author: Guo Huan -date: 2022-05-07 ---- - -# Dynamic Data Masking - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Data masking is an effective database privacy protection solution, which can prevent attackers from snooping on private data. The dynamic data masking mechanism is a technology that protects privacy data by customizing masking policies. 
-
-## Benefits
-
-Data privacy protection is one of the required database security capabilities. It restricts attackers' access to private data, ensuring private data security. The dynamic data masking mechanism protects the privacy of specified database resources through configurable masking policies. In addition, masking policy configuration is flexible and can implement targeted privacy protection for specific user scenarios.
-
-## Description
-
-The dynamic data masking mechanism customizes masking policies based on resource labels. It can select masking modes based on site requirements or customize masking policies for specific users. The SQL syntax for creating a complete masking policy is as follows:
-
-```sql
-CREATE RESOURCE LABEL label_for_creditcard ADD COLUMN(user1.table1.creditcard);
-CREATE RESOURCE LABEL label_for_name ADD COLUMN(user1.table1.name);
-CREATE MASKING POLICY msk_creditcard creditcardmasking ON LABEL(label_for_creditcard);
-CREATE MASKING POLICY msk_name randommasking ON LABEL(label_for_name) FILTER ON IP(local), ROLES(dev);
-```
-
-**label_for_creditcard** and **label_for_name** are the resource labels for masking, each allocated one column object. **creditcardmasking** and **randommasking** are preset masking functions. **msk_creditcard** specifies that the masking policy **creditcardmasking** is applied when any user accesses resources with **label_for_creditcard**, regardless of the access source. **msk_name** specifies that the masking policy **randommasking** is applied when user **dev** accesses resources with **label_for_name** from a local connection. If **FILTER** is not specified, the policy takes effect for all users; otherwise, it takes effect only for the specified users.
-
-The following table shows the preset masking functions:
-
-| **Masking Function** | **Example** |
-| -------------------- | ------------------------------------------------------------ |
-| creditcardmasking | '4880-9898-4545-2525' will be anonymized as 'xxxx-xxxx-xxxx-2525'. This function anonymizes all digits except the last four. |
-| basicemailmasking | 'abcd@gmail.com' will be anonymized as 'xxxx@gmail.com'. This function anonymizes text before the first @. |
-| fullemailmasking | 'abcd@gmail.com' will be anonymized as 'xxxx@xxxxx.com'. This function anonymizes text before the first dot (.) (except @). |
-| alldigitsmasking | 'alex123alex' will be anonymized as 'alex000alex'. This function anonymizes only digits in the text. |
-| shufflemasking | 'hello word' will be anonymized as 'hlwoeor dl'. This weak masking function is implemented through character dislocation. You are not advised to use this function to anonymize strings with strong semantics. |
-| randommasking | 'hello word' will be anonymized as 'ad5f5ghdf5'. This function randomly anonymizes text by character. |
-| regexpmasking | You need to enter four parameters in sequence: **reg** indicates the string to be replaced, **replace_text** indicates the replacement string, **pos** indicates the position where replacement starts, and **reg_len** indicates the replacement length. **pos** and **reg_len** are of the integer type. **reg** and **replace_text** can be expressed by regular expressions. If **pos** is not specified, the default value is **0**. If **reg_len** is not specified, the default value is **-1**, indicating that all characters after **pos** are replaced. If the type of an input parameter does not match the expected type, the maskall function is used for anonymization.<br/>Example: `CREATE MASKING POLICY msk_creditcard regexpmasking('[\d+]', 'x', 5, 9) ON LABEL(label_for_creditcard);` |
-| maskall | '4880-9898-4545-2525' will be anonymized as 'xxxxxxxxxxxxxxxxxxx'. |
-
-The data types supported by each masking function are as follows:
-
-| **Masking Function** | **Supported Data Types** |
-| -------------------- | ------------------------------------------------------------ |
-| creditcardmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data in credit card format only) |
-| basicemailmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data in email format only) |
-| fullemailmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data in email format only) |
-| alldigitsmasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (character data containing digits only) |
-| shufflemasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (text data only) |
-| randommasking | BPCHAR, VARCHAR, NVARCHAR, TEXT (text data only) |
-| maskall | BOOL, RELTIME, TIME, TIMETZ, INTERVAL, TIMESTAMP, TIMESTAMPTZ, SMALLDATETIME, ABSTIME, TEXT, BPCHAR, VARCHAR, NVARCHAR2, NAME, INT8, INT4, INT2, INT1, NUMERIC, FLOAT4, FLOAT8, CASH |
-
-For unsupported data types, the **maskall** function is used for data masking by default. Data of the BOOL type is masked as **'0'**; the RELTIME type as **'1970'**; the TIME, TIMETZ, and INTERVAL types as **'00:00:00.0000+00'**; the TIMESTAMP, TIMESTAMPTZ, SMALLDATETIME, and ABSTIME types as **'1970-01-01 00:00:00.0000'**; the TEXT, CHAR, BPCHAR, VARCHAR, NVARCHAR2, and NAME types as **'x'**; and the INT8, INT4, INT2, INT1, NUMERIC, FLOAT4, and FLOAT8 types as **'0'**. If a data type is not supported by **maskall**, the masking policy cannot be created. If implicit conversion is involved for the masked column, masking is based on the data type after implicit conversion. In addition, if a masking policy is applied to a data column and takes effect, operations on the data in that column are performed based on the masked result.
-
-Dynamic data masking applies to scenarios closely related to actual services. It provides users with proper masking query APIs and error handling logic based on service requirements to prevent raw data from being obtained through credential stuffing.
-
-## Enhancements
-
-None.
-
-## Constraints
-
-- The dynamic data masking policy must be created by a user with the **POLADMIN** or **SYSADMIN** attribute, or by the initial user. Common users do not have the permission to access the security policy system catalogs and system views.
-
-- Dynamic data masking takes effect only on data tables for which masking policies are configured. Audit logs are not within the effective scope of the masking policies.
-
-- In a masking policy, only one masking mode can be specified for a resource label.
-
-- Multiple masking policies cannot be used to anonymize the same resource label, except when **FILTER** is used to specify the user scenarios in which the policies take effect and there is no intersection between the user scenarios of different masking policies that contain the same resource label. In this case, the policy by which a resource label is anonymized can be determined from the user scenario.
-
-- It is recommended that **APP** in **FILTER** be set to applications in the same trusted domain. Since a client may be forged, a security mechanism must be formed on the client when **APP** is used, to reduce the risk of misuse. Generally, you are not advised to set **APP**; if you do, pay attention to the risk of client spoofing.
-- For INSERT or MERGE INTO operations with a query clause, if the source table contains anonymized columns, the inserted or updated result of these operations is the anonymized value and cannot be restored.
-
-- When the built-in security policy is enabled, the ALTER TABLE EXCHANGE PARTITION statement fails to be executed if the source table contains anonymized columns.
-
-- If a dynamic data masking policy is configured for a table, grant the trigger permission on the table to other users with caution to prevent them from using a trigger to bypass the masking policy.
-
-- A maximum of 98 dynamic data masking policies can be created.
-
-- Only the preceding seven preset masking functions can be used (**maskall** is applied only as the default fallback).
-
-- Only data with resource labels containing the **COLUMN** attribute can be anonymized.
-
-- Only columns in base tables can be anonymized.
-
-- Only data queried using **SELECT** can be anonymized.
-
-- Taking an IPv4 address as an example, the following formats are supported:
-
-  | IP Address Format    | Example                  |
-  | -------------------- | ------------------------ |
-  | Single IP address    | 127.0.0.1                |
-  | IP address with mask | 127.0.0.1\|255.255.255.0 |
-  | CIDR IP address      | 127.0.0.1/24             |
-  | IP address segment   | 127.0.0.1-127.0.0.5      |
-
-## Dependencies
-
-None.
-
-## Related Pages
-
-[CREATE RESOURCE LABEL](../../reference-guide/sql-syntax/CREATE-RESOURCE-LABEL.md), [CREATE MASKING POLICY](../../reference-guide/sql-syntax/CREATE-MASKING-POLICY.md)
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/database-security.md b/product/en/docs-mogdb/v5.2/characteristic-description/database-security/database-security.md
deleted file mode 100644
index 8eaf2905..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/database-security/database-security.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-title: Database Security
-summary: Database Security
-author: Guo Huan
-date: 2023-05-22
----
-
-# Database Security
-
-+ **[Access Control Model](1-access-control-model.md)**
-+ **[Separation of Control and Access Permissions](2-separation-of-control-and-access-permissions.md)**
-+ **[Database Encryption Authentication](3-database-encryption-authentication.md)**
-+ **[Data Encryption and Storage](4-data-encryption-and-storage.md)**
-+ **[Database Audit](5-database-audit.md)**
-+ **[Network Communication Security](6-network-communication-security.md)**
-+ **[Resource Label](7-resource-label.md)**
-+ **[Unified Audit](8-unified-audit.md)**
-+ **[Dynamic Data Masking](9-dynamic-data-anonymization.md)**
-+ **[Row-Level Access Control](10-row-level-access-control.md)**
-+ **[Password Strength Verification](11-password-strength-verification.md)**
-+ **[Equality Query in a Fully-encrypted Database](12-equality-query-in-a-fully-encrypted-database.md)**
-+ **[Ledger Database Mechanism](13-ledger-database-mechanism.md)**
-+ **[Transparent Data Encryption](14-transparent-data-encryption.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/1-support-for-functions-and-stored-procedures.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/1-support-for-functions-and-stored-procedures.md
deleted file mode 100644
index cab0122d..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/1-support-for-functions-and-stored-procedures.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: Support for Functions and Stored Procedures
-summary: Support for Functions and Stored Procedures
-author: Guo Huan
-date: 2022-05-07
----
-
-# Support for Functions and Stored Procedures
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-Functions and stored procedures are important database objects. They encapsulate SQL statement sets used for certain functions so that the statements can be easily invoked.
-
-## Benefits
-
-1. Allows customers to modularize program designs and encapsulate SQL statement sets that are easy to invoke.
-2. Caches the compilation results of stored procedures to accelerate SQL statement set execution.
-3. Allows system administrators to restrict the permission for executing a specific stored procedure and control access to the corresponding type of data. This prevents access from unauthorized users and ensures data security.
-
-## Description
-
-MogDB supports functions and stored procedures compliant with the SQL standard. The stored procedures are compatible with certain mainstream stored procedure syntax, improving their usability.
-
-## Enhancements
-
-None
-
-## Constraints
-
-None
-
-## Dependencies
-
-None
-
-## Related Pages
-
-[Functions and Operators](../../reference-guide/functions-and-operators/functions-and-operators.md), [Stored Procedure](../../developer-guide/1-1-stored-procedure.md), [Overview of PL/pgSQL Functions](../../developer-guide/plpgsql/1-1-plpgsql-overview.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/10-autonomous-transaction.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/10-autonomous-transaction.md
deleted file mode 100644
index 06e7e628..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/10-autonomous-transaction.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: Autonomous Transaction
-summary: Autonomous Transaction
-author: Guo Huan
-date: 2022-05-07
----
-
-# Autonomous Transaction
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-An autonomous transaction is a type of transaction in which the commit of a sub-transaction is not affected by the commit or rollback of the main transaction.
-
-## Benefits
-
-This feature supports diversified application scenarios.
-
-## Description
-
-In an autonomous transaction, specified types of SQL statements are executed in an independent transaction context during the execution of the main transaction. The commit and rollback operations of an autonomous transaction are not affected by those of the main transaction.
-
-User-defined functions and stored procedures support autonomous transactions.
-
-A typical application scenario is as follows: a table is used to record operation information during the execution of the main transaction. When the main transaction fails and is rolled back, the operation information recorded in the table is not rolled back. A sketch of this scenario is shown after the constraints below.
-
-## Enhancements
-
-None
-
-## Constraints
-
-- A trigger function does not support autonomous transactions.
-- In the autonomous transaction block of a function or stored procedure, static SQL statements do not support variable transfer.
-- Autonomous transactions do not support nesting.
-- A function that contains an autonomous transaction cannot return values through its parameters.
-- A stored procedure or function that contains an autonomous transaction does not support exception handling.
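-
-The following is a minimal sketch of the typical logging scenario described above; the object names are illustrative, and it assumes the PL/pgSQL **PRAGMA AUTONOMOUS_TRANSACTION** syntax documented in the linked developer guide:
-
-```sql
-CREATE TABLE op_log (msg text);
-
-CREATE OR REPLACE PROCEDURE log_autonomous(m text)
-AS
-DECLARE
-    PRAGMA AUTONOMOUS_TRANSACTION;  -- commit independently of the main transaction
-BEGIN
-    INSERT INTO op_log VALUES (m);
-END;
-/
-
-BEGIN;
-CALL log_autonomous('step 1 done');
-ROLLBACK;
-
--- The log row survives the rollback of the main transaction:
-SELECT * FROM op_log;
-```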
-
-## Dependencies
-
-None
-
-## Related Pages
-
-[Autonomous Transaction](../../developer-guide/autonomous-transaction/1-introduction-to-autonomous-transaction.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/11-global-temporary-table.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/11-global-temporary-table.md
deleted file mode 100644
index 5ad5931b..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/11-global-temporary-table.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-title: Global Temporary Table
-summary: Global Temporary Table
-author: Guo Huan
-date: 2022-05-07
----
-
-# Global Temporary Table
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-A temporary table does not guarantee persistence. Its life cycle is usually bound to a session or transaction, and it can be used to store intermediate data during processing and to accelerate queries.
-
-## Benefits
-
-This feature improves the expressiveness and usability of temporary tables.
-
-## Description
-
-The metadata of a global temporary table is visible to all sessions and still exists after the sessions end. The user data, indexes, and statistics of a session are isolated from those of other sessions; each session can only view and modify the data it submitted itself.
-
-Global temporary tables have two modes: ON COMMIT PRESERVE ROWS and ON COMMIT DELETE ROWS. In the session-based ON COMMIT PRESERVE ROWS mode, user data is automatically cleared when a session ends. In the transaction-based ON COMMIT DELETE ROWS mode, user data is automatically cleared when a commit or rollback is performed. If the **ON COMMIT** option is not specified during table creation, the session level is used by default. Different from local temporary tables, you can specify a schema that does not start with **pg_temp_** when creating a global temporary table.
-
-## Enhancements
-
-Global temporary table processing is added on top of local temporary tables.
-
-## Constraints
-
-- Parallel scanning is not supported.
-- Temp tablespace is not supported.
-- Partitions are not supported.
-- GIST indexes are not supported.
-- User-defined statistics (**pg_statistic_ext**) are not supported.
-- ON COMMIT DROP is not supported.
-- Hash bucket cluster storage is not supported.
-- Column store is not supported.
-
-## Dependencies
-
-None
-
-## Related Pages
-
-[Global Temporary Table](../../reference-guide/guc-parameters/global-temporary-table.md), [Global Temporary Table Functions](../../reference-guide/functions-and-operators/global-temporary-table-functions.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/12-pseudocolumn-rownum.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/12-pseudocolumn-rownum.md
deleted file mode 100644
index 54c11da6..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/12-pseudocolumn-rownum.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: Pseudocolumn ROWNUM
-summary: Pseudocolumn ROWNUM
-author: Guo Huan
-date: 2022-05-07
----
-
-# Pseudocolumn ROWNUM
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-ROWNUM is a sequence number generated for each record in a query result. The sequence number starts from 1 and is unique.
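-
-A minimal illustration (the table name and data are hypothetical):
-
-```sql
-CREATE TABLE t (id int);
-INSERT INTO t VALUES (10), (20), (30), (40);
-
--- ROWNUM numbers qualifying rows starting from 1; this returns the first
--- two rows, which is equivalent to LIMIT 2:
-SELECT rownum, id FROM t WHERE rownum <= 2;
-```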
-
-## Benefits
-
-- This feature is compatible with Oracle features, facilitating database migration.
-- Similar to the LIMIT feature, this feature can filter out the first *n* records in a result set.
-
-## Description
-
-ROWNUM is a pseudocolumn that numbers the records that meet the conditions of an SQL query in sequence. In the query result, the value of **ROWNUM** in the first row is **1**, the value of **ROWNUM** in the second row is **2**, and so on; the value of **ROWNUM** in the *n*th row is *n*. This feature is used to filter the first *n* rows of data in a query result set, which is similar to the LIMIT function in MogDB.
-
-## Enhancements
-
-During internal execution, the optimizer rewrites ROWNUM into LIMIT to accelerate execution.
-
-## Constraints
-
-- Do not use the pseudocolumn ROWNUM as an alias, to avoid ambiguity in SQL statements.
-- Do not use ROWNUM when creating an index. Bad example: **create index index_name on table(rownum);**
-- Do not use ROWNUM as the default value when creating a table. Bad example: **create table table_name(id int default rownum);**
-- Do not use ROWNUM as an alias in the WHERE clause. Bad example: **select rownum rn from table where rn < 5;**
-- Do not use ROWNUM when inserting data. Bad example: **insert into table values (rownum,'blue')**
-- Do not use ROWNUM in a table-less query. Bad example: **select \* from (values(rownum,1)), x(a,b);**
-- If the HAVING clause contains ROWNUM (outside an aggregate function), the GROUP BY clause must also contain ROWNUM (outside an aggregate function).
-
-## Dependencies
-
-None
-
-## Related Pages
-
-[Simple Expressions](../../reference-guide/sql-reference/expressions/simple-expressions.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/13-stored-procedure-debugging.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/13-stored-procedure-debugging.md
deleted file mode 100644
index 7ee1c260..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/13-stored-procedure-debugging.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: Stored Procedure Debugging
-summary: Stored Procedure Debugging
-author: Guo Huan
-date: 2022-05-07
----
-
-# Stored Procedure Debugging
-
-## Availability
-
-This feature was introduced in MogDB 1.1.0. It was temporarily removed when the third-party library code directory structure was adjusted, and is available again since MogDB 1.1.0.
-
-## Introduction
-
-This feature provides a group of APIs for debugging stored procedures, such as breakpoint debugging and variable printing.
-
-## Benefits
-
-This feature improves user experience in developing stored procedures based on MogDB.
-
-## Description
-
-Stored procedures are important database objects. They encapsulate SQL statement sets used for certain functions so that the statements can be easily invoked. A stored procedure usually contains many SQL statements and procedural execution structures, depending on the service scale. However, writing a large stored procedure is usually accompanied by logic bugs, and it is difficult or even impossible to find these bugs by merely executing the stored procedure. Therefore, a debugging tool is required.
-
-The stored procedure debugging tool provides a group of debugging APIs that enable a stored procedure to be executed step by step. During the execution, you can set breakpoints and print variables, so that SQL developers can detect and correct errors in time and develop functions efficiently and with high quality.
-
-## Enhancements
-
-None
-
-## Constraints
-
-None
-
-## Dependencies
-
-None
-
-## Related Pages
-
-[Debugging](../../developer-guide/plpgsql/1-13-debugging.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/14-jdbc-client-load-balancing-and-readwrite-isolation.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/14-jdbc-client-load-balancing-and-readwrite-isolation.md
deleted file mode 100644
index e586adb6..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/14-jdbc-client-load-balancing-and-readwrite-isolation.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-title: JDBC Client Load Balancing and Read/Write Isolation
-summary: JDBC Client Load Balancing and Read/Write Isolation
-author: Guo Huan
-date: 2022-05-07
----
-
-# JDBC Client Load Balancing and Read/Write Isolation
-
-## Availability
-
-This feature is available since MogDB 2.1.0.
-
-## Introduction
-
-The JDBC client provides load balancing and read/write isolation capabilities.
-
-## Benefits
-
-Load balancing and read/write isolation can be configured on the JDBC client.
-
-## Description
-
-The IP addresses and port numbers of multiple nodes can be configured on the client to adapt to HA switchover between multiple AZs and remote DR switchover. Connection-level read/write isolation can be configured, connections can preferentially go to read-only nodes, and load is evenly distributed across multiple read-only nodes.
-
-## Enhancements
-
-None.
-
-## Constraints
-
-None.
-
-## Dependencies
-
-None.
-
-## Related Pages
-
-[Example: JDBC Primary And Backup Cluster Load Balancing](../../developer-guide/dev/2-development-based-on-jdbc/example-jdbc-primary-and-backup-cluster-load-balancing.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/15-in-place-update-storage-engine.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/15-in-place-update-storage-engine.md
deleted file mode 100644
index 7733ab7a..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/15-in-place-update-storage-engine.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-title: In-place Update Storage Engine
-summary: In-place Update Storage Engine
-author: Guo Huan
-date: 2022-05-07
----
-
-# In-place Update Storage Engine
-
-## Availability
-
-This feature is available since MogDB 2.1.0.
-
-## Introduction
-
-The in-place update storage engine is a new storage mode added to MogDB. The row storage engine used by earlier versions of MogDB works in append update mode. Append update performs well for insertion, deletion, and HOT (Heap Only Tuple) updates (that is, updates on the same page). However, for non-HOT UPDATE scenarios across data pages, garbage collection is not efficient. The Ustore storage engine solves this problem.
-
-## Benefits
-
-The in-place update storage engine can effectively reduce the storage space occupied after tuples are updated multiple times.
-
-## Description
-
-The in-place update storage engine solves the space expansion and large tuple problems of the append update storage engine. The design of efficient rollback segments is the basis of the in-place update storage engine.
-
-## Enhancements
-
-None.
-
-## Constraints
-
-None.
-
-## Dependencies
-
-None.
-
-## Related Pages
-
-[Configuring Ustore](../../performance-tuning/system-tuning/configuring-ustore.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/16-publication-subscription.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/16-publication-subscription.md
deleted file mode 100644
index 3c36b327..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/16-publication-subscription.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-title: Publication-Subscription
-summary: Publication-Subscription
-author: Guo Huan
-date: 2022-05-10
----
-
-# Publication-Subscription
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-Publication-subscription is implemented based on logical replication, with one or more subscribers subscribing to one or more publications on a publisher node. The subscribers pull data from the publications they subscribe to. Data across database clusters can be synchronized in real time.
-
-## Benefits
-
-The typical application scenarios of publication-subscription are as follows:
-
-- Sending incremental changes in a database or a subset of a database to subscribers as they occur
-- Firing triggers when changes reach subscribers
-- Consolidating multiple databases into a single one (for example, for analysis purposes)
-
-## Description
-
-Changes on the publisher are sent to the subscriber as they occur in real time. The subscriber applies the published data in the same order as the publisher, so that transactional consistency is guaranteed for publications within a single subscription. This method of data replication is sometimes called transactional replication.
-
-The subscriber database behaves in the same way as any other MogDB instance and can be used as a publisher for other databases by defining its own publications. When the subscriber is treated as read-only by an application, there will be no conflicts in a single subscription. On the other hand, conflicts may occur if other write operations are performed by the application or by other subscribers on the same set of tables.
-
-## Enhancements
-
-In version 3.1.0, this feature is enhanced as follows:
-
-- gs_probackup can be used to back up the logical replication slot of the publisher. In this way, the replication slot is not lost after the publisher uses gs_probackup to back up and restore data, ensuring that publication-subscription connections remain normal.
-
-- Publication-subscription can synchronize base data. If data already exists in the publisher's tables before a publication-subscription relationship is created, that base data is synchronized to the subscriber after the subscription is created.
-
-## Constraints
-
-Publication-subscription is implemented based on logical replication and inherits all restrictions of logical replication. In addition, publication-subscription has the following additional restrictions or missing functions.
-
-- Database schemas and DDL commands are not replicated. Initial schemas can be manually copied by using **gs_dump --schema-only**. Subsequent schema changes need to be manually synchronized.
-- Sequence data is not replicated. The data in serial or identifier columns backed by sequences will be replicated as part of the table, but the sequences themselves will still show the start value on the subscriber. If the subscriber is used as a read-only database, this is usually not a problem. However, if some kind of switchover or failover to the subscriber database is intended, the sequences need to be updated to the latest values, either by copying the current data from the publisher (perhaps using **gs_dump**) or by determining a sufficiently large value from the tables themselves.
-- Only tables, including partitioned tables, can be replicated. Attempts to replicate other types of relations, such as views, materialized views, or foreign tables, will result in errors.
-- Multiple subscriptions in the same database cannot subscribe to the same publication (that is, the same published table). Otherwise, duplicate data or primary key conflicts may occur.
-- If a published table contains data types that do not support B-tree or hash indexes (such as the geography types), the table must have a primary key so that UPDATE and DELETE operations can be successfully replicated to the subscription side. Otherwise, the replication will fail, and the message "FATAL: could not identify an equality operator for type xx" will be displayed on the subscription side.
-
-## Dependencies
-
-Publication-subscription depends on the logical replication function.
-
-## Related Pages
-
-[CREATE PUBLICATION](../../reference-guide/sql-syntax/CREATE-PUBLICATION.md), [CREATE SUBSCRIPTION](../../reference-guide/sql-syntax/CREATE-SUBSCRIPTION.md), [Publication-Subscription](../../developer-guide/logical-replication/publication-subscription/publication-subscription.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/17-foreign-key-lock-enhancement.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/17-foreign-key-lock-enhancement.md
deleted file mode 100644
index 20035ec6..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/17-foreign-key-lock-enhancement.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-title: Foreign Key Lock Enhancement
-summary: Foreign Key Lock Enhancement
-author: Guo Huan
-date: 2022-05-10
----
-
-# Foreign Key Lock Enhancement
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-Two new types of row locks are added, extending the share and update locks to four: key share, share, no key update, and update. A non-primary-key update acquires a no key update lock, and a row lock acquired by a foreign key trigger is a key share lock. These two lock types do not conflict with each other, which improves the concurrency of foreign key locks.
-
-## Benefits
-
-Most table update operations are non-primary-key updates. This feature effectively reduces the blocking of concurrent updates in scenarios with foreign key constraints and improves efficiency.
-
-## Description
-
-When a non-primary-key column of a tuple in the parent table is updated, a no key update lock is acquired. When the corresponding tuple in the child table is updated or inserted, the foreign key trigger is fired and acquires a key share lock on the tuple in the parent table. The two operations do not block each other.
-
-Because row locks that do not conflict with each other are added, the row locks held by multiple transactions are no longer limited to share locks. Instead, different row locks can be combined according to the following conflict table, in which X indicates a conflict.
-
-| Lock Mode     | key share | share | no key update | update |
-| ------------- | --------- | ----- | ------------- | ------ |
-| key share     |           |       |               | X      |
-| share         |           |       | X             | X      |
-| no key update |           | X     | X             | X      |
-| update        | X         | X     | X             | X      |
-
-## Enhancements
-
-None.
-
-## Constraints
-
-- The new row locks do not support Ustore tables.
-
-## Dependencies
-
-None
-
-## Related Pages
-
-[LOCK](../../reference-guide/sql-syntax/LOCK.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/18-data-compression-in-oltp-scenarios.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/18-data-compression-in-oltp-scenarios.md
deleted file mode 100644
index 83cfa88a..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/18-data-compression-in-oltp-scenarios.md
+++ /dev/null
@@ -1,41 +0,0 @@
----
-title: Data Compression in OLTP Scenarios
-summary: Data Compression in OLTP Scenarios
-author: Guo Huan
-date: 2022-05-10
----
-
-# Data Compression in OLTP Scenarios
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-This feature supports row-store data compression in OLTP scenarios. It provides a general compression algorithm and implements transparent compression of data pages and maintenance of page storage locations to achieve both high compression ratios and high performance. Disk persistence is implemented using two types of files: compressed address files (with the file name extension .pca) and compressed data files (with the file name extension .pcd).
-
-## Benefits
-
-This feature is typically applicable where database disk space needs to be reduced.
-
-## Description
-
-Data compression in OLTP scenarios can reduce the disk storage space of row tables and index data and improve performance in I/O-intensive database systems.
-
-## Constraints
-
-- Only heap-organized data tables are supported, that is, ordinary row-store tables and B-tree indexes can be compressed.
-- The operating system must support punch hole operations.
-- The data backup media must support punch hole operations.
-- Modifying compression-related parameters is not supported, and converting uncompressed tables into compressed tables is not supported.
-- Compression and decompression consume CPU and affect performance to some extent. In return, they increase the effective disk capacity, improve disk utilization, and reduce disk I/O pressure.
-
-## Dependencies
-
-- The database must support doublewrite operations.
-- Compression uses the open-source compression algorithms PGLZ and ZSTD.
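-
-A minimal sketch of creating a compressed row-store table is shown below; it assumes the **compresstype** and **compress_level** storage parameters described in the parameter page linked under Related Pages, and the table name is illustrative:
-
-```sql
--- Assumption: compresstype = 2 selects the ZSTD algorithm (1 selects PGLZ),
--- and compress_level tunes the compression level.
-CREATE TABLE t_comp (id int, payload text) WITH (compresstype = 2, compress_level = 1);
-```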
-
-## Related Pages
-
-[Parameters Related to Efficient Data Compression Algorithms](../../reference-guide/guc-parameters/parameters-related-to-efficient-data-compression-algorithms.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/19-transaction-async-submit.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/19-transaction-async-submit.md
deleted file mode 100644
index 203b3faf..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/19-transaction-async-submit.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: Transaction Asynchronous Submit
-summary: Transaction Asynchronous Submit
-author: Guo Huan
-date: 2022-06-13
----
-
-# Transaction Asynchronous Submit
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-In the thread pool mode of MogDB, a committer thread is added to each thread group. During transaction commit, the session is suspended while waiting for the logs to be flushed; after the logs are flushed, the committer thread completes the commit. Meanwhile, once the session is suspended, the original worker thread is free to handle other pending sessions.
-
-## Benefits
-
-In TP scenarios, transaction processing performance under high concurrency is critical. However, the transaction commit process in MogDB must wait synchronously for logs to be flushed to disk, and the worker threads are idle during this period and cannot be used to process other transactions. Although MogDB provides "synchronous_commit=off", that setting does not guarantee data integrity if the database crashes in the middle of a commit.
-
-This feature implements true asynchronous transaction commit, which makes full use of the CPU and improves transaction throughput in high-concurrency scenarios, especially for small INSERT, DELETE, and UPDATE operations.
-
-## Constraints
-
-- This feature takes effect only in thread pool mode; non-thread-pool mode does not support asynchronous transaction commit. That is, set "enable_thread_pool = on" and do not set "synchronous_commit" to "off".
-- The **async_submit** parameter must be set to **on**.
-
-## Related Pages
-
-[GS_ASYNC_SUBMIT_SESSIONS_STATUS](../../reference-guide/system-catalogs-and-system-views/system-views/GS_ASYNC_SUBMIT_SESSIONS_STATUS.md), [async_submit](../../reference-guide/guc-parameters/MogDB-transaction.md#async_submit), [LOCAL_THREADPOOL_STATUS](../../reference-guide/schema/DBE_PERF/session-thread/LOCAL_THREADPOOL_STATUS.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/2-sql-hints.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/2-sql-hints.md
deleted file mode 100644
index a5036048..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/2-sql-hints.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-title: SQL Hints
-summary: SQL Hints
-author: Guo Huan
-date: 2022-05-07
----
-
-# SQL Hints
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-SQL hints can be used to override execution plans.
-
-## Benefits
-
-Improves SQL query performance.
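-
-As a quick illustration of the plan-hint syntax described in the Description below (the tables **t1** and **t2** are hypothetical):
-
-```sql
--- Force a nested-loop join between t1 and t2 via a plan hint:
-SELECT /*+ nestloop(t1 t2) */ *
-FROM t1 JOIN t2 ON t1.id = t2.id;
-```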
-
-## Description
-
-In plan hints, you can specify a join order; join, stream, and scan operations; the number of rows in a result; and redistribution skew information to tune an execution plan, improving query performance.
-
-## Enhancements
-
-Supports setting session-level optimizer parameters via plan hints.
-
-Supports specifying that subqueries are not to be expanded.
-
-Supports disabling GPC for a single query.
-
-## Constraints
-
-None
-
-## Dependencies
-
-None
-
-## Related Pages
-
-[Hint Based Tuning](../../performance-tuning/sql-tuning/hint-based-tuning.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/20-copy-import-optimization.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/20-copy-import-optimization.md
deleted file mode 100644
index 9fc45df1..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/20-copy-import-optimization.md
+++ /dev/null
@@ -1,93 +0,0 @@
----
-title: COPY Import Optimization
-summary: COPY Import Optimization
-author: Guo Huan
-date: 2022-06-16
----
-
-# COPY Import Optimization
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-COPY is the most commonly used way to import user table data. This feature uses the SIMD capability of modern CPUs to speed up the parsing stage of COPY, improving import performance.
-
-When COPY imports data from a file, the parsing phase is essentially a string comparison operation: finding separators and determining whether the parsed CSV/TEXT data is valid. SIMD instructions compare multiple characters at a time, reducing the number of branch decisions and thus improving performance.
-
-## Benefits
-
-Row and column separator lookup during COPY parsing is optimized using SIMD instructions. The end users of this feature are general customers, such as DBAs and software developers. COPY performance is increased by 10% to 30%.
-
-| Number of Data Records              | **100000000** |
-| ----------------------------------- | ------------- |
-| **Total Data Size**                 | **24 GB**     |
-| **Average Performance Improvement** | **12.29%**    |
-
-The test results are as follows.
-
-| Test Sequence      | Time Spent Without SIMD (Seconds) | Time Spent With SIMD (Seconds) |
-| ------------------ | --------------------------------- | ------------------------------ |
-| 1                  | 761.01                            | 671.05                         |
-| 2                  | 747.06                            | 662.60                         |
-| 3                  | 770.22                            | 663.03                         |
-| 4                  | 747.940                           | 674.03                         |
-| 5                  | 787.22                            | 674.13                         |
-| Average time spent | 762.69                            | 668.97                         |
-
-## Constraints
-
-- Only machines with the x86 architecture and only TEXT and CSV files are supported. The following are not supported: escape characters, escape and quote characters, null value substitution, and custom column separators.
-
-- Because the string comparison instruction is available only since SSE4.2, only x86 CPUs that support SSE4.2 can use this optimization.
-
-The following commands can be used to determine whether the machine supports the SSE4.2 instruction set (logging in as either the root or omm user):
-
-```shell
-[omm3@hostname ~]$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
-SSE 4.2 supported
-
-[xxxx@hostname ~]$ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not supported"
-SSE 4.2 not supported
-```
-
-The enable_sse42 feature can be enabled or disabled using the following commands.
-
-Log in to the database.
-
-```shell
-[omm3@hostname ~]$ gsql -d postgres -p18000
-gsql ((MogDB 3.0.0 build 945141ad) compiled at 2022-05-28 16:14:02 commit 0 last mr )
-Non-SSL connection (SSL connection is recommended when requiring high-security)
-Type "help" for help.
-```
-
-Enable the enable_sse42 feature.
-
-```sql
-MogDB=# set enable_sse42 to on;
-SET
-MogDB=# show enable_sse42;
- enable_sse42
---------------
- on
-(1 row)
-```
-
-Disable the enable_sse42 feature.
-
-```sql
-MogDB=# set enable_sse42 to off;
-SET
-MogDB=# show enable_sse42;
- enable_sse42
---------------
- off
-(1 row)
-```
-
-## Related Pages
-
-[COPY](../../reference-guide/sql-syntax/COPY.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/21-dynamic-partition-pruning.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/21-dynamic-partition-pruning.md
deleted file mode 100644
index 4ba2d1d8..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/21-dynamic-partition-pruning.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: Dynamic Partition Pruning
-summary: Dynamic Partition Pruning
-author: Guo Huan
-date: 2022-06-17
----
-
-# Dynamic Partition Pruning
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-This feature mainly implements the following functions:
-
-1. Supports partition pruning for NEST LOOP joins.
-
-2. Supports partition pruning with bind variables.
-
-3. Supports partition pruning for subqueries.
-
-4. Supports using EXPLAIN ANALYZE to view dynamic partition pruning results.
-
-## Benefits
-
-This feature optimizes the partition pruning capability by introducing dynamic partition pruning and supports viewing pruning results with EXPLAIN ANALYZE. It improves the query performance of partitioned tables by pruning unneeded partitions before the partitioned tables are scanned during SQL execution.
-
-## Related Pages
-
-[Static Partition Pruning](../../developer-guide/partition-management/partition-pruning/static-partition-pruning.md), [Dynamic Partition Pruning](../../developer-guide/partition-management/partition-pruning/dynamic-partition-pruning.md)
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/22-sql-running-status-observation.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/22-sql-running-status-observation.md
deleted file mode 100644
index 3f462dcf..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/22-sql-running-status-observation.md
+++ /dev/null
@@ -1,105 +0,0 @@
----
-title: SQL Running Status Observation
-summary: SQL Running Status Observation
-author: Guo Huan
-date: 2022-06-17
----
-
-# SQL Running Status Observation
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-The sampling thread samples once every 1s by default, which can be controlled by the GUC parameter asp_sample_interval. Up to 100000 rows of data are sampled in memory, which is controlled by asp_sample_num; when this upper limit is reached, the in-memory data is flushed to disk.
-
-## Benefits
-
-When a performance bottleneck is found in an SQL statement and the execution of each operator of that SQL statement cannot otherwise be observed in the sampling view, you can locate the performance problem by plan_node_id.
-
-Operator: the specific action of each step in the execution of an SQL statement (e.g. SELECT, SUM, WHERE, Group By, Having, Order By, Limit, etc.)
-
-## Description
-
-A new column plan_node_id is added to [dbe_perf.local_active_session](../../reference-guide/schema/DBE_PERF/session-thread/LOCAL_ACTIVE_SESSION.md) and [GS_ASP](../../reference-guide/system-catalogs-and-system-views/system-catalogs/GS_ASP.md) to record the execution of each operator of an SQL statement.
-
-The existing monitoring level is defined by the GUC parameter [resource_track_level](../../reference-guide/guc-parameters/load-management.md#resource_track_level), which has three values:
-
-- **none**: Resources are not monitored.
-- **query**: Resources used at the query level are monitored.
-- **operator**: Resources used at the query and operator levels are monitored.
-
-Therefore, each operator of an SQL statement is sampled only if resource_track_level is set to operator.
-
-MogDB starts a background worker sampling thread at startup. To avoid wasting resources, this sampling thread does not sample continuously; instead, it samples MogDB once per sampling period, collecting a snapshot of the running state at that moment and saving it in memory. Real-time sampling information can be queried from dbe_perf.local_active_session. The sampling period is defined by the GUC parameter [asp_sample_interval](../../reference-guide/guc-parameters/system-performance-snapshot.md#asp_sample_interval), and the default sampling period is 1s. Each time 100000 rows (controlled by the GUC parameter [asp_sample_num](../../reference-guide/guc-parameters/system-performance-snapshot.md#asp_sample_num)) have been sampled in memory, MogDB flushes the sampled data to the GS_ASP table for historical queries. Running information is collected by the sampling thread only when the statement execution time is greater than the sampling interval.
-
-## Scenarios
-
-1. Create the table test in session1 and perform the insert operation.
-
-   ```sql
-   MogDB=# create table test(c1 int);
-   CREATE TABLE
-   MogDB=# insert into test select generate_series(1, 1000000000);
-   ```
-
-2. In session2, look up the query_id of the SQL statement from the active session view.
-
-   ```sql
-   MogDB=# select query,query_id from pg_stat_activity where query like 'insert into test select%';
-                             query                            |    query_id
-   -----------------------------------------------------------+-----------------
-    insert into test select generate_series(1, 100000000000); | 562949953421368
-   (1 row)
-   ```
-3. In session2, use the query_id to query the statement's execution plan (with plan_node_id) from the active job management view. A statement is recorded in the view only if its execution cost is greater than the GUC parameter **resource_track_cost**. The default value of this parameter is 100000 and it can be updated at the session level, so for easier testing you can change it to 10:
-
-   `set resource_track_cost = 10;`
-
-   ```sql
-   MogDB=# select query_plan from dbe_perf.statement_complex_runtime where queryid = 562949953421368;
-                                   query_plan
-   ----------------------------------------------------------------------------
-    Coordinator Name: datanode1                                                +
-    1 | Insert on test  (cost=0.00..17.51 rows=1000 width=8)                   +
-    2 |   -> Subquery Scan on "*SELECT*"  (cost=0.00..17.51 rows=1000 width=8) +
-    3 |     -> Result  (cost=0.00..5.01 rows=1000 width=0)                     +
-                                                                               +
-   (1 row)
-   ```
-
-4. In session2, query the sampling of the statement from the sampling view dbe_perf.local_active_session based on the query_id, and perform performance analysis in conjunction with the execution plan queried above.
-
-   ```sql
-   MogDB=# select plan_node_id, count(plan_node_id) from dbe_perf.local_active_session where query_id = 562949953421368 group by plan_node_id;
-    plan_node_id | count
-   --------------+-------
-               3 |    12
-               1 |   366
-               2 |     2
-   (3 rows)
-   ```
-
-5. In session2, when the in-memory data reaches the upper limit (controlled by the GUC parameter **asp_sample_num**), the existing in-memory sampling data is flushed to the gs_asp table; after the flush, the sampled data for the statement can be queried from gs_asp.
-
-   ```sql
-   MogDB=# select plan_node_id, count(plan_node_id) from gs_asp where query_id = 562949953421368 group by plan_node_id;
-    plan_node_id | count
-   --------------+-------
-               3 |    19
-               1 |   582
-               2 |     3
-   (3 rows)
-   ```
-
-## Conclusion
-
-When a performance bottleneck is found in `insert into test select generate_series(1, 1000000000)`, the above steps show that the Insert operator (plan_node_id = 1, count = 366) accounts for by far the most samples during the execution of the whole SQL statement, so it is the operator to optimize.
-
-## Related Pages
-
-[GS_ASP](../../reference-guide/system-catalogs-and-system-views/system-catalogs/GS_ASP.md), [LOCAL_ACTIVE_SESSION](../../reference-guide/schema/DBE_PERF/session-thread/LOCAL_ACTIVE_SESSION.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/23-index-creation-parallel-control.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/23-index-creation-parallel-control.md
deleted file mode 100644
index ed142659..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/23-index-creation-parallel-control.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: Index Creation Parallel Control
-summary: Index Creation Parallel Control
-author: Guo Huan
-date: 2022-06-17
----
-
-# Index Creation Parallel Control
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-This feature supports specifying the degree of parallelism directly in the index creation statement, making more efficient use of resources and increasing usage flexibility, as shown in the sketch below.
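-
-A minimal sketch follows; the **parallel_workers** storage parameter and the table and index names are assumptions for illustration, so check the CREATE INDEX reference linked below for the exact option on your version:
-
-```sql
--- Assumption: parallel_workers controls the degree of parallelism of this index build.
-CREATE INDEX idx_t_id ON t (id) WITH (parallel_workers = 4);
-```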
-
-## Benefits
-
-This feature adds an option to specify the degree of parallelism in the index creation syntax, so that DBAs and delivery testers can control the concurrency of index creation at the syntax level to achieve optimal execution.
-
-## Description
-
-Parallel syntax is added to the index creation statement to control the degree of parallelism of index creation.
-
-## Related Pages
-
-[CREATE INDEX](../../reference-guide/sql-syntax/CREATE-INDEX.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/24-brin-index.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/24-brin-index.md
deleted file mode 100644
index a504990b..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/24-brin-index.md
+++ /dev/null
@@ -1,261 +0,0 @@
----
-title: BRIN Index
-summary: BRIN Index
-author: Guo Huan
-date: 2022-06-20
----
-
-# BRIN Index
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-MogDB 3.0.0 adds the BRIN index feature.
-
-A BRIN index is a block range index. Unlike other indexes, a BRIN index allows you to quickly exclude rows that do not satisfy the query criteria.
-
-## Benefits
-
-- Sequential scans become fast, so the performance of statistical SQL on large tables is dramatically improved.
-- Creating indexes is very fast.
-- Indexes take up very little space.
-
-## Description
-
-BRIN is short for Block Range INdex. Unlike other indexes, the idea of a BRIN index is to quickly exclude rows that do not meet the query criteria, rather than to quickly find matching rows.
-
-How a BRIN index works: the blocks of a table are divided into ranges, and the index stores summary information for each range (usually the minimum and maximum values, plus some other information for spatial data). If the value of the queried column does not fall into the summary information of a range, that range can be skipped. Otherwise, all rows in the range need to be scanned.
-
-A BRIN index is suitable for columns whose data is correlated with its physical location. The optimizer uses the column's correlation statistic to choose between an index scan and a bitmap scan, and you can also use it to estimate how well a BRIN index fits: the closer the correlation is to 1, the more ordered the column data is and the more suitable it is for a BRIN index. BRIN indexes are mainly designed for large tables; compared with indexes such as B-tree, they contain far less data.
-
-## Constraints
-
-- BRIN indexes are not supported in B-compatible mode databases.
-
-## Scenarios
-
-### Create a BRIN Index
-
-The syntax is the same as that for creating B-tree, hash, GiST, SP-GiST, and GIN indexes.
-
-The parameters pages_per_range and autosummarize can be specified when creating a BRIN index.
-
-pages_per_range: specifies how many pages each range in the BRIN index contains. The value range is 1 to 131072. If it is not specified, the default value is 128.
-
-autosummarize: specifies whether to automatically summarize data pages of the table that do not yet appear in the index. The default value is **off**.
-
-```sql
-CREATE INDEX brinidx ON tbbrin USING brin(i1,i2,i3) WITH (pages_per_range=64, autosummarize=off);
---Online Create
-CREATE INDEX CONCURRENTLY brinidx ON tbbrin USING brin(i1,i2,i3) WITH (pages_per_range=64);
-```
-
-### Recreate a BRIN Index
-
-The syntax is the same as that for recreating B-tree, hash, GiST, SP-GiST, and GIN indexes.
-
-```sql
-REINDEX INDEX brinidx;
---Online reindex
-REINDEX INDEX CONCURRENTLY brinidx;
-```
-
-### Alter a BRIN Index
-
-BRIN supports modifying the pages_per_range and autosummarize parameters. The ALTER command updates only the metadata; you must run the REINDEX command for the new parameter values to take effect.
-
-Example:
-
-```sql
-alter index idx set(pages_per_range=64);
-reindex index idx;
-```
-
-### View the Execution Plan of the BRIN Index
-
-Example:
-
-```sql
-MogDB=# explain select * from example where id = 100;
-                        QUERY PLAN
----------------------------------------------------------
- Bitmap Heap Scan on example  (cost=15.88..486.21 rows=500 width=4)
-   Recheck Cond:(id = 100)
-   ->  Bitmap Index Scan on idx  (cost=0.00..15.75 rows=500 width=0)
-         Index Cond:(id =100)
-(4 rows)
-```
-
-### Manually Update a BRIN Index
-
-While a BRIN index is in use, some data pages may not yet appear in the index. You can update the BRIN index manually in either of the following two ways.
-
-- Perform a vacuum operation on the table.
-
-- Execute the brin_summarize_new_values(oid) function, passing the OID of the BRIN index. If the return value is 0, the index was not updated; if it returns 1, the index has been updated.
-
-Example:
-
-```sql
-SELECT brin_summarize_new_values((select oid from pg_class where relname='brinidx')::oid);
-```
-
-You can check whether the index needs an update as follows.
-
-- View the total number of blocks of the table in pg_class.
-
-- View the page numbers of the ranges already indexed via brin_revmap_data.
-
-- If the difference between the previous two items is greater than pages_per_range, the index needs to be updated.
-
-### autosummarize for BRIN index
-
-autosummarize controls whether data pages that do not yet appear in the index are summarized automatically.
-
-You can verify that autosummarize takes effect by following these steps.
-
-1. Update the table data.
-
-2. Query the last_autovacuum field of the table via pg_stat_user_tables, e.g.
-
-   ```sql
-   MogDB=# select relname,last_vacuum,last_autovacuum from pg_stat_user_tables where relname = 'person';
-    relname |          last_vacuum          | last_autovacuum
-   ---------+-------------------------------+-----------------
-    person  | 2022-06-20 19:21:58.201214+08 |
-   (1 row)
-   ```
-
-   The following two commands can be executed to speed up the autovacuum frequency of the table.
-
-   ```sql
-   ALTER TABLE example SET (autovacuum_vacuum_scale_factor = 0.0);
-   ALTER TABLE example SET (autovacuum_vacuum_threshold = 100);
-   ```
-
-3. After pg_stat_user_tables is updated, you can observe that the index data is updated automatically.
-
-   ```sql
-   MogDB=# select relname,last_vacuum,last_autovacuum from pg_stat_user_tables where relname = 'person';
-    relname |          last_vacuum          |        last_autovacuum
-   ---------+-------------------------------+-------------------------------
-    person  | 2022-06-20 19:23:58.201214+08 | 2022-06-20 19:24:59.201214+08
-   (1 row)
-   ```
-
-### View BRIN Index Via pageinspect
-
-For the BRIN index of a table, you can query the page data through pageinspect.
-
-The pageinspect module provides functions for viewing the contents of database data pages or index pages, which is useful for debugging or locating problems. pageinspect is installed as an extension by executing the following command.
-
-```sql
-create extension pageinspect;
-```
-
-For now, pageinspect only supports non-partitioned tables.
-
-The BRIN index provides three functions for querying data in the meta page, the revmap page, and regular pages, respectively.
-
-- brin_metapage_info(page bytea) returns record
-
-  Returns the information stored in the BRIN index metapage; the page number passed to get_raw_page is fixed to 0. e.g.
-
-  ```sql
-  MogDB=# SELECT * FROM brin_metapage_info(get_raw_page('brinidx', 0));
-     magic    | version | pagesperrange | lastrevmappage
-  ------------+---------+---------------+----------------
-   0xA8109CFA |       1 |             4 |              2
-  ```
-
-- brin_revmap_data(page bytea) returns setof tid
-
-  Returns the list of tuple identifiers in the BRIN index range mapping page. e.g.
-
-  ```sql
-  MogDB=# SELECT * FROM brin_revmap_data(get_raw_page('brinidx', 2)) limit 5;
-    pages
-  ---------
-   (6,137)
-   (6,138)
-   (6,139)
-   (6,140)
-   (6,141)
-  ```
-
-- brin_page_items(page bytea, index oid) returns setof record
-
-  Returns the data stored in the BRIN data page. e.g.
-
-  ```sql
-  MogDB=# SELECT * FROM brin_page_items(get_raw_page('brinidx', 5),
-                                        'brinidx')
-          ORDER BY blknum, attnum LIMIT 6;
-   itemoffset | blknum | attnum | allnulls | hasnulls | placeholder |    value
-  ------------+--------+--------+----------+----------+-------------+--------------
-          137 |      0 |      1 | t        | f        | f           |
-          137 |      0 |      2 | f        | f        | f           | {1 .. 88}
-          138 |      4 |      1 | t        | f        | f           |
-          138 |      4 |      2 | f        | f        | f           | {89 .. 176}
-          139 |      8 |      1 | t        | f        | f           |
-          139 |      8 |      2 | f        | f        | f           | {177 .. 264}
-  ```
-
-## Examples
-
-```sql
-# Create a test table
-MogDB=# CREATE TABLE testtab (id int NOT NULL PRIMARY KEY,date TIMESTAMP NOT NULL, level INTEGER, msg TEXT);
-NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "testtab_pkey" for table "testtab"
-CREATE TABLE
-# Insert test data
-MogDB=# INSERT INTO testtab (id, date, level, msg) SELECT g, CURRENT_TIMESTAMP + ( g || 'minute' ) :: interval, random() * 6, md5(g::text) FROM generate_series(1,8000000) as g;
-INSERT 0 8000000
-# Without an index, the execution plan of the query uses a seq scan
-MogDB=# explain analyze select * from public.testtab where date between '2019-08-08 14:40:47.974791' and '2019-08-08 14:50:47.974791';
-                                                                  QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------
- Seq Scan on testtab  (cost=0.00..212024.20 rows=43183 width=48) (actual time=46620.314..46620.314 rows=0 loops=1)
-   Filter: (("date" >= '2019-08-08 14:40:47.974791'::timestamp without time zone) AND ("date" <= '2019-08-08 14:50:47.974791'::timestamp without time zone))
-   Rows Removed by Filter: 8000000
- Total runtime: 46620.580 ms
-(4 rows)
-# Create a brin index on the table
-MogDB=# create index testtab_date_brin_idx on testtab using brin (date);
-CREATE INDEX
-# The brin index is only about 64 kB in size
-MogDB=# \di+ testtab_date_brin_idx
-                                List of relations
- Schema |         Name          | Type  | Owner  |  Table  | Size  | Storage | Description
---------+-----------------------+-------+--------+---------+-------+---------+-------------
- public | testtab_date_brin_idx | index | wusong | testtab | 64 kB |         |
-(1 row)
-# The execution plan of the same query statement now uses the brin index
-MogDB=# explain analyze select * from public.testtab where date between '2019-08-08 14:40:47.974791' and '2019-08-08 14:50:47.974791';
-                                                                       QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Bitmap Heap Scan on testtab  (cost=20.00..24.02 rows=1 width=49) (actual time=1.121..1.121 rows=0 loops=1)
-   Recheck Cond: (("date" >= '2019-08-08 14:40:47.974791'::timestamp without time zone) AND ("date" <= '2019-08-08 14:50:47.974791'::timestamp without time zone))
-   ->  Bitmap Index Scan on "testtab_date_brin_idx"  (cost=0.00..20.00 rows=1 width=0) (actual time=1.119..1.119 rows=0 loops=1)
-         Index Cond: (("date" >= '2019-08-08 14:40:47.974791'::timestamp without time zone) AND ("date" <= '2019-08-08 14:50:47.974791'::timestamp without time zone))
- Total runtime: 1.281 ms
-(5 rows)
-# Create a btree index on the date column of the table
-MogDB=# create index testtab_date_idx on testtab(date);
-CREATE INDEX
-# The btree index is about 172 MB, much larger than the 64 kB brin index
-MogDB=# \di+ testtab_date_idx
-                             List of relations
- Schema |       Name       | Type  | Owner  |  Table  |  Size  | Storage | Description
---------+------------------+-------+--------+---------+--------+---------+-------------
- public | testtab_date_idx | index | wusong | testtab | 172 MB |         |
-(1 row)
-
-```
-
-## Related Pages
-
-[CREATE INDEX](../../reference-guide/sql-syntax/CREATE-INDEX.md), [DROP INDEX](../../reference-guide/sql-syntax/DROP-INDEX.md),
[ALTER INDEX](../../reference-guide/sql-syntax/ALTER-INDEX.md), [VACUUM](../../reference-guide/sql-syntax/VACUUM.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/25-bloom-index.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/25-bloom-index.md
deleted file mode 100644
index 6c4b8429..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/25-bloom-index.md
+++ /dev/null
@@ -1,215 +0,0 @@
----
-title: BLOOM Index
-summary: BLOOM Index
-author: Guo Huan
-date: 2022-06-21
----
-
-# BLOOM Index
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-Bloom provides a Bloom filter-based index access method. A Bloom filter is a space-efficient data structure that is used to test whether an element is a member of a set. As an index access method, it can quickly exclude non-matching tuples via signatures whose size is decided at index creation.
-
-## Benefits
-
-Bloom indexes are suitable for scenarios where a table has many columns and queries may filter on any combination of them. In such scenarios a traditional index such as B-tree may have to be built many times over to cover all possible query conditions, which consumes a large amount of index space and hurts insert and update performance, whereas a single Bloom index is enough.
-
-## Description
-
-In the applicable scenarios, users can query through a Bloom index and also reduce the index space occupied compared with a B-tree index.
-
-## Constraints
-
-- BLOOM indexes are not supported in B-compatible mode databases.
-
-## Scenarios
-
-Bloom indexes are most useful when the table has many attributes and queries may test any combination of them. A traditional btree index is faster than a Bloom index, but many btree indexes are needed to support all possible queries, while only one Bloom index is needed.
-
-### Create Bloom Index
-
-#### Constraints
-
-- A Bloom index can be created only on a row-store table.
-
-- The indexed columns can only be of a 4-byte int type or a variable-length string type. The 4-byte int types in MogDB are int, int4 and integer, and the variable-length string types are varchar, text and clob.
-
-- Bloom does not support creating unique indexes.
-
-- null values cannot be indexed and are skipped.
-- Only equal-value queries are supported.
-- Global indexes on partitioned tables are not supported.
-
-#### Example
-
-```sql
-CREATE INDEX idxname ON tablename USING bloom(col1,col2...) with (length=80,col1=2,col2=4);
-```
-
-#### Parameters
-
-**length**
-
-Specifies how many bits are used for the signature generated in the index. The default value is 80. The user-specified value is rounded up internally to an integer multiple of 16 (transparent to the user); the minimum value is 1 and the maximum value is 4096. If the set value is out of range, the command reports an error and indicates the correct range.
-
-**col1-col32**
-
-Specifies how many bits are used for each index column. The default value is 2, the minimum value is 1, and the maximum value is 4095. If the set value is out of range, the command reports an error and indicates the correct range.
-
-If a Bloom index is created on a non-row-store table, an error is reported indicating that it cannot be created on non-row-store tables.
-
-If a unique Bloom index is created, an error is reported indicating that a unique index cannot be created.
-
-If a Global index of type Bloom is created on a partitioned table, an error is reported indicating that a partitioned table Global index can only be of type Btree.
-
-### Delete Bloom Index
-
-Example:
-
-```sql
-MogDB=# DROP INDEX bloom_idx;
-DROP INDEX
-```
-
-### Reindex Bloom Index
-
-This feature supports rebuilding an existing Bloom index. The user issues the rebuild command through a client or database driver; after the rebuild succeeds, the index metadata is correct and the index can be used normally (for example, if the original execution plan used the Bloom index, the plan can still use the rebuilt index).
-
-Example:
-
-```sql
-MogDB=# REINDEX INDEX bloom_idx;
-REINDEX
-```
-
-### Alter Bloom Index
-
-This feature supports modifying the attributes of an existing Bloom index. The constraints on the index attributes are the same as at index creation; for example, the modified length of a Bloom index must be in the range [1,4096].
-
-Key points:
-
-- Modified index attributes must conform to the indexing rules and constraints; if they do not, an error is reported with the corresponding message.
-- After renaming the index, verify that the index metadata in pg_class is correct.
-- After modifying the length or col attributes of a Bloom index, you can verify that they took effect by checking the change of the relpages value in pg_class, or by commands such as SELECT pg_size_pretty(pg_relation_size('blidx')) that show the change in index size.
-- After modifying the tablespace, you can confirm that it took effect by viewing the data files on disk.
-
-Example:
-
-```sql
-MogDB=# ALTER INDEX IF EXISTS bloom_idx RENAME TO newbloom_idx;
-ALTER INDEX
-MogDB=# SELECT oid,relname,relfilenode,relpages FROM pg_class WHERE relname = 'newbloom_idx';
-  oid  |   relname    | relfilenode | relpages
--------+--------------+-------------+----------
- 41159 | newbloom_idx |       41160 |       30
-(1 row)
-MogDB=# ALTER INDEX IF EXISTS newbloom_idx SET (length=160);
-ALTER INDEX
-MogDB=# REINDEX INDEX newbloom_idx;
-REINDEX
-MogDB=# SELECT oid,relname,relfilenode,relpages FROM pg_class WHERE relname = 'newbloom_idx';
-  oid  |   relname    | relfilenode | relpages
--------+--------------+-------------+----------
- 41159 | newbloom_idx |       41162 |       49
-(1 row)
-```
-
-### Querying with Bloom Index
-
-When a Bloom index exists on the table and the query conditions match its applicable conditions, the execution plan shows that the Bloom index is used.
- -Example: - -```sql -CREATE TABLE tbloom AS - SELECT - (random() * 1000000)::int as i1, - (random() * 1000000)::int as i2, - (random() * 1000000)::int as i3, - (random() * 1000000)::int as i4, - (random() * 1000000)::int as i5, - (random() * 1000000)::int as i6 - FROM - generate_series(1,10000000); - -CREATE INDEX btreeidx ON tbloom (i1, i2, i3, i4, i5, i6); -CREATE INDEX bloomidx ON tbloom USING bloom (i1, i2, i3, i4, i5, i6); - -MogDB=# EXPLAIN ANALYZE SELECT * FROM tbloom where i3 = 100 AND i5 = 1000; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------- - Bitmap Heap Scan on tbloom (cost=178436.31..179393.23 rows=250 width=24) (actual time=138.209..138.209 rows=0 loops=1) - Recheck Cond: ((i3 = 100) AND (i5 = 1000)) - Rows Removed by Index Recheck: 21936 - Heap Blocks: exact=18673 - -> Bitmap Index Scan on bloomidx (cost=0.00..178436.25 rows=250 width=0) (actual time=85.681..85.681 rows=21936 loops=1) - Index Cond: ((i3 = 100) AND (i5 = 1000)) - Total runtime: 138.412 ms -(7 rows) - -MogDB=# EXPLAIN ANALYZE SELECT * FROM tbloom where i1 = 100 AND i2 = 1000; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------ - [Bypass] - Index Only Scan using btreeidx on tbloom (cost=0.00..8.27 rows=1 width=24) (actual time=0.084..0.084 rows=0 loops=1) - Index Cond: ((i1 = 100) AND (i2 = 1000)) - Heap Fetches: 0 - Total runtime: 0.134 ms -(5 rows) -``` - -### Update Bloom Index - -Example: - -```sql -MogDB=# select i2,i3,i4 from tbloom where rownum <=5; - i2 | i3 | i4 ---------+--------+-------- - 778090 | 624067 | 948170 - 927435 | 800792 | 904419 - 325217 | 726778 | 834407 - 925272 | 221411 | 826500 - 93906 | 992575 | 997677 - -UPDATE tbloom SET i1 = 10 - WHERE i2 = 325217 AND - i3 = 726778 AND - i4 = 834407; - -MogDB=# select * from tbloom where i2 = 325217 and i3 = 726778; - i1 | i2 | i3 | i4 | i5 | i6 -----+--------+--------+--------+--------+-------- - 10 | 325217 | 726778 | 834407 | 702579 | 525581 -(1 row) - -MogDB=# explain select * from tbloom where i2 = 325217 and i3 = 726778; - QUERY PLAN ----------------------------------------------------------------------------- - Bitmap Heap Scan on tbloom (cost=178439.48..178443.50 rows=1 width=24) - Recheck Cond: ((i2 = 325217) AND (i3 = 726778)) - -> Bitmap Index Scan on bloomidx (cost=0.00..178439.48 rows=1 width=0) - Index Cond: ((i2 = 325217) AND (i3 = 726778)) -(4 rows) - -DELETE FROM tbloom WHERE i2 = 1000 AND i3 = 789678 AND i4 = 311551; -select * from tbloom where i2 = 1000 and i3 = 789678; - i1 | i2 | i3 | i4 | i5 | i6 -----+----+----+----+----+---- -(0 rows) -explain select * from tbloom where i2 = 1000 and i3 = 789678; - Bitmap Heap Scan on tbloom (cost=178440.26..178444.28 rows=1 width=24) - Recheck Cond: ((i2 = 1000) AND (i3 = 789678)) - -> Bitmap Index Scan on tbloomidx (cost=0.00..178440.26 rows=1 width=0) - Index Cond: ((i2 = 1000) AND (i3 = 789678)) -(4 rows) -``` - -## Related Pages - -[CREATE INDEX](../../reference-guide/sql-syntax/CREATE-INDEX.md), [DROP INDEX](../../reference-guide/sql-syntax/DROP-INDEX.md), [ALTER INDEX](../../reference-guide/sql-syntax/ALTER-INDEX.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/3-full-text-indexing.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/3-full-text-indexing.md deleted file mode 100644 index 
4168e4f7..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/3-full-text-indexing.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: Full-Text Indexing
-summary: Full-Text Indexing
-author: Guo Huan
-date: 2022-05-07
----
-
-# Full-Text Indexing
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-MogDB full-text indexing allows documents to be preprocessed and facilitates subsequent searches.
-
-## Benefits
-
-MogDB full-text indexing provides the capability to identify natural-language documents that satisfy a query and to sort them by relevance.
-
-## Description
-
-The preprocessing process of creating a full-text index includes:
-
-- Parsing documents into tokens
-
-  It is useful to identify various classes of tokens, for example, numbers, words, compound words, and email addresses, so that they can be processed differently. In principle, token classes depend on the specific application, but for most purposes it is adequate to use a predefined set of classes.
-
-- Converting tokens into lexemes
-
-  A lexeme is a string, just like a token, but it has been normalized so that different forms of the same word are made alike. For example, normalization almost always includes folding upper-case letters to lower-case, and often involves removal of suffixes (such as **s** or **es** in English). This allows searches to find variant forms of the same word without entering all the possible variants. This step also typically eliminates stop words, which are so common that they are useless for searching. (In short, tokens are raw fragments of the document text, while lexemes are words that are believed useful for indexing and searching.) MogDB uses dictionaries to perform this step and provides various standard dictionaries.
-
-- Storing preprocessed documents optimized for searching
-
-  For example, each document can be represented as a sorted array of normalized lexemes. Along with the lexemes, it is often desirable to store positional information for proximity ranking, so that a document containing a "denser" region of query words is assigned a higher rank than one with scattered query words. Dictionaries allow fine-grained control over how tokens are normalized. With appropriate dictionaries, you can define stop words that should not be indexed.
-
-## Enhancements
-
-None
-
-## Constraints
-
-The current limitations of MogDB's text search features are:
-
-- The length of each lexeme must be less than 2 KB.
-- The length of a **tsvector** (lexemes + positions) must be less than 1 MB.
-- Position values in **tsvector** must be greater than 0 and less than or equal to 16383.
-- No more than 256 positions per lexeme. Excessive positions, if any, will be discarded.
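-
-The following minimal sketch illustrates the preprocessing and matching described above (standard to_tsvector/to_tsquery syntax; the sample text is illustrative):
-
-```sql
--- Parse a document into normalized lexemes with positions.
-SELECT to_tsvector('english', 'a fat cat sat on a mat');
--- Test whether the preprocessed document matches a query.
-SELECT to_tsvector('english', 'a fat cat sat on a mat') @@ to_tsquery('english', 'cat & sat');
-```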
-
-## Dependencies
-
-None
-
-## Related Pages
-
-[Full Text Search](../../reference-guide/sql-reference/full-text-search/full-text-search.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/4-copy-interface-for-error-tolerance.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/4-copy-interface-for-error-tolerance.md
deleted file mode 100644
index 0dc37de4..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/4-copy-interface-for-error-tolerance.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-title: Copy Interface for Error Tolerance
-summary: Copy Interface for Error Tolerance
-author: Guo Huan
-date: 2022-05-07
----
-
-# Copy Interface for Error Tolerance
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-Certain errors that occur during the copy process are recorded in a specified error table without interrupting the process.
-
-## Benefits
-
-Refines the COPY function and improves its tolerance of and robustness against common errors such as invalid formats.
-
-## Description
-
-MogDB provides an encapsulated function for creating COPY error tables and allows users to specify error tolerance options when using the **Copy From** statement. In this way, errors related to parsing, data format, and character set during the execution of the **Copy From** statement are recorded in the error table instead of being reported and interrupting the import. Even if a small amount of data in the target file of **Copy From** is incorrect, the data can still be imported to the database. You can locate and rectify the errors in the error table later.
-
-## Enhancements
-
-None
-
-## Constraints
-
-For details, see “Importing Data > Running the COPY FROM STDIN Statement to Import Data > [Handling Import Errors](../../administrator-guide/importing-and-exporting-data/importing-data/3-running-the-COPY-FROM-STDIN-statement-to-import-data.md#handling-import-errors)” in the *Administrator Guide*.
-
-## Dependencies
-
-None
-
-## Related Pages
-
-[Handling Import Errors](../../administrator-guide/importing-and-exporting-data/importing-data/3-running-the-COPY-FROM-STDIN-statement-to-import-data.md#handling-import-errors)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/5-partitioning.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/5-partitioning.md
deleted file mode 100644
index 28940f2f..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/5-partitioning.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-title: Partitioning
-summary: Partitioning
-author: Guo Huan
-date: 2022-05-07
----
-
-# Partitioning
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-Data is partitioned horizontally on a node using a specified policy. This operation splits a table into multiple partitions that do not overlap.
-
-## Benefits
-
-In common scenarios, a partitioned table has the following advantages over a common table:
-
-- High query performance: You can specify partitions when querying partitioned tables, improving query efficiency.
-- High availability: If a certain partition in a partitioned table is faulty, data in the other partitions is still available.
-- Balanced I/O: Partitions can be mapped to different disks to balance I/O and improve the overall system performance.
-
-## Description
-
-Currently, the MogDB database supports range partitioned tables, list partitioned tables, hash partitioned tables, interval partitioned tables, and level-2 partitioned tables.
-
-- In a range partitioned table, data within a certain range is mapped to each partition. The range is determined by the partition key specified when the partitioned table is created. This partitioning mode is most commonly used.
-
-  With the range partitioning function, the database divides the records to be inserted into a table into multiple ranges using one or more columns and creates a partition for each range to store data. Partition ranges do not overlap.
-
-- In a list partitioned table, data is mapped to each partition based on the key values contained in each partition. The key values contained in a partition are specified when the partition is created.
-
-  The list partitioning function divides the key values of the records to be inserted into a table into multiple lists based on a column of the table (the lists do not overlap across partitions), and then creates a partition for each list to store the corresponding data.
-
-- In a hash partitioned table, data is mapped to each partition using the hash algorithm, and each partition stores records with the same hash value.
-
-  The hash partitioning function uses the internal hash algorithm to divide the records to be inserted into a table into partitions based on a column of the table.
-
-- Interval partitioning is a special type of range partitioning. Compared with range partitioning, interval partitioning defines an interval value: when no matching partition can be found for an inserted record, a partition is automatically created based on the interval value.
-
-- A level-2 partitioned table is based on level-1 partitioning. Its partitioning scheme is a combination of two level-1 partitioning schemes. Currently, the level-2 partitioned table supports nine partitioning policies combining range partitioning, list partitioning, and hash partitioning.
-
-If you specify the **PARTITION** parameter when running the **CREATE TABLE** statement, data in the table will be partitioned. Users can choose partition keys during table creation so that query results are stored in the same partition or in as few partitions as possible (called partition pruning), obtaining consecutive I/O and improving query performance.
-
-In actual services, time is often used to filter query objects. Therefore, you can select the time column as the partition key, whose value range can be adjusted based on the total data volume and the data volume queried at a time. A sketch is given at the end of this page.
-
-## Enhancements
-
-Range partitioned tables can be combined.
-
-## Constraints
-
-None.
-
-## Dependencies
-
-None.
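-
-A minimal sketch of a range-partitioned table with a time-based partition key (the table, partition names, and boundaries are illustrative; see the Partition Table reference for full syntax):
-
-```sql
--- Queries that filter on log_time touch only the matching partitions (partition pruning).
-CREATE TABLE sales_log (
-    id       int,
-    log_time date
-)
-PARTITION BY RANGE (log_time)
-(
-    PARTITION p2023 VALUES LESS THAN ('2024-01-01'),
-    PARTITION p2024 VALUES LESS THAN ('2025-01-01')
-);
-```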
- -## Related Pages - -[Partition Table](../../reference-guide/sql-reference/partition-table.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/6-support-for-advanced-analysis-functions.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/6-support-for-advanced-analysis-functions.md deleted file mode 100644 index 8a56fee7..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/6-support-for-advanced-analysis-functions.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Support for Advanced Analysis Functions -summary: Support for Advanced Analysis Functions -author: Guo Huan -date: 2022-05-07 ---- - -# Support for Advanced Analysis Functions - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -None - -## Benefits - -Window functions are provided for advanced data analysis and processing. The window function groups the data in a table in advance. Each row belongs to a specific group. Then, a series of association analysis calculations are performed on the group. In this way, some attributes of each tuple in the set and association information with other tuples can be mined. - -## Description - -The following uses an example to describe the window analysis function: Compare the salary of each person in a department with the average salary of the department. - -```sql -SELECT depname, empno, salary, avg(salary) OVER (PARTITION BY depname) FROM empsalary; -depname | empno | salary | avg ------------+-------+--------+----------------------- -develop | 11 | 5200 | 5020.0000000000000000 -develop | 7 | 4200 | 5020.0000000000000000 -develop | 9 | 4500 | 5020.0000000000000000 -develop | 8 | 6000 | 5020.0000000000000000 -develop | 10 | 5200 | 5020.0000000000000000 -personnel | 5 | 3500 | 3700.0000000000000000 -personnel | 2 | 3900 | 3700.0000000000000000 -sales | 3 | 4800 | 4866.6666666666666667 -sales | 1 | 5000 | 4866.6666666666666667 -sales | 4 | 4800 | 4866.6666666666666667 -(10 rows) -``` - -The analysis function **avg(salary) OVER (PARTITION BY depname)** easily calculates each employee's salary and the average salary of the department. - -Currently, the system supports the following analysis functions: **row_number()**, **rank()**, **dense_rank()**, **percent_rank()**, **cume_dist()**, **ntile()**, **lag()**, **lead()**, **first_value()**, **last_value()**, and **nth_value()**. For details about functions and statements, see “Functions and Operators > [Window Functions](../../reference-guide/functions-and-operators/window-functions.md)” in the *Reference Guide*. - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - -[Window Functions(Analysis Functions)](../../reference-guide/functions-and-operators/window-functions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/7-materialized-view.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/7-materialized-view.md deleted file mode 100644 index e6b36d25..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/7-materialized-view.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Materialized View -summary: Materialized View -author: Guo Huan -date: 2022-05-07 ---- - -# Materialized View - -## Availability - -This feature is available since MogDB 1.1.0. 
-
-## Introduction
-
-A materialized view is a special physical table, in contrast to a common view. A common view is a virtual table and has many application limitations: any query on a view is actually converted into a query on the underlying SQL statement, so performance is not actually improved. A materialized view actually stores the results of executing the SQL statement that defines it and serves as a cache of those results.
-
-## Benefits
-
-The materialized view function is used to improve query efficiency.
-
-## Description
-
-Full materialized views and incremental materialized views are supported. Full materialized views can only be updated in full mode. Incremental materialized views can be updated asynchronously; you can run statements to update new data to them.
-
-## Enhancements
-
-None
-
-## Constraints
-
-Only simple filter queries and UNION ALL statements are supported for base tables.
-
-## Dependencies
-
-None
-
-## Related Pages
-
-[Materialized View](../../developer-guide/materialized-view/1-materialized-view-overview.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/8-hyperloglog.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/8-hyperloglog.md
deleted file mode 100644
index 974b03bd..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/8-hyperloglog.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: HyperLogLog
-summary: HyperLogLog
-author: Guo Huan
-date: 2022-05-07
----
-
-# HyperLogLog
-
-## Availability
-
-This feature is available as of MogDB 1.1.0.
-
-## Introduction
-
-HyperLogLog (HLL) is used to count the number of distinct values.
-
-## Benefits
-
-Improves AP/TP query performance.
-
-## Description
-
-HLL is an approximation algorithm for efficiently counting the number of distinct values in a dataset. It features faster computation and lower space usage: you only need to store the HLL data structure instead of the dataset itself. When new data is added to the dataset, hash it and insert the result into the HLL; the final result can then be obtained from the HLL.
-
-HLL has advantages over other approaches in computing speed and storage space requirements. In terms of time complexity, the Sort algorithm needs at least O(n log n) time to sort, and although the Hash algorithm can obtain the result by scanning the entire table in O(n) time, both the Sort and Hash algorithms need to store the original data before collecting statistics, which consumes a large amount of storage space. For HLL, the original data does not need to be stored; only the HLL data structure needs to be maintained, so the occupied space stays at a constant level of 1280 bytes.
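-
-A minimal usage sketch (the hll type, functions, and operator are listed in the HLL functions reference; the table and values are illustrative):
-
-```sql
--- Store one HLL per row and estimate distinct values from it.
-CREATE TABLE helloworld (id integer, set hll);
-INSERT INTO helloworld VALUES (1, hll_empty());
--- Hash a value and add it to the HLL.
-UPDATE helloworld SET set = hll_add(set, hll_hash_integer(12345)) WHERE id = 1;
--- The # operator returns the estimated cardinality.
-SELECT #set AS distinct_count FROM helloworld WHERE id = 1;
-```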
-
-## Enhancements
-
-None
-
-## Constraints
-
-None
-
-## Dependencies
-
-None
-
-## Related Pages
-
-[HLL Functions And Operators](../../reference-guide/functions-and-operators/hll-functions-and-operators.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/9-creating-an-index-online.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/9-creating-an-index-online.md
deleted file mode 100644
index b070763c..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/9-creating-an-index-online.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: Creating an Index Online
-summary: Creating an Index Online
-author: Guo Huan
-date: 2022-05-07
----
-
-# Creating an Index Online
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-Uses the CREATE INDEX CONCURRENTLY syntax to create indexes online without blocking DML.
-
-## Benefits
-
-When creating an index, you can specify the CONCURRENTLY keyword to ensure that DML and online services are not blocked during the index creation.
-
-## Description
-
-A normal CREATE INDEX acquires an exclusive lock on the table on which the index depends, blocking other accesses until the index creation is completed. If the CONCURRENTLY keyword is specified, a ShareUpdateExclusiveLock is taken on the table instead, so that DML is not blocked during the creation.
-
-When this keyword is specified, the entire table is scanned and built twice: the first scan builds the index without blocking read and write operations; the second scan merges and applies the changes that have occurred since the first scan. Because the table is scanned and built twice and all existing transactions that may modify the table must complete first, creating the index takes longer than normal. In addition, the extra CPU and I/O consumption may affect other services.
-
-## Enhancements
-
-None
-
-## Constraints
-
-- Only one index name can be specified when an index is created online.
-- The CREATE INDEX statement can be run within a transaction, but CREATE INDEX CONCURRENTLY cannot.
-- Column-store tables and temporary tables do not support **CREATE INDEX CONCURRENTLY**.
-- Partitioned tables support **CREATE GLOBAL INDEX CONCURRENTLY**, but do not support **CREATE LOCAL INDEX CONCURRENTLY**.
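-
-A minimal sketch (the table and index names are illustrative):
-
-```sql
--- Runs outside a transaction block and does not block concurrent DML on t1.
-CREATE INDEX CONCURRENTLY idx_t1_c1 ON t1 (c1);
-```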
- -## Dependencies - -None - -## Related Pages - -[CREATE INDEX](../../reference-guide/sql-syntax/CREATE-INDEX.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/enterprise-level-features.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/enterprise-level-features.md deleted file mode 100644 index 592bd802..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/enterprise-level-features.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Enterprise-Level Features -summary: Enterprise-Level Features -author: Guo Huan -date: 2023-05-22 ---- - -# Enterprise-Level Features - -+ **[Support for Functions and Stored Procedures](1-support-for-functions-and-stored-procedures.md)** -+ **[SQL Hints](2-sql-hints.md)** -+ **[Full-Text Indexing](3-full-text-indexing.md)** -+ **[Copy Interface for Error Tolerance](4-copy-interface-for-error-tolerance.md)** -+ **[Partitioning](5-partitioning.md)** -+ **[Support for Advanced Analysis Functions](6-support-for-advanced-analysis-functions.md)** -+ **[Materialized View](7-materialized-view.md)** -+ **[HyperLogLog](8-hyperloglog.md)** -+ **[Creating an Index Online](9-creating-an-index-online.md)** -+ **[Autonomous Transaction](10-autonomous-transaction.md)** -+ **[Global Temporary Table](11-global-temporary-table.md)** -+ **[Pseudocolumn ROWNUM](12-pseudocolumn-rownum.md)** -+ **[Stored Procedure Debugging](13-stored-procedure-debugging.md)** -+ **[JDBC Client Load Balancing and Read/Write Isolation](14-jdbc-client-load-balancing-and-readwrite-isolation.md)** -+ **[In-place Update Storage Engine](15-in-place-update-storage-engine.md)** -+ **[Publication-Subscription](16-publication-subscription.md)** -+ **[Foreign Key Lock Enhancement](17-foreign-key-lock-enhancement.md)** -+ **[Data Compression in OLTP Scenarios](18-data-compression-in-oltp-scenarios.md)** -+ **[Transaction Asynchronous Submit](19-transaction-async-submit.md)** -+ **[Index Creation Parallel Control](23-index-creation-parallel-control.md)** -+ **[Dynamic Partition Pruning](21-dynamic-partition-pruning.md)** -+ **[COPY Import Optimization](20-copy-import-optimization.md)** -+ **[SQL Running Status Observation](22-sql-running-status-observation.md)** -+ **[BRIN Index](24-brin-index.md)** -+ **[BLOOM Index](25-bloom-index.md)** -+ **[Event Trigger](event-trigger.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/event-trigger.md b/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/event-trigger.md deleted file mode 100644 index 4ee91f21..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/enterprise-level-features/event-trigger.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Event Trigger -summary: Event Trigger -author: Guo Huan -date: 2023-04-04 ---- - -# Event Trigger - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -An event trigger is used to capture and process DDL operations. - -## Benefits - -A typical application scenario is as follows: An event trigger is used to capture DDL operations and target objects to implement data synchronization. - -## Description - -The event trigger can capture DDL operations in the current database and target objects of DDL operations. It cannot capture operations on shared objects, such as databases, roles, and tablespaces. 
The event trigger can capture four types of events: ddl_command_start, ddl_command_end, sql_drop, and table_rewrite.
-
-1. The ddl_command_start event occurs before the CREATE, ALTER, DROP, SECURITY LABEL, COMMENT, GRANT, or REVOKE statement is executed. Whether the affected object exists is not checked before the trigger fires.
-2. The ddl_command_end event captures DDL operations and occurs after DDL execution.
-3. The sql_drop event captures any operation that deletes database objects and is executed before ddl_command_end.
-4. The table_rewrite event occurs only before a table is rewritten by some actions of the ALTER TABLE and ALTER TYPE statements.
-
-## Constraints
-
-- This feature is available only in PG-compatible mode.
-- Only the system administrator can add, delete, and modify event triggers.
-- Built-in functions related to the event trigger can be used only in event trigger functions (the return type is event_trigger).
-
-## Dependencies
-
-None.
-
-## Related Pages
-
-[CREATE EVENT TRIGGER](../../reference-guide/sql-syntax/CREATE-EVENT-TRIGGER.md), [ALTER EVENT TRIGGER](../../reference-guide/sql-syntax/ALTER-EVENT-TRIGGER.md), [DROP EVENT TRIGGER](../../reference-guide/sql-syntax/DROP-EVENT-TRIGGER.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/1-primary-standby.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/1-primary-standby.md
deleted file mode 100644
index 4ed84ead..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/1-primary-standby.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-title: Primary/Standby
-summary: Primary/Standby
-author: Guo Huan
-date: 2022-05-07
----
-
-# Primary/Standby
-
-## Availability
-
-This feature is available since MogDB 1.1.0.
-
-## Introduction
-
-To ensure that faults can be rectified, data needs to be written in multiple copies. Multiple copies are configured for the primary and standby nodes, and logs are used for data synchronization. In this way, MogDB loses no data when a node is faulty or the system restarts after a stop, meeting the ACID requirements.
-
-## Benefits
-
-Services can be switched to the standby node when the primary node is faulty, so data is not lost and services can be quickly restored.
-
-## Description
-
-You can deploy the one-primary-multiple-standby mode. In this mode, all standby nodes redo logs and can be promoted to primary. The one-primary-multiple-standby mode provides higher DR capabilities and is more suitable for OLTP systems that process a large number of transactions.
-
-The **switchover** command can be used to trigger a switchover between the primary and standby nodes. If the primary node is faulty, the **failover** command can be used to promote a standby node to primary.
-
-In scenarios such as initial installation or backup and restoration, data on the standby node needs to be rebuilt based on the primary node. In this case, the build function is required to send the data and WALs of the primary node to the standby node. When a faulty primary node rejoins as a standby node, the build function also needs to be used to synchronize its data and WALs with those of the new primary node. In addition, in online capacity expansion scenarios, build is needed to synchronize metadata to instances on new nodes. Build includes full build and incremental build.
Full build depends on primary node data for rebuild. The amount of data to be copied is large and the time required is long. Incremental build copies only differential files. The amount of data to be copied is small and the time required is short. Generally, the incremental build is preferred for fault recovery. If the incremental build fails, the full build continues until the fault is rectified. - -To implement HA DR for all instances, in addition to the preceding primary/standby multi-copy replication configured for DNs, MogDB also provides other primary/standby DR capabilities, such as CM server (one primary and multiple standbys) and ETCD (one primary and multiple standbys). In this way, instances can be recovered as soon as possible without interrupting services, minimizing the impact of hardware, software, and human errors on services and ensuring service continuity. - -## Enhancements - -None. - -## Constraints - -None. - -## Dependencies - -None. - -## Related Pages - -[Primary and Standby Management](../../administrator-guide/primary-and-standby-management.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/10-adding-or-deleting-a-standby-server.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/10-adding-or-deleting-a-standby-server.md deleted file mode 100644 index aecc1c95..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/10-adding-or-deleting-a-standby-server.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Adding or Deleting a Standby Node -summary: Adding or Deleting a Standby Node -author: Guo Huan -date: 2022-05-07 ---- - -# Adding or Deleting a Standby Node - -## Availability - -This feature is available since MogDB 2.0.0. - -## Introduction - -Standby nodes can be added and deleted. - -## Benefits - -If the read pressure of the primary node is high or you want to improve the disaster recovery capability of the database, you need to add a standby node. If some standby nodes in a cluster are faulty and cannot be recovered within a short period of time, you can delete the faulty nodes to ensure that the cluster is running properly. - -## Description - -MogDB can be scaled out from a single node or one primary and multiple standbys to one primary and eight standbys. Cascaded standby nodes can be added. Standby nodes can be added when a faulty standby node exists in the cluster. One primary and multiple standbys can be scaled in to a single node. A faulty standby node can be deleted. - -Standby nodes can be added or deleted online without affecting the primary node. - -## Enhancements - -None. - -## Constraints - -For adding a standby node: - -- Ensure that the MogDB image package exists on the primary node. -- Ensure that the same users and user groups as those on the primary node have been created on the new standby node. -- Ensure that the mutual trust of user **root** and the database management user has been established between the existing database nodes and the new nodes. -- Ensure that the XML file has been properly configured and information about the standby node to be scaled has been added to the installed database configuration file. -- Ensure that only user **root** is authorized to run the scale-out command. -- Do not run the **gs_dropnode** command on the primary node to delete other standby nodes at the same time. -- Ensure that the environment variables of the primary node have been imported before the scale-out command is run. 
-- Ensure that the operating system of the new standby node is the same as that of the primary node.
-- Do not perform a primary/standby switchover or failover on other standby nodes at the same time.
-
-For deleting a standby node:
-
-- Delete the standby node only on the primary node.
-- Do not perform a primary/standby switchover or failover on other standby nodes at the same time.
-- Do not run the **gs_expansion** command on the primary node for scale-out at the same time.
-- Do not run the **gs_dropnode** command twice at the same time.
-- Before deletion, ensure that the database management user trust relationship has been established between the primary and standby nodes.
-- Run this command as a database administrator.
-- Before running commands, run the **source** command to import environment variables of the primary node.
-
-## Dependencies
-
-None.
-
-## Related Pages
-
-[gs_dropnode](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_dropnode.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/11-delaying-entering-the-maximum-availability-mode.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/11-delaying-entering-the-maximum-availability-mode.md
deleted file mode 100644
index 7f3ad021..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/11-delaying-entering-the-maximum-availability-mode.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-title: Delaying Entering the Maximum Availability Mode
-summary: Delaying Entering the Maximum Availability Mode
-author: Guo Huan
-date: 2022-05-10
----
-
-# Delaying Entering the Maximum Availability Mode
-
-## Availability
-
-This feature is available since MogDB 3.0.0.
-
-## Introduction
-
-The primary node can be delayed to enter the maximum availability mode.
-
-## Benefits
-
-When the primary node detects that a standby node has exited due to network instability or other reasons while the maximum availability mode is enabled on the primary node, the primary node remains in the maximum protection mode within a specified time window. After the time window expires, the primary node enters the maximum availability mode.
-
-This prevents the primary node from frequently switching between the maximum protection mode and the maximum availability mode due to factors such as network jitter and intermittent process disconnection.
-
-## Description
-
-If **most_available_sync** is set to **on**, when synchronous standby nodes are faulty in primary/standby scenarios and the number of configured synchronous standby nodes is insufficient (for details, see the meaning of **synchronous_standby_names**), setting **keep_sync_window** retains the maximum protection mode within the time window specified by **keep_sync_window**. That is, committing transactions on the primary node is blocked, delaying the primary node's entry into the maximum availability mode.
-
-If the synchronous standby nodes recover from faults and their number meets the configuration requirements, transactions are no longer blocked.
-
-## Enhancements
-
-None.
-
-## Constraints
-
-- This feature takes effect only when the maximum availability mode is enabled.
-- Enabling this feature may affect the RPO. If the primary node is faulty within the configured timeout window, its transactions are committed locally but not synchronized to the faulty synchronous standby nodes.
-- This feature does not apply to cascaded standby nodes.
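-
-A minimal configuration sketch of the behavior described above (the 5-second window is illustrative, and it is assumed here that both parameters can be reloaded with ALTER SYSTEM SET; gs_guc reload can be used instead):
-
-```sql
--- Enable maximum availability mode, but keep the maximum protection mode
--- for a 5-second window before degrading (illustrative value).
-ALTER SYSTEM SET most_available_sync = on;
-ALTER SYSTEM SET keep_sync_window = 5;
-```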
- -## Dependencies - -This feature depends on the maximum availability mode. - -## Related Pages - -[Primary Server](../../reference-guide/guc-parameters/ha-replication/primary-server.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/12-parallel-logical-decoding.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/12-parallel-logical-decoding.md deleted file mode 100644 index 544ae34b..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/12-parallel-logical-decoding.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Parallel Logical Decoding -summary: Parallel Logical Decoding -author: Guo Huan -date: 2022-05-10 ---- - -# Parallel Logical Decoding - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -Multi-thread parallel decoding is supported. - -## Benefits - -The logical decoding performance is greatly improved and the decoding speed can be improved from 3 to 5 Mbit/s to 100 Mbit/s in standard scenarios (16-core CPU, 128 GB memory, network bandwidth > 200 Mbit/s, 10 to 100 columns in a table, 0.1 KB to 1 KB data volume in a single row, insert as the main DML operation, no flush transaction is involved, that is, number of statements in a single transaction < 4096). - -## Description - -When JDBC or pg_recvlogical is used for decoding, you can set **parallel-decode-num** to a value greater than 1 and less than or equal to 20 to enable parallel decoding. In this way, one read thread, multiple decoding threads, and one sending thread are used to perform logical decoding, significantly improving the decoding speed. - -## Enhancements - -None. - -## Constraints - -1. The hardware and network environment must be normal. The size of a logical log is twice that of an Xlog. To ensure that the speed of the Xlog reaches 100 Mbit/s, the I/O bandwidth must be at least 200 Mbit/s. Resources need to be reserved for the reader, decoder, and sender threads. The number of CPU cores to be reserved must be the number of concurrent threads plus 2. For example, if there are four concurrent threads, six CPU cores need to be reserved. In actual scenarios, decoding on the standby node can meet the requirements, and no special resource reservation planning is required. To ensure that the decoding performance meets the requirements and minimize the impact on services, you are advised to set up only one parallel decoding connection on a standby node to ensure that the CPU, memory, and bandwidth resources are sufficient. -2. The value of GUC parameter **wal_level** of the log level is **logical**. -3. The GUC parameter **max_replication_slots** is set to a value greater than or equal to the number of physical streaming replication slots, backup slots, and logical replication slots required by each DN. -4. The value of the decoding configuration option **parallel-decode-num** is greater than 1 and less than or equal to 20, specifying the number of concurrent decoding threads. -5. Decoding into DDL statements is not supported. -6. Decoding is not supported for column-store data and data page replication. -7. The size of a single tuple cannot exceed 1 GB. Decoding results may be larger than inserted data. Therefore, it is recommended that the size of a single tuple be less than or equal to 500 MB. -8. Decoding compressed tables into DML statements is not supported. -9. 
MogDB supports the following data types for decoding: **INTEGER**, **BIGINT**, **SMALLILNT**, **TINYINT**, **SERIAL**, **SMALLSERIAL**, **BIGSERIAL**, **FLOAT**, **DOUBLE PRECISION**, **DATE**, **TIME[WITHOUT TIME ZONE]**, **TIMESTAMP[WITHOUT TIME ZONE]**, **CHAR(***n***)**, **VARCHAR(***n***)**, and **TEXT**. -10. If the SSL connection is required, the GUC parameter **ssl** must be set to **on**. -11. Interval partitioned tables cannot be replicated. -12. After a DDL statement is executed in a transaction, the DDL statement and subsequent statements are not decoded. -13. To perform decoding on the standby node, set the GUC parameter **enable_slot_log** to **on** on the corresponding host. -14. Currently, ultra-large CLOB decoding is not supported. -15. The same replication slot for decoding cannot be used between the primary node and standby node or between different standby nodes at the same time. Otherwise, data inconsistency will occur. -16. Do not perform operations on the replication slot on other nodes when the logical replication slot is in use. To delete a replication slot, stop decoding in the replication slot first. -17. Only the built-in 'j' style export is supported, and the wal2json extension is not supported. - -## Related Pages - -[Example Logic Replication Code](../../developer-guide/dev/2-development-based-on-jdbc/14-example-logic-replication-code.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/13-dcf.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/13-dcf.md deleted file mode 100644 index 92205dea..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/13-dcf.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: DCF -summary: DCF -author: Guo Huan -date: 2022-05-10 ---- - -# DCF - -## Availability - -This feature is available since MogDB 2.0.0. - -## Introduction - -Distributed consensus framework (DCF) implements data synchronization based on the Paxos algorithm. After the DCF mode is enabled, DNs support Paxos-based replication and quorum capabilities. - -## Benefits - -Primary DN selection and log replication are performed based on Paxos. Compression and flow control are supported during the replication to prevent high bandwidth usage. DCF provides capabilities such as log replication and cluster HA. DCF supports automatic leader election, forcible minority startup, and dynamic traffic adjustment for log replication. In addition, multiple Paxos-based roles are provided and can be adjusted. You can query the instance status in the current database. - -DCF is a high-performance, highly mature, reliable, scalable, and easy-to-use independent basic library. Other systems can easily interconnect with DCF through interfaces to obtain the strong consistency, high availability, and automatic disaster recovery capabilities provided by the Paxos algorithm. - -## Description - -- During log replication, DCF supports log compression before transmission to reduce network bandwidth usage. -- DCF supports SSL, including TLS 1.2 and TLS 1.3. When SSL is enabled, the DN configures DCF as TLS 1.2 by default. -- DCF supports five TLS 1.3 cipher suites: TLS13-AES-256-GCM-SHA384, TLS13-CHACHA20-POLY1305-SHA256, TLS13-AES-128-GCM-SHA256, TLS13-AES-128-CCM-8-SHA256 and TLS13-AES-128-CCM-SHA256. -- DCF supports passive nodes, which do not participate in leader election and only synchronize and replay logs. 
When such nodes are heavily loaded during log synchronization, flow control is performed. -- DCF supports logger nodes, which participate in leader election and voting. However, the logger nodes only replicate DCF logs. Xlogs are not replicated and redo is not performed. -- DCF supports online switchover between follower and passive nodes. That is, without interrupting services, a node can switch over between the follower and passive roles. -- DCF supports forcible startup of the minority of DNs. If the majority of database instances are faulty, a minority of standby DNs are selected and forcibly promoted to primary, and other normal standby DNs replicate logs from the primary DNs. -- DCF supports automatic leader election. When the original primary DN is faulty, a new primary DN is automatically selected from the remaining standby DNs on the premise that data consistency is ensured. -- DCF supports the policy-based majority capability. Based on the configured AZ, DCF ensures that at least one node in the AZ synchronizes replication logs. - -## Enhancements - -None. - -## Constraints - -To use this function, you need to enable DCF during installation and deployment. In DCF mode, the majority election is used. During the installation, if the number of faulty nodes and build nodes reaches the majority, the cluster installation will fail. For example, when one primary node and two standby nodes are installed, one node fails to be installed due to insufficient memory, but the other two nodes can be started normally. A standby node will be built again. In this case, the number of build nodes and faulty nodes is 2, which is the majority, and the cluster installation will fail. During the installation, check whether the memory and disk resources are sufficient. - -If the policy-based majority parameter is configured for an AZ and all nodes in the AZ are faulty, you need to remove the AZ configuration from the policy-based majority configuration when performing build operations on the nodes. - -The manual mode is used to set the cluster-level working mode. In this mode, the logger or passive role is not supported. - -## Dependencies - -None. - -## Related Pages - -[DCF](../../high-available-guide/high-available-dcf.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/14-cm.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/14-cm.md deleted file mode 100644 index 6c13f3d6..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/14-cm.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: CM -summary: CM -author: Guo Huan -date: 2022-05-10 ---- - -# CM - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -Cluster manager (CM) is database management software that consists of cm_server, cm_agent, om_monitor, and cm_ctl. Its core features are as follows. - -- cm_server - - Send commands (such as start, stop, status query, switchover and failover of database instances) to cm_agent on each node and receive responses; - - Receive the database instance status information reported by cm_agent on each node; - - Highly available arbitration of cm_server itself on each node; - - Highly available arbitration of database instances on each node.
- -- cm_agent - - Receive and execute commands from cm_server, such as start, stop, status query, switchover and failover of database instances; - - Monitor the status of the database instance running on the current node and report it to cm_server. - -- om_monitor - - Monitor the cm_agent service running on the current node to ensure its availability. - -- cm_ctl - - Client tool for CM. - -## Benefits - -It manages and monitors the running status of functional units and physical resources in a database system, ensuring stable running of the system. - -## Description - -It supports customized resource monitoring and provides capabilities such as monitoring of the primary/standby database status, network communication faults, file system faults, and automatic primary/standby switchover upon faults. It also provides various database management capabilities, such as starting and stopping nodes and instances, querying database instance status, performing primary/standby switchover, and managing logs. - -## Enhancements - -The CM supports external status query and push. Additionally, CM supports two-node deployment. - -- The HTTP/HTTPS service is used to remotely query the cluster status, helping management personnel and O&M platforms monitor the cluster status. - -- When a primary/standby switchover occurs in the database cluster, the latest primary/standby information of the cluster is pushed to the receiving address registered by the application through the HTTP/HTTPS service in time. In this way, the application can detect the primary/standby change of the cluster in time and quickly connect to the new primary and standby nodes. - -## Constraints - -In scenarios where there are one primary node and one standby node, CM does not support enabling the DCF configuration. - -## Dependencies - -Highly available arbitration relies on the DCF, DCC, and CBB components. - -## Related Pages - -[Cluster Management](../../high-available-guide/cluster-management/cluster-management.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/15-global-syscache.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/15-global-syscache.md deleted file mode 100644 index 33a39fd5..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/15-global-syscache.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Global SysCache -summary: Global SysCache -author: Guo Huan -date: 2022-05-10 ---- - -# Global SysCache - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -Global SysCache is the global and local caches of system catalog data. Figure 1 illustrates the principle. - -**Figure 1** Global SysCache principle - - ![Global-SysCache principle](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/Global-SysCache-en.jpg) - -## Benefits - -Global SysCache reduces the cache memory usage of database processes and improves the concurrent expansion capability of a database. - -## Description - -Global SysCache decouples the system cache from sessions and binds them to threads to reduce the memory usage together with the thread pool feature. In addition, it is used to improve the cache hit rate and ensure stable performance. - -## Enhancements - -Supports more concurrent queries. - -## Constraints - -- Set **enable_global_syscache** to **on**. You are advised to set **enable_thread_pool** to **on**.
-- When the number of databases is large and the value of **global_syscache_threshold** is small, memory control cannot work properly and the performance deteriorates. -- Distributed time series tasks are not supported. The memory control and performance of these tasks are not affected by the GSC feature. -- If **wal_level** is set to **minimal** or **archive**, the query performance of the standby server deteriorates and short connections are used. - -## Dependencies - -The memory reduction capability of this feature depends on the thread pool feature. - -## Related Pages - -[Global SysCache Parameters](../../reference-guide/guc-parameters/global-syscache-parameters.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/16-using-a-standby-node-to-build-a-standby-node.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/16-using-a-standby-node-to-build-a-standby-node.md deleted file mode 100644 index b53c2c59..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/16-using-a-standby-node-to-build-a-standby-node.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Using a Standby Node to Build a Standby Node -summary: Using a Standby Node to Build a Standby Node -author: Guo Huan -date: 2022-05-10 ---- - -# Using a Standby Node to Build a Standby Node - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -A standby node can be built by another standby node to accelerate standby node recovery from faults. The I/O and bandwidth pressure of the primary node can be reduced. - -## Benefits - -When the service load is heavy, building a standby node by using the primary node increases the resource burden on the primary node. As a result, the performance of the primary node deteriorates and the build becomes slow. Building a standby node by using a standby node does not affect services on the primary node. - -## Description - -You can run the **gs_ctl** command to specify a standby node to build another standby node to be repaired. For details, see “Tools Used in the Internal System > [gs_ctl](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_ctl.md)” in *Tool Reference*. - -## Enhancements - -None. - -## Constraints - -A standby node can only be used to build another standby node. You can only use a specified IP address and port number to build the standby node. Before building the standby node, ensure that the logs of the standby node to be repaired are later than those of the standby node that sends data. - -## Dependencies - -None. - -## Related Pages - -[gs_ctl](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_ctl.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/17-two-city-three-dc-dr.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/17-two-city-three-dc-dr.md deleted file mode 100644 index eeb0cba0..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/17-two-city-three-dc-dr.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Two-City Three-DC DR -summary: Two-City Three-DC DR -author: zhang cuiping -date: 2022-10-13 ---- - -# Two-City Three-DC DR - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -MogDB 3.1.0 supports two-city three-DC DR. 
- -## Benefits - -The services require the underlying database to provide cross-region DR capabilities to ensure data security and availability in case of extreme disasters. - -## Description - -Finance and banking industries require high data security. In the case of extreme disasters such as fires, earthquakes, and wars, data security must be ensured. Therefore, a cross-region DR solution is required. Cross-region DR refers to the scenario where the distance between the primary and standby data centers is greater than 200 km. When the primary data center encounters an extreme disaster, the standby data center can take over services. This feature provides a cross-region MogDB DR solution. - -This feature provides the following solutions: - -Streaming replication-based remote DR solution (This solution is provided in MogDB 3.1.0 and later versions.) - -## Enhancements - -In MogDB 3.1.0, the remote DR solution based on streaming replication is added for the two-city three-DC cross-region DR feature. - -- DR database failover -- Planned switchover between the primary and standby database instances - -**Streaming replication-based remote DR solution**: - -For details about the constraints, see section [Two-City Three-DC DR](../../administrator-guide/database-deployment-scenario/two-city-three-dc-dr.md) in *Administrator Guide*. - -## Dependencies - -None - -## Related Pages - -[Two-City Three-DC DR](../../administrator-guide/database-deployment-scenario/two-city-three-dc-dr.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/2-logical-replication.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/2-logical-replication.md deleted file mode 100644 index d7724b5f..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/2-logical-replication.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Logical Replication -summary: Logical Replication -author: Guo Huan -date: 2022-05-07 ---- - -# Logical Replication - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -MogDB provides the logical decoding function to parse physical logs back into logical logs. Logical replication tools such as DRS convert logical logs to SQL statements and replay the SQL statements in the peer database. In this way, data can be synchronized between heterogeneous databases. Currently, unidirectional and bidirectional logical replication between the MogDB database and the MySQL or Oracle database is supported. - -## Benefits - -Logical replication is applicable to real-time database data migration, dual-database active-active systems, and rolling upgrades. - -## Description - -DNs parse physical logs back into logical logs. Logical replication tools such as DRS extract logical logs from DNs, convert the logs to SQL statements, and replay the SQL statements in MySQL. Logical replication tools also extract logical logs from a MySQL database, parse the logs into SQL statements, and replay the SQL statements in MogDB. In this way, data can be synchronized between heterogeneous databases. - -## Enhancements - -- MogDB 1.1.0 logical decoding supports the extraction of logs from full and incremental logs. -- MogDB 1.1.0 supports logical decoding on a standby node. - -## Constraints - -Column-store replication and DDL replication are not supported. - -## Dependencies - -It depends on logical replication tools that decode logical logs.
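For illustration, a minimal logical decoding sketch using SQL functions; the mppdb_decoding plugin name and the exact output format are assumptions based on comparable openGauss-family builds, and **wal_level** must be set to **logical** first:

```sql
-- Create a logical replication slot (assumed plugin name: mppdb_decoding).
SELECT * FROM pg_create_logical_replication_slot('demo_slot', 'mppdb_decoding');

-- Generate a change to decode.
CREATE TABLE repl_demo(id int PRIMARY KEY, val text);
INSERT INTO repl_demo VALUES (1, 'hello');

-- Peek at the decoded logical log without consuming it.
SELECT data FROM pg_logical_slot_peek_changes('demo_slot', NULL, NULL);

-- Drop the slot when decoding is no longer needed.
SELECT pg_drop_replication_slot('demo_slot');
```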
- -## Related Pages - -[Logical Replication](../../developer-guide/logical-replication/logical-replication.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/4-logical-backup.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/4-logical-backup.md deleted file mode 100644 index e4bc0779..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/4-logical-backup.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Logical Backup -summary: Logical Backup -author: Guo Huan -date: 2022-05-07 ---- - -# Logical Backup - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Data in user tables in the database is backed up to a specified storage medium in a general format. - -## Benefits - -Through logical backup, you can achieve the following purposes: - -- Back up user data to a reliable storage medium to secure data. -- Support cross-version recovery and heterogeneous recovery using a general data format. -- Archive cold data. - -## Description - -MogDB provides the logical backup capability to back up data in user tables to local disk files in text or CSV format and restore the data in homogeneous or heterogeneous databases. - -## Enhancements - -None - -## Constraints - -For details about the restrictions on logical backup, see [gs_dump](../../reference-guide/tool-reference/server-tools/gs_dump.md) in the *Tool Reference*. - -## Dependencies - -None - -## Related Pages - -[Logical Backup And Restoration](../../administrator-guide/backup-and-restoration/logical-backup-and-restoration.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/5-physical-backup.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/5-physical-backup.md deleted file mode 100644 index 04c86846..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/5-physical-backup.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Physical Backup -summary: Physical Backup -author: Guo Huan -date: 2022-05-07 ---- - -# Physical Backup - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Data in the entire database is backed up to a specified storage medium in an internal format. - -## Benefits - -Through physical backup, you can achieve the following purposes: - -- Back up data of the entire database to a reliable storage medium, improving system reliability. -- Improve backup and restoration performance using an internal data format. -- Archive cold data. - -The typical physical backup policy and application scenario are as follows: - -- On Monday, perform a full backup of the database. -- On Tuesday, perform an incremental backup based on the full backup on Monday. -- On Wednesday, perform an incremental backup based on the incremental backup on Tuesday. -- … -- On Sunday, perform an incremental backup based on the incremental backup on Saturday. - -The preceding backup operations are executed every week. - -## Description - -MogDB provides the physical backup capability to back up data of the entire database to local disk files, OBS objects, NBU objects, or EISOO objects in the internal database format, and restore data of the entire database in a homogeneous database. In addition to the preceding functions, it also provides advanced functions such as compression, flow control, and resumable backup. 
- -Physical backup is classified into full backup and incremental backup. The difference is as follows: Full backup includes the full data of the database at the backup time point. The time required for full backup is long (in direct proportion to the total data volume of the database), and a complete database can be restored. An incremental backup involves only incremental data modified after a specified time point. It takes a short period of time (in direct proportion to the incremental data volume and irrelevant to the total data volume). However, a complete database can be restored only after the incremental backup and full backup are performed. - -## Enhancements - -Supports full backup and incremental backup simultaneously. - -## Constraints - -For details about the constraints of physical backup, see Backup and Restoration > [Physical Backup and Restoration](../../administrator-guide/backup-and-restoration/physical-backup-and-restoration.md) in *Administrator Guide*. - -## Dependencies - -None. - -## Related Pages - -[Physical Backup and Restoration](../../administrator-guide/backup-and-restoration/physical-backup-and-restoration.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/6-automatic-job-retry-upon-failure.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/6-automatic-job-retry-upon-failure.md deleted file mode 100644 index f703ecbe..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/6-automatic-job-retry-upon-failure.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Automatic Job Retry upon Failure -summary: Automatic Job Retry upon Failure -author: Guo Huan -date: 2022-05-07 ---- - -# Automatic Job Retry upon Failure - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -If an error occurs in batch processing jobs due to network exceptions or deadlocks, failed jobs are automatically retried. - -## Benefits - -In common fault scenarios, such as network exception and deadlock, queries retry automatically in case of failure to improve database usability. - -## Description - -MogDB provides the job retry mechanism: gsql Retry. - -- The gsql retry mechanism uses a unique error code (SQL STATE) to identify an error that requires a retry. The function of the client tool gsql is enhanced. The error code configuration file **retry_errcodes.conf** is used to configure the list of errors that require a retry. The file is stored in the installation directory at the same level as gsql. **gsql** provides the **\set RETRY** [*number*] command to enable or disable the retry function. The number of retry times ranges from 5 to 10, and the default value is **5**. When this function is enabled, **gsql** reads the preceding configuration file. The error retry controller records the error code list through the container. If an error occurs in the configuration file after the function is enabled, the controller sends the cached query statement to the server for retry until the query is successful or an error is reported when the number of retry times exceeds the maximum. - -## Enhancements - -None - -## Constraints - -- Functionality constraints: - - - Retrying increases execution success rate but does not guarantee success. - -- Error type constraints: - - Only the error types in Table 1 are supported. 
- - **Table 1** Supported error types - - | Error Type | Error Code | Remarks | - | :-------------------------------- | :--------- | :----------------------------------------------------------- | - | CONNECTION_RESET_BY_PEER | YY001 | TCP communication error. Print information: “Connection reset by peer” | - | STREAM_CONNECTION_RESET_BY_PEER | YY002 | TCP communication error. Print information: “Stream connection reset by peer” (communication between DNs) | - | LOCK_WAIT_TIMEOUT | YY003 | Lock wait timeout. Print information: “Lock wait timeout” | - | CONNECTION_TIMED_OUT | YY004 | TCP communication error. Print information: “Connection timed out” | - | SET_QUERY_ERROR | YY005 | Failed to deliver the **SET** command. Print information: “Set query error” | - | OUT_OF_LOGICAL_MEMORY | YY006 | Failed to apply for memory. Print information: “Out of logical memory” | - | SCTP_MEMORY_ALLOC | YY007 | SCTP communication error. Print information: “Memory allocate error” | - | SCTP_NO_DATA_IN_BUFFER | YY008 | SCTP communication error. Print information: “SCTP no data in buffer” | - | SCTP_RELEASE_MEMORY_CLOSE | YY009 | SCTP communication error. Print information: “Release memory close” | - | SCTP_TCP_DISCONNECT | YY010 | SCTP and TCP communication error. Print information: “SCTP, TCP disconnect” | - | SCTP_DISCONNECT | YY011 | SCTP communication error. Print information: “SCTP disconnect” | - | SCTP_REMOTE_CLOSE | YY012 | SCTP communication error. Print information: “Stream closed by remote” | - | SCTP_WAIT_POLL_UNKNOW | YY013 | Waiting for an unknown poll. Print information: “SCTP wait poll unknow” | - | SNAPSHOT_INVALID | YY014 | Invalid snapshot. Print information: “Snapshot invalid” | - | ERRCODE_CONNECTION_RECEIVE_WRONG | YY015 | Failed to receive a connection. Print information: “Connection receive wrong” | - | OUT_OF_MEMORY | 53200 | Out of memory. Print information: “Out of memory” | - | CONNECTION_EXCEPTION | 08000 | Failed to communicate with DNs due to connection errors. Print information: “Connection exception” | - | ADMIN_SHUTDOWN | 57P01 | System shutdown by the administrator. Print information: “Admin shutdown” | - | STREAM_REMOTE_CLOSE_SOCKET | XX003 | Remote socket disabled. Print information: “Stream remote close socket” | - | ERRCODE_STREAM_DUPLICATE_QUERY_ID | XX009 | Duplicate query. Print information: “Duplicate query id” | - | ERRCODE_STREAM_CONCURRENT_UPDATE | YY016 | Concurrent stream query and update. Print information: “Stream concurrent update” | - -- Statement type constraints: - - Support single-statement stored procedures, functions, and anonymous blocks. Statements in transaction blocks are not supported. - -- Statement constraints of a stored procedure: - - - If an error occurs during the execution of a stored procedure containing EXCEPTION (including statement block execution and statement execution in EXCEPTION), the stored procedure can be retried. If the error is captured by EXCEPTION, the stored procedure cannot be retried. - - Advanced packages that use global variables are not supported. - - DBE_TASK is not supported. - - PKG_UTIL file operation is not supported. - -- Data import constraints: - - - The **COPY FROM STDIN** statement is not supported. - - The **gsql \copy from** metacommand is not supported. - - Data cannot be imported using **JDBC CopyManager copyIn**. - -## Dependencies - -Valid only if the **gsql** tool works normally and the error list is correctly configured. 
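As a quick illustration, the retry function is toggled from within a gsql session; a minimal sketch (the error codes that actually trigger a retry come from the **retry_errcodes.conf** file described above):

```sql
-- Enable automatic retry; the count must be between 5 and 10 (default 5).
\set RETRY 5

-- A statement failing with a configured error code (for example YY003,
-- lock wait timeout) is now resent from the client-side cache until it
-- succeeds or the retry count is exhausted.
SELECT count(*) FROM my_table;  -- my_table is a hypothetical table

-- Issuing the command again while retry is enabled toggles the function off.
\set RETRY
```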
- -## Related Pages - -[gsql](../../reference-guide/tool-reference/client-tool/gsql/client-tool-gsql.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/7-ultimate-rto.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/7-ultimate-rto.md deleted file mode 100644 index f5c9305a..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/7-ultimate-rto.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Ultimate RTO -summary: Ultimate RTO -author: Guo Huan -date: 2022-05-07 ---- - -# Ultimate RTO - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -- The database host can be quickly restored after being restarted. -- Logs can be synchronized between the primary and standby nodes to accelerate playback on the standby node. - -## Benefits - -When the service load is heavy, the playback speed of the standby node cannot catch up with that of the primary node. After the system runs for a long time, logs are accumulated on the standby node. If a host is faulty, data restoration takes a long time and the database is unavailable, which severely affects system availability. - -The ultimate recovery time objective (RTO) is enabled to reduce the data recovery time after a host fault occurs and improve availability. - -## Description - -After the ultimate RTO function is enabled, multi-level pipelines are established for Xlog playback to improve the concurrency and log playback speed. - -## Enhancements - -None. - -## Constraints - -The ultimate RTO focuses only on whether the RTO of the standby node meets the requirements. The ultimate RTO removes the built-in flow control and uses the **recovery_time_target** parameter for flow control. This feature does not support the read operation on the standby node. If you query the standby node, a core dump may occur on the standby node. - -## Dependencies - -None. - -## Related Pages - -[Log Replay](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/8-cascaded-standby-server.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/8-cascaded-standby-server.md deleted file mode 100644 index ebdffa5e..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/8-cascaded-standby-server.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Cascaded Standby Server -summary: Cascaded Standby Server -author: Guo Huan -date: 2022-05-07 ---- - -# Cascaded Standby Server - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -A cascaded standby server can be connected to a standby server based on the one-primary-multiple-standby architecture. - -## Benefits - -The one-primary-multiple-standby architecture cannot support a flexible structure in special service scenarios. The multi-equipment room deployment cannot meet requirements of the complete structure in the HA switchover scenario (three instances in the primary-standby equipment rooms and two or three instances in the standby-standby equipment rooms). If the number of standby servers increases, the primary server may be overloaded. Queries that have low real-time requirements can be implemented on cascaded standby servers. Therefore, the cascading backup capability is required.
- -## Description - -The primary server replicates logs to the standby server in synchronous or asynchronous mode. The standby server replicates logs to the cascaded standby server only in asynchronous mode. - -In the current one-primary-multiple-standby architecture, the primary server uses the WAL sender process (walsender) to replicate logs to the standby server. The standby server uses the WAL receiver process (walreceiver) to receive and then flushes logs to local disks. The standby server reads redo logs to complete data replication between the primary and standby servers. There is a one-to-one mapping between walsender and walreceiver on the primary and standby servers. Logs are sent between the standby and cascaded standby servers in asynchronous mode using walsender and walreceiver, reducing the streaming replication pressure on the primary server. - -## Enhancements - -None - -## Constraints - -- A cascaded standby server can only replicate data from a standby server and cannot directly replicate data from the primary server. -- A cascaded standby server does not support data build from a standby server. Currently, data can be built only from the primary server. If the standby server is fully built, the cascaded standby server needs to be fully built. -- The cascaded standby node is in asynchronous replication mode. -- The cascaded standby server cannot be promoted. -- The cascaded standby server cannot be notified. -- Currently, the overall architecture of the primary-standby-cascaded standby cluster cannot be queried. You need to find the standby server through the primary server and then find the cascaded standby server based on the standby server. -- A cascaded standby server cannot own another cascaded standby server. -- When the ultimate RTO is enabled, no cascaded standby server is supported. - -## Dependencies - -None - -## Related Pages - -[Common Primary Backup Deployment Scenarios](../../administrator-guide/database-deployment-scenario/common-primary-backup-deployment-scenarios.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/9-delayed-replay.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/9-delayed-replay.md deleted file mode 100644 index 657bde68..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/9-delayed-replay.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Delayed Replay -summary: Delayed Replay -author: Guo Huan -date: 2022-05-07 ---- - -# Delayed Replay - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The time for a standby node to replay can be delayed. - -## Benefits - -By default, the standby server restores the Xlog records from the primary server as soon as possible. This function allows you to delay the time for a standby node to replay Xlog records. In this case, you can query a copy that records data before a period of time, which helps correct errors such as misoperations. - -## Description - -The GUC parameter **recovery_min_apply_delay** can be used to set the delay time so that a standby server can replay Xlog records from the primary server after a delay time. - -Value range: an integer ranging from 0 to INT_MAX. The unit is ms. - -Default value: **0** (no delay) - -## Enhancements - -None - -## Constraints - -- The **recovery_min_apply_delay** parameter is invalid on the primary node. It must be set on the standby node to be delayed. 
-- The delay time is calculated based on the timestamp of transaction commit on the primary server and the current time on the standby server. Therefore, ensure that the clocks of the primary and standby servers are the same. -- Operations without transactions are not delayed. -- After the primary/standby switchover, if the original primary server needs to be delayed, you need to manually set this parameter. -- When **synchronous_commit** is set to **remote_apply**, synchronous replication is affected by the delay. Each commit message is returned only after the replay on the standby server is complete. -- Using this feature also delays **hot_standby_feedback**, which may cause the primary server to bloat, so be careful when using both. -- If a DDL operation (such as DROP or TRUNCATE) that holds an AccessExclusive lock is performed on the primary server, the query operation on the operation object on the standby server will be returned only after the lock is released during the delayed replay of the record on the standby server. - -## Dependencies - -None - -## Related Pages - -[Log Replay](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/cm-cluster-management-component-supporting-two-node-deployment.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/cm-cluster-management-component-supporting-two-node-deployment.md deleted file mode 100644 index 40818ed5..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/cm-cluster-management-component-supporting-two-node-deployment.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: CM Supporting Two-Node Deployment -summary: CM Supporting Two-Node Deployment -author: zhang cuiping -date: 2022-11-11 ---- - -# CM Supporting Two-Node Deployment - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -The main goal of this feature is to improve the performance and reliability of CM cluster management components in a distributed environment. Specifically, by improving the DCF distributed consensus framework, the algorithm can be optimized, the network communication overhead can be reduced, and the fault-tolerance mechanism can be added, etc., so as to improve the data consistency guarantee. - -## Benefits - -CM Cluster is a database cluster solution with high availability. By supporting two-node deployment and introducing third-party gateway IPs as arbitration nodes, CM Cluster is able to realize automatic failover and self-arbitration mechanisms to ensure data security and stability. CM Cluster maintains data continuity and availability, both in high load environments and in the event of a failure. - -Overall, CM Cluster provides users with a high-availability database cluster solution by supporting two-node deployment and introducing third-party gateway IPs as arbitration nodes. It ensures data security and stability, reduces users' administrative work, and provides users with reliable database services by automatically switching over and recovering when failures occur. - -## Description - -In the two-node deployment architecture, a third-party gateway IP is introduced as an arbitration node, and the CM cluster management component realizes a self-arbitration mechanism based on majority. 
When the cluster network state is normal, whenever the CM primary or standby instance initiates an election, it adds one extra vote to its own tally as long as it can reach the gateway IP, which allows a majority decision to be reached. This mechanism ensures that, under normal network conditions, the primary and standby instances in the cluster can hold elections and reach agreement, ensuring the stability and reliability of the cluster. - -When the cluster network state is abnormal, once either the CM primary or standby instance finds that it can communicate only with the gateway IP, it assumes that the peer may have failed. The primary side then takes the standby offline, that is, removes the standby from the cluster to avoid data inconsistency or conflicts. The standby side decides, based on its configuration items, whether to perform an automatic failover, that is, whether the standby is promoted to primary to ensure data continuity and availability. - -This self-arbitration mechanism effectively handles failover under abnormal network conditions. By checking connectivity to the third-party gateway IP, the primary and standby instances can respond quickly to network failures and make decisions based on the votes they hold. Such a mechanism ensures cluster availability and data security, providing users with reliable database services. - -## Related Pages - -[Cluster Management](../../high-available-guide/cluster-management/cluster-management.md), [CM supports two-node deployment](../../high-available-guide/cluster-management/feature-introduction.md), [CM Fault](../../common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-fault.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/ddl-query-of-view.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/ddl-query-of-view.md deleted file mode 100644 index 6f0fca9b..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/ddl-query-of-view.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Query of the Original DDL Statement for a View -summary: Query of the Original DDL Statement for a View -author: zhang cuiping -date: 2023-10-08 ---- - -# Query of the Original DDL Statement for a View - -## Availability - -This feature is available since MogDB 5.0.2. - -## Introduction - -MogDB supports querying the original DDL statement of a view using the **pg_get_ori_viewdef(view_name)** function. Additionally, the DDL statement includes the comment following AS. - -## Benefits - -You can query the original DDL statement of a view. If there are comments following AS, they are also presented in the DDL statement. - -## Description - -MogDB adds the **pg_get_ori_viewdef(view_name)** function to obtain the original DDL statement of a view. The DDL can present the comments following AS. Two kinds of comments are supported: "--" and "/* \*/". - -## Example - -1. Create a test table, insert data, and query it. - - ```sql - MogDB=# create table t_auto(a int, b int); - CREATE TABLE - MogDB=# insert into t_auto values(1,2); - INSERT 0 1 - MogDB=# select * from t_auto; - a | b - ---+--- - 1 | 2 - (1 row) - ``` - -2. Create a view with "--" and "/* \*/" comment.
- - ```sql - MogDB=# create /* line create*/ --line create - MogDB-# or /* line or */ --line or - MogDB-# replace /* line replace */ --line replace - MogDB-# view /* line view */ --line view - MogDB-# v1 /* line view_name */ --line view_name - MogDB-# as /*line as */ --line as - MogDB$# select /* line select */ --line select - MogDB$# * /* line target */ --line target - MogDB$# from /* line from */ --line from - MogDB$# t_auto /* line rel */ --line rel - MogDB$# ; - CREATE VIEW - ``` - -3. Query the view using pg_get_ori_viewdef(view_name). - - There are two query formats: - - - select pg_get_ori_viewdef ('view_name'): queries the original DDL statement of a view in the current schema. - - - select pg_get_ori_viewdef ('schema.view_name'): queries the original DDL statement of a view in the specified schema. - - ```sql - MogDB=# select pg_get_ori_viewdef ('v1'); - pg_get_ori_viewdef - --------------------------------------------- - CREATE OR REPLACE VIEW public.v1(a, b) AS + - /*line as */ --line as + - select /* line select */ --line select + - * /* line target */ --line target + - from /* line from */ --line from + - t_auto /* line rel */ --line rel + - ; - (1 row) - ``` - -## Related Pages - -[pg_get_ori_viewdef(view_name)](../../reference-guide/functions-and-operators/system-information-functions/system-catalog-information-functions.md#pg_get_ori_viewdef(view_name)) diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/high-availability-based-on-the-paxos-protocol.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/high-availability-based-on-the-paxos-protocol.md deleted file mode 100644 index e2f34624..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/high-availability-based-on-the-paxos-protocol.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: High Availability Based on the Paxos Protocol -summary: High Availability Based on the Paxos Protocol -author: zhang cuiping -date: 2022-10-24 ---- - -# High Availability Based on the Paxos Protocol - -## Availability - -This feature is available since MogDB 2.1.0. - -## Introduction - -After the DCF mode is enabled, database nodes support Paxos-based replication and quorum capabilities. Primary database node selection and log replication are performed based on Paxos. Compression and flow control are supported during the replication to prevent high bandwidth usage. It supports multiple types of nodes based on the Paxos protocol and the node roles can be adjusted as required. You can query the instance status in the current database. - -## Description - -- During log replication, DCF supports log compression before transmission to reduce network bandwidth usage. -- DCF supports passive nodes, which do not participate in node election and only synchronize and replay logs. When this type of nodes is heavily loaded during log synchronization, flow control is performed. -- DCF supports logger nodes, which participate in node election and voting. However, the logger nodes only replicate DCF logs. Xlogs are not replicated and redo is not performed. -- DCF supports online switchover between follower and passive nodes. That is, without interrupting services, a node can switch over between the follower and passive roles. -- DCF supports forcible startup of the minority of nodes. 
If the majority of database instances are faulty, a minority of standby nodes are selected and forcibly promoted to primary, and other normal standby nodes replicate logs from the primary nodes. -- DCF supports primary node selection. When the original primary node is faulty, a new primary node is automatically selected from the remaining standby nodes on the premise that data consistency is ensured. - -## Enhancements - -None. - -## Constraints - -To use this function, you need to enable DCF during installation and deployment. - -## Dependencies - -None. - -## Related Pages - -[DCF](../../high-available-guide/high-available-dcf.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/high-availability.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/high-availability.md deleted file mode 100644 index f0212e45..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-availability/high-availability.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: High Availability -summary: High Availability -author: Guo Huan -date: 2023-05-22 ---- - -# High Availability - -+ **[Primary/Standby](1-primary-standby.md)** -+ **[Logical Replication](2-logical-replication.md)** -+ **[Logical Backup](4-logical-backup.md)** -+ **[Physical Backup](5-physical-backup.md)** -+ **[Automatic Job Retry upon Failure](6-automatic-job-retry-upon-failure.md)** -+ **[Ultimate RTO](7-ultimate-rto.md)** -+ **[High Availability Based on the Paxos Protocol](high-availability-based-on-the-paxos-protocol.md)** -+ **[Cascaded Standby Server](8-cascaded-standby-server.md)** -+ **[Delayed Replay](9-delayed-replay.md)** -+ **[Adding or Deleting a Standby Node](10-adding-or-deleting-a-standby-server.md)** -+ **[Delaying Entering the Maximum Availability Mode](11-delaying-entering-the-maximum-availability-mode.md)** -+ **[Parallel Logical Decoding](12-parallel-logical-decoding.md)** -+ **[DCF](13-dcf.md)** -+ **[CM (Cluster Manager)](14-cm.md)** -+ **[Global SysCache](15-global-syscache.md)** -+ **[Using a Standby Node to Build a Standby Node](16-using-a-standby-node-to-build-a-standby-node.md)** -+ **[Two-City Three-DC DR](17-two-city-three-dc-dr.md)** -+ **[CM Supporting Two-Node Deployment](cm-cluster-management-component-supporting-two-node-deployment.md)** -+ **[Query of the Original DDL Statement for a View](ddl-query-of-view.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/1-cbo-optimizer.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/1-cbo-optimizer.md deleted file mode 100644 index 089cd4b8..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/1-cbo-optimizer.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: CBO Optimizer -summary: CBO Optimizer -author: Guo Huan -date: 2022-05-07 ---- - -# CBO Optimizer - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The MogDB optimizer uses cost-based optimization (CBO). - -## Benefits - -The MogDB CBO optimizer can select the most efficient execution plan among multiple plans based on the cost to meet customer service requirements to the maximum extent.
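For illustration, the cost estimates that CBO compares are visible in any plan; a small sketch with a hypothetical table:

```sql
-- The optimizer annotates each step with its estimated startup cost, total
-- cost, row count, and width, and keeps the cheapest overall plan.
EXPLAIN SELECT * FROM my_table WHERE id = 1;  -- my_table is hypothetical
-- e.g.  Seq Scan on my_table  (cost=0.00..36.75 rows=11 width=8)
```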
- -## Description - -By using CBO, the database calculates the number of tuples and the execution cost for each step under each execution plan based on the number of table tuples, column width, null record ratio, and characteristic values, such as distinct, MCV, and HB values, and certain cost calculation methods. The database then selects the execution plan that takes the lowest cost for the overall execution or for the return of the first tuple. - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/10-xlog-no-lock-flush.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/10-xlog-no-lock-flush.md deleted file mode 100644 index bd7cc515..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/10-xlog-no-lock-flush.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Xlog no Lock Flush -summary: Xlog no Lock Flush -author: Guo Huan -date: 2022-05-07 ---- - -# Xlog no Lock Flush - -## Availability - -This feature is available since MogDB 2.0.0. - -## Introduction - -Eliminates WalInsertLock contention and uses a dedicated WalWriter disk write thread. - -## Benefits - -The system performance is further improved on the basis that the original Xlog functions remain unchanged. - -## Description - -This feature optimizes the WalInsertLock mechanism by using log sequence numbers (LSNs) and log record counts (LRCs) to record the copy progress of each backend. The backend can directly copy logs to the WalBuffer without contending for the WalInsertLock. In addition, a dedicated WALWriter thread is used to write logs, and the backend thread does not need to ensure the Xlog flushing. - -## Enhancements - -None. - -## Constraints - -None. - -## Dependencies - -None. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/11-parallel-page-based-redo-for-ustore.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/11-parallel-page-based-redo-for-ustore.md deleted file mode 100644 index 886a45a0..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/11-parallel-page-based-redo-for-ustore.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Parallel Page-based Redo For Ustore -summary: Parallel Page-based Redo For Ustore -author: Guo Huan -date: 2022-05-07 ---- - -# Parallel Page-based Redo For Ustore - -## Availability - -This feature is available since MogDB 2.1.0. - -## Introduction - -Optimized Ustore in-place update WAL writes and improved the degree of parallelism for Ustore DML operation replay. - -## Benefits - -The WAL space used by the update operation is reduced, and the degree of parallelism for Ustore DML operation replay is improved. - -## Description - -Prefixes and suffixes are used to reduce the amount of WAL written by update operations. Replay threads are classified to solve the problem that most Ustore DML WALs are replayed on multiple pages. In addition, Ustore data pages are replayed based on **blkno**. - -## Enhancements - -None. - -## Constraints - -None. - -## Dependencies - -This feature depends on the Ustore engine.
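Since the feature applies to Ustore tables, a minimal sketch of creating one follows; the **storage_type** storage parameter is an assumption based on comparable openGauss-family syntax:

```sql
-- Create an in-place update (Ustore) table; updates to it generate the
-- compact prefix/suffix WAL records described above.
CREATE TABLE ustore_demo (id int, payload text) WITH (storage_type = ustore);
INSERT INTO ustore_demo VALUES (1, 'row one');
UPDATE ustore_demo SET payload = 'row one, updated in place' WHERE id = 1;
```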
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/12-row-store-execution-to-vectorized-execution.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/12-row-store-execution-to-vectorized-execution.md deleted file mode 100644 index 5a0ae7c6..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/12-row-store-execution-to-vectorized-execution.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Row-Store Execution to Vectorized Execution -summary: Row-Store Execution to Vectorized Execution -author: Guo Huan -date: 2022-05-10 ---- - -# Row-Store Execution to Vectorized Execution - -## Availability - -This feature is available since MogDB 3.0.0. - -## Introduction - -Row-store table queries are converted into vectorized execution plans for execution, improving the execution performance of complex queries. - -## Benefits - -Effectively improves the query performance of complex queries. - -## Description - -This feature adds a RowToVec operation to scan operators. After the data in the row-store table is converted into the vectorized format in the memory, the upper-layer operators can be converted into vectorized operators. In this way, the vectorized executor can be used for calculation. Scan operators that support this conversion include SeqScan, IndexOnlyscan, IndexScan, BitmapScan, FunctionScan, ValueScan and TidScan. - -## Constraints - -- Vectorization is not supported in the following scenarios: - - The targetList contains a set-returning function. - - The targetList or qual contains expressions that do not support vectorization, such as array expressions, multi-subquery expressions, field expressions, and system catalog columns. - - The following types do not support vectorization: POINTOID, LSEGOID, BOXOID, LINEOID, CIRCLEOID, POLYGONOID, PATHOID, and user-defined types. -- MOTs do not support vectorization. -- The vectorization engine needs to be turned on, that is, set `enable_vector_engine = on`. - -## Example - -1. Create a table, insert data, and turn on the vectorization engine. - - ```sql - -- Create table, insert test data - create table mogdb_rowtovec_1 (id int, rating int, match text); - - insert into - mogdb_rowtovec_1 - values - ( - generate_series(1, 20000), - floor(random() * 100), - 'match# ' || generate_series(1, 113) - ); - - vacuum analyze mogdb_rowtovec_1; - - set enable_vector_engine = on; - ``` - -2. When `try_vector_engine_strategy = 'optimal'`, the optimizer evaluates the statement in terms of the amount of data to be processed, the expression complexity, and the overhead of row-column transformation, etc., and then automatically selects whether or not to vectorize the row-store table plan based on the cost. The vectorized plan will be added with the `Vector Adapter / Row Adaptor` operator for row-column transformation.
- ```sql - -- Set vector strategy to optimal - set try_vector_engine_strategy = 'optimal'; - - -- Simple batch processing scenario: the row-store table plan is not vectorized - MogDB=# explain (costs off) select id, rating from mogdb_rowtovec_1; - QUERY PLAN - ------------------------------ - Seq Scan on mogdb_rowtovec_1 - (1 row) - - -- Vectorization scenario: a vectorized row-store table plan is selected - MogDB=# explain (costs off) - select - sum(rating), - avg(rating), - sum(rating + 10), - match - from - mogdb_rowtovec_1 - group by - rating, - match; - QUERY PLAN - ------------------------------------------------ - Row Adapter - -> Vector Sonic Hash Aggregate - Group By Key: rating, match - -> Vector Adapter(type: BATCH MODE) - -> Seq Scan on mogdb_rowtovec_1 - (5 rows) - ``` - -3. MogDB also supports Force mode (`try_vector_engine_strategy = 'force'`), in which the optimizer aggressively performs row-column transformations to the extent supported by the column-store engine while ignoring the effects of cost. - - ```sql - -- Set vector strategy to Force - set try_vector_engine_strategy = 'force'; - - -- Re-execute the simple batch processing scenario - MogDB=# explain (costs off) select id, rating from mogdb_rowtovec_1; -- vectorized - QUERY PLAN - ------------------------------------------ - Row Adapter - -> Vector Adapter(type: BATCH MODE) - -> Seq Scan on mogdb_rowtovec_1 - (3 rows) - ``` - -## Related Pages - -[enable_vector_engine](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_vector_engine) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/2-llvm.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/2-llvm.md deleted file mode 100644 index 0cd7fdb7..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/2-llvm.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: LLVM -summary: LLVM -author: Guo Huan -date: 2022-05-07 ---- - -# LLVM - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -MogDB provides the Low Level Virtual Machine (LLVM) technology for dynamic compilation and execution of queries. - -## Benefits - -The query performance is greatly improved by dynamically building and executing queries. - -## Description - -Based on the query execution plan tree, with the library functions provided by the LLVM, MogDB moves the process of determining the actual execution path from the executor phase to the execution initialization phase. In this way, problems such as function calling, logic condition branch determination, and a large amount of data reading that are related to the original query execution are avoided, to improve the query performance. - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -It depends on the LLVM open-source component. Currently, the open-source version 10.0.0 is used.
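As an illustration, dynamic compilation is typically controlled through GUC parameters; a hedged sketch, assuming the **enable_codegen** and **codegen_cost_threshold** parameter names:

```sql
-- Allow LLVM code generation for qualifying plans.
SET enable_codegen = on;

-- Only plans whose estimated cost exceeds this threshold are compiled,
-- so that compilation overhead is not paid on cheap queries.
SET codegen_cost_threshold = 10000;
```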
- -## Related Pages - -[LLVM](../../reference-guide/sql-reference/sql-reference-llvm.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/3-vectorized-engine.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/3-vectorized-engine.md deleted file mode 100644 index 27f88a42..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/3-vectorized-engine.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Vectorized Engine -summary: Vectorized Engine -author: Guo Huan -date: 2022-05-07 ---- - -# Vectorized Engine - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The vectorized execution engine, provided by MogDB, is usually used in OLAP data warehouse systems because analytical systems are usually data-intensive, access most data in a table in a sequential manner, perform calculations, and finally output a calculation result to an end user. - -## Benefits - -Batch calculation greatly improves the performance of complex queries. - -## Description - -The traditional database query execution uses the tuple-based pipeline execution mode. Much of the time, the CPU is not used to actually process data, but to traverse the query operation tree. As a result, the effective utilization of the CPU is not high. This also results in low instruction cache performance and frequent jumps. Worse still, this approach does not take advantage of the new capabilities of modern hardware to speed up the execution of queries. In the execution engine, another solution is to process a column at a time instead of a tuple at a time. This is the basis of our vectorized execution engine. - -The vectorized engine is bound to the column-store technology, because data of each column is stored together, and it may be considered that the data is stored in an array manner. Based on such a feature, when the same operation needs to be performed on the column data, the calculation of each value of the data block can be completed efficiently with a simple loop. - -The advantages of the vectorized execution engine are as follows: - -- It reduces inter-node scheduling and improves CPU usage. -- Because the same type of data is put together, it is easier to leverage the new optimization features of hardware and compilation. - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -It depends on column store. - -## Related Pages - -[Using the Vectorized Executor for Tuning](../../performance-tuning/system-tuning/configuring-vector-engine.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/4-hybrid-row-column-store.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/4-hybrid-row-column-store.md deleted file mode 100644 index 962a56f5..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/4-hybrid-row-column-store.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Hybrid Row-Column Store -summary: Hybrid Row-Column Store -author: Guo Huan -date: 2022-05-07 ---- - -# Hybrid Row-Column Store - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -MogDB supports both row-store and column-store models. Choose a row-store or column-store table as needed. - -Column-store is recommended if a table contains many columns (called a wide table) but its query involves only a few columns. Figure 1 shows the column-store model.
Row store is recommended if a table contains only a few columns and a query involves most of the fields. - -**Figure 1** Column-store - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/hybrid-row-column-store-2.png) - -## Benefits - -In a wide table containing a huge amount of data, a query usually only involves certain columns. In this case, the query performance of the row-store engine is poor. For example, a single table containing the data of a meteorological agency has 200 to 800 columns. Among these columns, only 10 are frequently accessed. In this case, a vectorized execution and column-store engine can significantly improve performance while also saving storage space. - -## Description - -Tables are categorized into row-store and column-store tables. Each storage model applies to specific scenarios. Select an appropriate model when creating a table. - -- Row-store table - - Row-store tables are created by default. Data is stored by row. Row store supports adding, deleting, modifying, and querying data of a complete row. Therefore, this storage model applies to scenarios where data needs to be updated frequently. - -- Column-store table - - Data is stored by column. Querying data in a single column requires little I/O, and column-store tables occupy less storage space than row-store tables. This storage model applies to scenarios where data is inserted in batches, rarely updated, and queried for statistical analysis. The performance of single-point queries and single-record insertions in a column-store table is poor. - -- Selecting a storage model - - - Update frequency - - If data is frequently updated, use a row-store table. - - - Data insertion frequency - - If a small amount of data is frequently inserted each time, use a row-store table. If a large amount of data is inserted at a time, use a column-store table. - - - Number of columns - - If a table is to contain many columns, use a column-store table. - - - Number of columns to be queried - - If only a small number of columns (less than 50% of the total) are queried each time, use a column-store table. - - - Compression ratio - - The compression ratio of a column-store table is higher than that of a row-store table. A high compression ratio consumes more CPU resources. - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/5-adaptive-compression.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/5-adaptive-compression.md deleted file mode 100644 index c3b37503..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/5-adaptive-compression.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Adaptive Compression -summary: Adaptive Compression -author: Guo Huan -date: 2022-05-07 ---- - -# Adaptive Compression - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Data compression is a major technology used in current databases. Various compression algorithms are used for different data types. If pieces of data of the same type have different characteristics, their compression algorithms and results will also differ. Adaptive compression chooses the suitable compression algorithm for data based on the data type and characteristics, achieving high performance in compression ratio, import, and query.
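As a minimal, hedged sketch (the table and column names below are made up for illustration), a column-store table created with a compression level lets the engine pick a suitable per-column algorithm:

```sql
-- Create a column-store table with a high compression level; a suitable
-- algorithm (RLE, DELTA, LZ4, ZLIB, ...) is then chosen per column based
-- on the data type and characteristics of the stored values.
CREATE TABLE sales_history
(
    sale_id   int,
    sale_date date,
    amount    numeric(12,2)
)
WITH (ORIENTATION = COLUMN, COMPRESSION = HIGH);
```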
- -## Benefits - -Importing and frequently querying a huge amount of data are the main application scenarios. When you import data, adaptive compression greatly reduces the data volume, increases I/O operation efficiency several times, and clusters data before storage, achieving fast data import. In this way, only a small number of I/O operations are required and data is quickly decompressed during a query, so data can be quickly retrieved and the query result quickly returned. - -## Description - -Currently, the database has implemented various compression algorithms on column store, including RLE, DELTA, BYTEPACK/BITPACK, LZ4, ZLIB, and LOCAL DICTIONARY. The following table lists data types and the compression algorithms suitable for them. - -| - | RLE | DELTA | BITPACK/BYTEPACK | LZ4 | ZLIB | LOCAL DICTIONARY | -| :----------------------------------------------------------- | :--- | :---- | :--------------- | :--- | :--- | :--------------- | -| Smallint/Int/Bigint/Oid
Decimal/Real/Double
Money/Time/Date/Timestamp | √ | √ | √ | √ | √ | - | -| Tinterval/Interval/Time with time zone/ | - | - | - | - | √ | - | -| Numeric/Char/Varchar/Text/Nvarchar2
and other supported data types | √ | √ | √ | √ | √ | √ | - -## Enhancements - -The compression level of compression algorithms can be adjusted. - -## Constraints - -None - -## Dependencies - -It depends on LZ4 or ZLIB. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/7-kunpeng-numa-architecture-optimization.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/7-kunpeng-numa-architecture-optimization.md deleted file mode 100644 index 602782ae..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/7-kunpeng-numa-architecture-optimization.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Kunpeng NUMA Architecture Optimization -summary: Kunpeng NUMA Architecture Optimization -author: Guo Huan -date: 2022-05-07 ---- - -# Kunpeng NUMA Architecture Optimization - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Kunpeng NUMA architecture optimization mainly focuses on Kunpeng processor architecture features and ARMv8 instruction set, and optimizes the system from multiple layers, including OS, software architecture, lock concurrency, logs, atomic operations, and cache access. This greatly improves the MogDB performance on the Kunpeng platform. - -## Benefits - -Transactions per minute (TPM) is a key performance indicator of the database competitiveness. Under the same hardware costs, a higher database performance means the database can process more services, thereby reducing the usage cost of customers. - -## Description - -- MogDB optimizes the Kunpeng NUMA architecture based on the architecture characteristics. This reduces cross-core memory access latency and maximizes multi-core Kunpeng computing capabilities. The key technologies include redo log batch insertion, NUMA distribution of hotspot data, and CLog partitions, greatly improving the TP system performance. -- Based on the ARMv8.1 architecture used by the Kunpeng chip, MogDB uses the LSE instruction set to implement efficient atomic operations, effectively improving the CPU usage, multi-thread synchronization performance, and Xlog write performance. -- Based on the wider L3 cache line provided by the Kunpeng chip, MogDB optimizes hotspot data access, effectively improving the cache access hit ratio, reducing the cache consistency maintenance overhead, and greatly improving the overall data access performance of the system. -- Kunpeng 920, 2P server (64 cores x 2, memory: 768 GB), 10 GE network, I/O: 4 NVMe PCIe SSDs, TPC-C: 1000 warehouses, performance: 1,500,000 tpmC. - -## Enhancements - -- Batch redo log insertion and CLog partition are supported, improving the database performance on the Kunpeng platform. -- Efficient atomic operations using the LSE instruction set are supported, improving multi-thread synchronization performance. 
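As a hedged illustration of the NUMA optimization discussed above (assuming the `numa_distribute_mode` GUC parameter, which controls NUMA distribution of shared hotspot data and takes effect only after a restart), the current setting can be checked from any session:

```sql
-- Check whether NUMA distribution of hotspot data is enabled:
-- 'all' distributes shared data across NUMA nodes, 'none' disables it.
SHOW numa_distribute_mode;
```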
- -## Constraints - -None - -## Dependencies - -None \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/8-high-concurrency-of-thread-pools.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/8-high-concurrency-of-thread-pools.md deleted file mode 100644 index 8a2b5303..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/8-high-concurrency-of-thread-pools.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: High Concurrency of Thread Pools -summary: High Concurrency of Thread Pools -author: Guo Huan -date: 2022-05-07 ---- - -# High Concurrency of Thread Pools - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The thread pooling technology is used to support stable running of databases at high concurrency. - -## Benefits - -The overall system throughput is stable in case of a large number of concurrent requests. - -## Description - -The overall design idea of the thread pool technology is to pool thread resources and reuse them among different connections. After the system is started, a fixed number of worker threads are started based on the current number of cores or user configuration. A worker thread serves one or more connection sessions; in this way, sessions and threads are decoupled. Because the number of worker threads is fixed, frequent thread switchover does not occur in case of high concurrency. The database layer schedules and manages sessions. - -## Enhancements - -In MogDB 1.1.0, thread pools can be dynamically scaled in or out. - -## Constraints - -None - -## Dependencies - -None \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/9-smp-for-parallel-execution.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/9-smp-for-parallel-execution.md deleted file mode 100644 index cbd48966..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/9-smp-for-parallel-execution.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: SMP for Parallel Execution -summary: SMP for Parallel Execution -author: Guo Huan -date: 2022-05-07 ---- - -# SMP for Parallel Execution - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The Symmetric Multi-Processing (SMP) technology of MogDB uses the multi-core CPU architecture of a computer to implement multi-thread parallel computing, fully using CPU resources to improve query performance. - -## Benefits - -Fully utilizes the system's multi-core capability to improve query performance. - -## Description - -In complex query scenarios, a single query takes a long time and the system concurrency is low. Therefore, the SMP technology is used to implement operator-level parallel execution, which effectively reduces the query time and improves the query performance and resource utilization. The overall implementation of the SMP technology is as follows: for query operators that can be executed in parallel, data is sliced, multiple worker threads are started for computation, and then the results are summarized and returned to the frontend. The data interaction operator **Stream** is added to the SMP architecture to implement data interaction between multiple worker threads, ensuring the correctness and integrity of the query. - -## Enhancements - -None.
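A brief usage sketch may help here (the table `t_smp` and the plan shape are illustrative; `query_dop` is documented in Configuring SMP, linked below):

```sql
-- Allow up to 4 worker threads per parallel operator in this session
SET query_dop = 4;
-- A qualifying plan then contains Streaming (LOCAL GATHER) nodes above
-- the parallelized operators, e.g. for an aggregate:
EXPLAIN (COSTS OFF) SELECT c1, count(*) FROM t_smp GROUP BY c1;
-- Restore serial execution
SET query_dop = 1;
```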
- -## Constraints - -- Index scanning cannot be executed in parallel. -- MergeJoin cannot be executed in parallel. -- WindowAgg order by cannot be executed in parallel. -- The cursor cannot be executed in parallel. -- Queries in stored procedures and functions cannot be executed in parallel. -- Subplans and initplans cannot be queried in parallel, and operators that contain subqueries cannot be executed in parallel, either. -- Query statements that contain the median operation cannot be executed in parallel. -- Queries with global temporary tables cannot be executed in parallel. -- Updating materialized views cannot be executed in parallel. - -## Dependencies - -None. - -## Related Pages - -[Configuring SMP](../../performance-tuning/system-tuning/configuring-smp.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/adaptive-two-phase-aggregation.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/adaptive-two-phase-aggregation.md deleted file mode 100644 index a1878efb..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/adaptive-two-phase-aggregation.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Adaptive Two-phase Hash Aggregation -summary: Adaptive Two-phase Hash Aggregation -author: Guo Huan -date: 2022-05-10 ---- - -# Adaptive Two-phase Hash Aggregation - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -Adaptive two-phase hash aggregation is an optimization technique that addresses the problem where the optimizer, due to inaccurate cost estimation, selects the wrong execution mode for an aggregation operation and thus reduces execution efficiency. - -This technique adjusts dynamically by collecting statistics at runtime. Specifically, when executing an aggregation, the system selects the less expensive execution mode based on the cost estimates of one-phase and two-phase aggregation, and then monitors and records resource utilization and performance metrics during actual execution. - -## Benefits - -In scenarios where adaptive two-phase hash aggregation is appropriate, query performance can be significantly improved when this feature is turned on. According to actual test data and user feedback, the performance improvement is about 6% to 17%. The improvement may fluctuate with the specific queries, data size, and other factors, but overall, adaptive two-phase hash aggregation can significantly improve database query performance. - -Therefore, for suitable query scenarios, turning on this feature yields a significant performance improvement while also improving the user's query experience and the overall efficiency of the system. - -## Description - -With adaptive two-phase hash aggregation turned on, the optimizer selects two-phase hash aggregation for an aggregation query by default. - -In the implementation, the pushed-down aggregation operator counts the number of rows in each aggregation group. When the hash table reaches a certain size, if most groups of the current data turn out to contain few aggregated rows, the system removes this batch of data from the hash table and passes it up to the upper-level operator.
After this happens a number of consecutive times, subsequent data enters the hash table for aggregation only with a small probability, while data that has not entered the hash table is passed directly to the upper-layer operator, which is equivalent to performing only one phase of aggregation. - -Through such dynamic adjustment, adaptive two-phase hash aggregation can autonomously decide, based on the actual data distribution and aggregation needs, whether to perform two-phase aggregation, thus avoiding unnecessary resource overhead and improving query performance. - -## Constraints - -- The parallelism switch needs to be turned on, i.e., set the `query_dop` parameter value greater than 1. -- The adaptive two-phase hash aggregation switch needs to be turned on, i.e., set the `enable_adaptive_hashagg` parameter value to `on`. -- This feature does not support vectorized execution engines at this time. -- This feature does not support scenarios such as grouping sets, distinct aggregation, and ordered-set aggregation functions. - -## Example - -1. Create a table and insert data. - - ```sql - CREATE TABLE MogDB_adaptive_hashagg(a int , b int ); - INSERT INTO MogDB_adaptive_hashagg(a, b) select i / 2, i / 2 from generate_series( 1, 1000000) as t(i); - ``` - -2. Collect statistics. - - ```sql - ANALYZE MogDB_adaptive_hashagg; - ``` - -3. Turn on the parallel query switch and set the `query_dop` parameter value greater than 1. Also turn on the adaptive two-phase aggregation operator switch. - - ```sql - SET query_dop = 4; - SET enable_adaptive_hashagg to on; - ``` - -4. View aggregation query details via EXPLAIN. - - ```sql - EXPLAIN (COSTS off , ANALYZE ) SELECT a, count ( *), sum (b), avg (b) FROM MogDB_adaptive_hashagg group by a; - ``` - - - Set `adaptive_hashagg_reduce_ratio_threshold` (the ratio of rows after aggregation to rows before aggregation) to 0.4 while the actual ratio is 0.5. As a result, 650034 rows (Passthrough rows: 650034) are passed directly to the final aggregation stage, i.e., only one phase of aggregation is performed for those rows. - - ```sql - SET adaptive_hashagg_reduce_ratio_threshold = 0.4; - EXPLAIN (COSTS off , ANALYZE ) SELECT a, count ( *), sum (b), avg (b) FROM MogDB_adaptive_hashagg group by a; - - QUERY PLAN - --------------------------------------------------------------------------------------------------------------------- - Streaming(type: LOCAL GATHER dop: 1 / 4) (actual time =[ 390.677, 672.311]..[ 390.677, 672.311], rows = 500001) - - > HashAggregate (actual time =[ 368.699, 589.262]..[ 373.300, 627.672], rows = 500001) - Group By Key: a - Max File Num: 48 Min File Num: 48 - - > Streaming(type: LOCAL REDISTRIBUTE dop: 4 / 4) (actual time =[ 0.036, 205.972]..[ 4.657, 243.092], rows = 899897) - - > Adaptive HashAggregate (actual time =[ 7.003, 117.946]..[ 8.701, 150.899], rows = 899897) - Group By Key: a - Passthrough rows: 650034 - - > Seq Scan on mogdb_adaptive_hashagg (actual time =[ 0.007, 19.261]..[ 0.009, 26.093], rows = 1000000) - ``` - - - Set `adaptive_hashagg_reduce_ratio_threshold` (the ratio of rows after aggregation to rows before aggregation) to 0.8. The displayed Passthrough rows value of 0 shows that there is no fallback to one-phase aggregation at this point.
- - ```sql - SET adaptive_hashagg_reduce_ratio_threshold = 0.8; - EXPLAIN (COSTS off , ANALYZE ) SELECT a, count ( *), sum (b), avg (b) FROM MogDB_adaptive_hashagg group by a; - - QUERY PLAN - ---------------------------------------------------------------------------------------------------------------------- - Streaming(type: LOCAL GATHER dop: 1 / 4) (actual time =[ 385.932, 678.295]..[ 385.932, 678.295], rows = 500001) - - > HashAggregate (actual time =[ 362.077, 562.587]..[ 369.987, 630.232], rows = 500001) - Group By Key: a - Max File Num: 48 Min File Num: 48 - - > Streaming(type: LOCAL REDISTRIBUTE dop: 4 / 4) (actual time =[ 15.825, 217.394]..[ 23.797, 280.069], rows = 500045) - - > Adaptive HashAggregate (actual time =[ 30.079, 203.190]..[ 34.597, 224.362], rows = 500045) - Group By Key: a - Passthrough rows: 0 - - > Seq Scan on mogdb_adaptive_hashagg (actual time =[ 0.007, 18.349]..[ 0.010, 20.279], rows = 1000000) - ``` - - - Allow spilling to disk when memory is insufficient in the pre-aggregation phase, i.e., set the `adaptive_hashagg_allow_spill` parameter to true. The Max File Num output shows that a spill to disk occurs at this point. - - ```sql - SET adaptive_hashagg_allow_spill = true; - EXPLAIN (COSTS off , ANALYZE ) SELECT a, count ( *), sum (b), avg (b) FROM MogDB_adaptive_hashagg group by a; - - QUERY PLAN - ---------------------------------------------------------------------------------------------------------------------- - Streaming(type: LOCAL GATHER dop: 1 / 4) (actual time =[ 498.529, 773.567]..[ 498.529, 773.567], rows = 500001) - - > HashAggregate (actual time =[ 476.078, 710.897]..[ 486.680, 734.490], rows = 500001) - Group By Key: a - Max File Num: 48 Min File Num: 48 - - > Streaming(type: LOCAL REDISTRIBUTE dop: 4 / 4) (actual time =[ 67.091, 300.522]..[ 78.236, 439.433], rows = 500045) - - > Adaptive HashAggregate (actual time =[ 82.496, 238.676]..[ 89.664, 309.068], rows = 500045) - Group By Key: a - Passthrough rows: 0 - Max File Num: 48 Min File Num: 48 - - > Seq Scan on mogdb_adaptive_hashagg (actual time =[ 0.008, 19.271]..[ 0.017, 19.918], rows = 1000000) - ``` - - - Set the `adaptive_hashagg_min_rows` parameter, which specifies the minimum number of rows for which two-phase aggregation is considered, to 10,000,000. Because this value exceeds the actual number of rows in the table (1,000,000), the aggregation is not degraded to one-phase aggregation.
- - ```sql - SET adaptive_hashagg_min_rows = 10000000; - - RESET adaptive_hashagg_reduce_ratio_threshold; - EXPLAIN (COSTS off , ANALYZE ) SELECT a, count ( *), sum (b), avg (b) FROM MogDB_adaptive_hashagg group by a; - QUERY PLAN - ---------------------------------------------------------------------------------------------------------------------- - Streaming(type: LOCAL GATHER dop: 1 / 4) (actual time =[ 556.257, 843.716]..[ 556.257, 843.716], rows = 500001) - - > HashAggregate (actual time =[ 533.540, 723.769]..[ 540.334, 796.572], rows = 500001) - Group By Key: a - Max File Num: 48 Min File Num: 48 - - > Streaming(type: LOCAL REDISTRIBUTE dop: 4 / 4) (actual time =[ 69.097, 336.018]..[ 75.762, 457.721], rows = 500045) - - > Adaptive HashAggregate (actual time =[ 85.874, 243.790]..[ 88.188, 319.401], rows = 500045) - Group By Key: a - Passthrough rows: 0 - Max File Num: 48 Min File Num: 48 - - > Seq Scan on mogdb_adaptive_hashagg (actual time =[ 0.008, 19.203]..[ 0.011, 19.495], rows = 1000000) - ``` - -## Related Pages - -[query_dop](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#query_dop), [enable_adaptive_hashagg](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_adaptive_hashagg), [adaptive_hashagg_reduce_ratio_threashold](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#adaptive_hashagg_reduce_ratio_threashold), [adaptive_hashagg_min_rows](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#adaptive_hashagg_min_rows), [adaptive_hashagg_allow_spill](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#adaptive_hashagg_allow_spill) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/astore-row-level-compression.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/astore-row-level-compression.md deleted file mode 100644 index 59827723..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/astore-row-level-compression.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: Enhancement of Astore Row-Level Compression -summary: Enhancement of Astore Row-Level Compression -author: Guo Huan -date: 2022-11-10 ---- - -# Enhancement of Astore Row-Level Compression - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -MogDB implements the row-level compression feature of Astore row-store tables, which saves disk space and improves storage efficiency by automatically compressing data in compressed tables, while ensuring data integrity and accessibility. This makes MogDB an efficient database management system for scenarios that store large amounts of data. - -## Benefits - -Astore row-level compression can save organizations a significant amount of storage space, reducing the storage space occupied by the database by 50% to 76%. Such storage space savings are very valuable to enterprises, reducing storage costs, improving storage efficiency, and helping them meet the challenges of explosive data growth. Meanwhile, in peak business scenarios, compressed tables outperform uncompressed tables by 7.3%, which further proves the advantages of compressed tables. In addition to saving storage space, compressed tables can also improve query performance and reduce disk IO operations, thus providing enterprises with higher efficiency and responsiveness. 
- -## Description - -Astore row-level compression supports the following features: - -- Creation and modification of compressed tables; -- Automatic compression or decompression during read and write operations on compressed tables; -- Support for primary/standby synchronization of compressed tables; -- Support for expired-version reclamation in compressed tables; -- Automatic compression when data is imported into a compressed table; -- Display of the compression status of all compressed tables in the system through the [GS_COMPRESSION](../../reference-guide/system-catalogs-and-system-views/system-views/GS_COMPRESSION.md) view; -- Compatibility with level-1 partitioned tables, including moving, migrating, merging, updating, splitting, adding, deleting, and truncating partitions; -- Compatibility with level-2 partitioned tables; the supported operations on level-2 subpartitions include adding, deleting, splitting, and truncating partitions; -- Creation of compressed tables in segment-page mode; inserts, deletes, updates, queries, and other operations on segment-page compressed tables work normally; -- Compatibility of compressed tables with the tools already released by MogDB; -- A page-compression step in the autovacuum thread that reduces the number of disk I/O operations and lock acquisitions, lowering background compression overhead. - -At the same time, Astore row-level compression optimizes the relevant compression algorithms; the compression of a table is transparent to the user. - -## Constraints - -- Only works for Astore row-store tables, not for Ustore row-store tables, column-store tables, or MOTs; -- Tables are created uncompressed by default; -- Cannot specify compression attributes for system tables; -- Cannot specify compression attributes for table tables; -- The tablespace compression attribute is not supported; -- For partitioned compressed tables, compression is performed only if the amount of data in a single partition is greater than 128 MB; - -- The ordinary VACUUM command does not perform compression; the VACUUM FULL command does; -- The space saved by background compression is not immediately reflected in disk usage; subsequent data insertion reuses the space saved by compression; -- Upgrading version 3.0 segment-page compressed tables to version 5.0 is not supported. If a segment-page compressed table exists in version 3.0, before upgrading, import its data into a non-compressed table for backup and delete the segment-page compressed table; after upgrading, re-create the segment-page compressed table and import the backup data into it; - -## Example - -1. Create compressed and uncompressed tables. - - ```sql - MogDB=# CREATE TABLE tb_mogdb_compress (id INT, name TEXT, addr TEXT, info TEXT) WITH (compression = yes); - CREATE TABLE - MogDB=# CREATE TABLE tb_mogdb_no_compress (id INT, name TEXT, addr TEXT, info TEXT); - CREATE TABLE - ``` - -2. Insert random data.
- - ```sql - MogDB=# INSERT INTO tb_mogdb_compress VALUES (generate_series(0, 1999999), 'fasdfasdhigasidfdfhgioashdfgohaosdgh', 'fasdfasdfasdahasdhsfsdgstyjdth', 'fasdhgsoidfhisdifgiosdfiogio'); - INSERT 0 2000000 - MogDB=# INSERT INTO tb_mogdb_no_compress VALUES (generate_series(0, 1999999), 'fasdfasdhigasidfdfhgioashdfgohaosdgh', 'fasdfasdfasdahasdhsfsdgstyjdth', 'fasdhgsoidfhisdifgiosdfiogio'); - INSERT 0 2000000 - ``` - -3. Execute VACUUM FULL to trigger compression immediately. - - ```sql - MogDB=# vacuum full tb_mogdb_compress; - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**Note**: Generally in practice, compression is triggered in the background; you can set the time interval between two background compression runs with the parameter `autocmpr_naptime`. For details, please refer to [autocmpr_naptime](../../reference-guide/guc-parameters/backend-compression.md#autocmpr_naptime). - -4. View the size occupied by the compressed and uncompressed tables. - - ```sql - MogDB=# \d+ - List of relations - Schema | Name | Type | Owner | Size | Storage | Description - --------+----------------------+-------+--------+--------+-----------------------------------+------------- - public | tb_mogdb_compress | table | yaojun | 105 MB | {orientation=row,compression=yes} | - public | tb_mogdb_no_compress | table | yaojun | 256 MB | {orientation=row,compression=no} | - (2 rows) - ``` - -## Related Pages - -[CREATE TABLE](../../reference-guide/sql-syntax/CREATE-TABLE.md), [ALTER TABLE](../../reference-guide/sql-syntax/ALTER-TABLE.md), [VACUUM](../../reference-guide/sql-syntax/VACUUM.md), [COPY](../../reference-guide/sql-syntax/COPY.md), [GS_COMPRESSION](../../reference-guide/system-catalogs-and-system-views/system-views/GS_COMPRESSION.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/btree-index-compression.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/btree-index-compression.md deleted file mode 100644 index ffebbc26..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/btree-index-compression.md +++ /dev/null @@ -1,193 +0,0 @@ ---- -title: BTree Index Compression -summary: BTree Index Compression -author: zhang cuiping -date: 2022-11-10 ---- - -# BTree Index Compression - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -This feature supports compression of index data. Compressed index data is loaded to memory, reducing memory usage and helping load more index data at a time. - -![image](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/btree-index-compression-1.png) - -## Benefits - -The use of the BTree index storage structure and memory compression can provide many advantages to meet system business requirements for efficient data queries. The following are a few of the key benefits of this feature: - -- Improved data query speed: BTree indexes are an efficient index structure that provides fast data lookup and access. By compressing the index data, the size of the index data can be reduced, thus speeding up queries. Compressed index data can be loaded into memory faster and reduces the cost of memory access. - -- Reduced storage footprint: Index data usually takes up a lot of storage space. By compressing index data, you can reduce the storage footprint. A smaller index data size also means fewer disk IO operations and less network transfer overhead.
- -- Improve overall system performance: By reducing memory footprint and disk IO operations, the BTree index compression feature can significantly improve overall system performance. The increase in data query speed and the storage space savings speed up the response time of the system and improve the user experience. In addition, the reduction of IO operations also reduces the load of the system and improves its stability and reliability. - -In conclusion, the use of the BTree index storage structure and memory compression can optimize the storage and query efficiency of index data, improve data query speed, reduce storage space occupation, and improve the overall performance of the system. This is very useful for meeting the demand of system business for efficient data queries. - -## Description - -The use of the BTree index storage structure and compression of index data stored in memory can further improve query efficiency. - -When there is a large amount of duplicate data in one or more columns, the creation of the index and the deduplication compression of the index data are especially important. By deduplicating and compressing the index data, you can reduce the size of the index data, save storage space, and load the index data into memory faster. - -At the same time, this feature also supports deduplication compression of non-unique index data. For non-unique indexes, deduplication compression can further reduce the storage space occupied and speed up queries. For unique indexes, although there is no compression effect, you can still enjoy other advantages, such as fast data lookup and access. - -In summary, this feature accelerates data queries through the BTree index storage structure and compression of index data stored in memory, and is particularly applicable to indexes created on columns containing a large amount of duplicate data. Both non-unique and unique indexes can benefit from this feature. - -## Constraints - -- The include index is not supported. -- The system table index is not supported. -- Only the BTree index is supported. - -## Example - -1. Create a table. - - ```sql - -- Create a table - CREATE TABLE mogdb_index_compression_table_01 - ( - seq int, - id int, - a text, - b text, - c timestamp - ); - ``` - -2. Create indexes, specifying the compression parameter. - - ```sql - -- Create btree compressed indexes. By default no compression parameter is specified and the index compression attribute is turned off.
CREATE INDEX idx_single_col_01 ON mogdb_index_compression_table_01(a) WITH (compression =yes); - CREATE INDEX idx_multi_col_01 ON mogdb_index_compression_table_01(id,a) WITH (compression =yes); - - -- On the same table and the same index columns, create btree indexes with the compression attribute left at its default (off) state, to compare how much space compression saves - CREATE INDEX idx_single_col_02 ON mogdb_index_compression_table_01(a); - CREATE INDEX idx_multi_col_02 ON mogdb_index_compression_table_01(id,a); - - -- Query the system catalogs to view index compression switch information - MogDB =# SELECT * FROM pg_indexes WHERE tablename = 'mogdb_index_compression_table_01'; - schemaname | tablename | indexname | tablespace | indexdef - ------------+----------------------------------+-------------------+------------+----------------------------------------------------------------------------------------------------------------------- - ------------- - public | mogdb_index_compression_table_01 | idx_single_col_01 | | CREATE INDEX idx_single_col_01 ON mogdb_index_compression_table_01 USING btree (a) WITH (compression =yes) TABLESPACE p - g_default - public | mogdb_index_compression_table_01 | idx_multi_col_01 | | CREATE INDEX idx_multi_col_01 ON mogdb_index_compression_table_01 USING btree (id, a) WITH (compression =yes) TABLESPAC - E pg_default - public | mogdb_index_compression_table_01 | idx_single_col_02 | | CREATE INDEX idx_single_col_02 ON mogdb_index_compression_table_01 USING btree (a) TABLESPACE pg_default - public | mogdb_index_compression_table_01 | idx_multi_col_02 | | CREATE INDEX idx_multi_col_02 ON mogdb_index_compression_table_01 USING btree (id, a) TABLESPACE pg_default - ( 4 rows) - MogDB =# SELECT relname,reloptions FROM pg_class WHERE relname = 'idx_single_col_01'; - relname | reloptions - -------------------+------------------- - idx_single_col_01 | {compression =yes} - ( 1 row) - MogDB =# SELECT relname,reloptions FROM pg_class WHERE relname = 'idx_multi_col_01'; - relname | reloptions - ------------------+------------------- - idx_multi_col_01 | {compression =yes} - ( 1 row) - ``` - -3. Construct random data and insert it into the table.
- - ```sql - -- Insert data - INSERT INTO mogdb_index_compression_table_01 - SELECT seqno, - seqno % 20, - md5((random() * 20):: INTEGER::text), - md5(random()::text), - tt - FROM generate_series( 1, 1000) AS seqno, - generate_series(now(),now() + '1 week', '1 day') AS tt; - - -- Viewing inserted data in a data table - MogDB =# SELECT * FROM mogdb_index_compression_table_01 LIMIT 15; - seq | id | a | b | c - -----+----+----------------------------------+----------------------------------+---------------------------- - 1 | 1 | c4ca4238a0b923820dcc509a6f75849b | d5e055cf4a50c7f2790bacae0685108c | 2023 -05 -25 16: 12: 46.776094 - 1 | 1 | 6512bd43d9caa6e02c990b0a82652dca | 59124424a2bb70cba5cd9495912c6f3f | 2023 -05 -26 16: 12: 46.776094 - 1 | 1 | 1679091c5a880faf6fb5e6087eb1b2dc | 8b49e1ccec21c9fc2bfb08dfbea85995 | 2023 -05 -27 16: 12: 46.776094 - 1 | 1 | 1679091c5a880faf6fb5e6087eb1b2dc | 0b1660223cd7777ef587b535b913ce5a | 2023 -05 -28 16: 12: 46.776094 - 1 | 1 | 45c48cce2e2d7fbdea1afc51c7c6ad26 | 1b9abe418c57bdcee01ee66415cf07de | 2023 -05 -29 16: 12: 46.776094 - 1 | 1 | c4ca4238a0b923820dcc509a6f75849b | d60b4db2e8390bc96887f944be6af3e8 | 2023 -05 -30 16: 12: 46.776094 - 1 | 1 | c20ad4d76fe97759aa27a0c99bff6710 | 7e1d2447ed0f40c4d77f163c925361b1 | 2023 -05 -31 16: 12: 46.776094 - 1 | 1 | cfcd208495d565ef66e7dff9f98764da | 1c4c7d26e8e04643051da09f32425ce9 | 2023 -06 -01 16: 12: 46.776094 - 2 | 2 | c51ce410c124a10e0db5e4b97fc2af39 | 7c330d7e0940db58c56b6c2a48618d91 | 2023 -05 -25 16: 12: 46.776094 - 2 | 2 | 6f4922f45568161a8cdf4ad2299f6d23 | 5cba36cfee55985bef28e77cd95be9f6 | 2023 -05 -26 16: 12: 46.776094 - 2 | 2 | d3d9446802a44259755d38e6d163e820 | 6e94dde54130dedd4166adde26a47ff7 | 2023 -05 -27 16: 12: 46.776094 - 2 | 2 | a87ff679a2f3e71d9181a67b7542122c | f834dd99d9e24eb2f8fde50560f73679 | 2023 -05 -28 16: 12: 46.776094 - 2 | 2 | c4ca4238a0b923820dcc509a6f75849b | d3ebea8e5ee091eba9fbb458bee90d15 | 2023 -05 -29 16: 12: 46.776094 - 2 | 2 | 8f14e45fceea167a5a36dedd4bea2543 | 681df81facbf975107287365d7c2b568 | 2023 -05 -30 16: 12: 46.776094 - 2 | 2 | aab3238922bcc25a6f606eb525ffdc56 | c5b256a5aa07c723df488a432147b39e | 2023 -05 -31 16: 12: 46.776094 - ( 15 rows) - ``` - -4. View compressed and uncompressed index file sizes. - - ```sql - -- View the data file size and index file size, due to random data, file size may be different from the example, mainly focus on non-compressed and compressed scenarios under the index file size changes - -- 1. 
View data file size - MogDB =# SELECT pg_size_pretty(pg_relation_size( 'mogdb_index_compression_table_01')); - pg_size_pretty - ---------------- - 920 kB - ( 1 row) - - -- 2.View single-column indexed uncompressed scenario file sizes - MogDB =# SELECT pg_size_pretty(pg_relation_size( 'idx_single_col_02')); - pg_size_pretty - ---------------- - 504 kB - ( 1 row) - - -- 3.View Single-Column Index Compression Scenario File Size - MogDB =# SELECT pg_size_pretty(pg_relation_size( 'idx_single_col_01')); - pg_size_pretty - ---------------- - 88 kB - ( 1 row) - - -- 4.View Uncompressed Scenario File Size for Multi-Column Combined Indexes - MogDB =# SELECT pg_size_pretty(pg_relation_size( 'idx_multi_col_02')); - pg_size_pretty - ---------------- - 600 kB - ( 1 row) - - -- 5.View Multi-column Combined Index Compression Scenario file size - MogDB =# SELECT pg_size_pretty(pg_relation_size( 'idx_multi_col_01')); - pg_size_pretty - ---------------- - 120 kB - ( 1 row) - - -- Directly view comparisons by other means - MogDB =# \d + - List of relations - Schema | Name | Type | Owner | Size | Storage | Description - --------+----------------------------------+-------+----------+--------+----------------------------------+------------- - public | mogdb_index_compression_table_01 | table | xyhmogdb | 952 kB | {orientation = row,compression = no} | - ( 1 row) - MogDB =# \di + - List of relations - Schema | Name | Type | Owner | Table | Size | Storage | Description - --------+-------------------+-------+----------+----------------------------------+--------+-------------------+------------- - public | idx_multi_col_01 | index | xyhmogdb | mogdb_index_compression_table_01 | 120 kB | {compression =yes} | - public | idx_multi_col_02 | index | xyhmogdb | mogdb_index_compression_table_01 | 600 kB | | - public | idx_single_col_01 | index | xyhmogdb | mogdb_index_compression_table_01 | 88 kB | {compression =yes} | - public | idx_single_col_02 | index | xyhmogdb | mogdb_index_compression_table_01 | 504 kB | | - ( 4 rows) - ``` - -## Related Pages - -[CREATE INDEX](../../reference-guide/sql-syntax/CREATE-INDEX.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/enhancement-of-tracing-backend-key-thread.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/enhancement-of-tracing-backend-key-thread.md deleted file mode 100644 index e9b8ad17..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/enhancement-of-tracing-backend-key-thread.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Trace Observation Enhancement -summary: Trace Observation Enhancement -author: Guo Huan -date: 2022-11-14 ---- - -# Trace Observation Enhancement - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -MogDB version 5.0 provides the trace function for the three key thread modules walwriter, pagewriter and checkpointer. By turning on the trace feature, the system generates detailed log messages during the runtime of the background modules. These logs record key information such as function call stacks, parameter values and return values. Debuggers can analyze and troubleshoot based on these log messages to understand the specifics of thread operation. - -## Benefits - -The trace feature in MogDB version 5.0 provides debuggers with a powerful tool to trace and observe the operation of background threads. 
By generating detailed log information, debuggers can analyze and troubleshoot thread anomalies to solve problems. At the same time, according to specific debugging needs, debuggers can choose to trace the operation of an entire module or trace specific functions in the module to achieve more accurate debugging results. - -## Description - -Three trace modules, walwriter, pagewriter, and checkpointer, are added. The trace function allows users to use `gstrace start` to trace the three modules and to trace one or more pre-specified functions related to a background thread. Some data probes are included in these functions to help later observation and diagnosis. - -By enabling, exporting, and disabling gstrace, users can trace specified modules during the period from the time when gstrace is enabled to the time when data is exported. - -When maintenance personnel find that a thread module is running abnormally, they can direct gstrace to trace the thread module and some functions in the thread module. With `gstrace dump`, the execution path (displayed as a stack) of the traced functions in the specified time period can be obtained, and key data or data structures in the functions can be examined. This method helps obtain effective tracing results, quickly locate the fault, and improve diagnosis efficiency. - -## Related Pages - -[gstrace](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gstrace.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/enhancement-of-wal-redo-performance.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/enhancement-of-wal-redo-performance.md deleted file mode 100644 index b21faf28..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/enhancement-of-wal-redo-performance.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Enhancement of WAL Redo Performance -summary: Enhancement of WAL Redo Performance -author: zhang cuiping -date: 2023-10-07 ---- - -# Enhancement of WAL Redo Performance - -## Availability - -This feature is available since MogDB 5.0.2. - -## Introduction - -MogDB enhances WAL redo performance. - -## Benefits - -In primary/standby deployment scenarios, the WAL redo performance can be improved. In a TPC-C scenario with 1000 warehouses, over 100 concurrent requests, and one primary and one standby database server, WAL redo performance is improved by 50% and the RTO is shortened by one third. - -## Description - -In a primary/standby deployment, the standby database obtains WALs from the primary database, and WAL redo is performed to finish data synchronization between the primary and standby databases. When the primary database fails to provide services, the standby database can take over the services of the primary database. During this process, the standby database performs redo operations on all WAL logs sent by the primary database before being promoted to primary and providing services. - -The redo performance of the standby database is not optimal, resulting in a long failover time or a long switchover operation during a primary/standby switchover drill. On the one hand, the database cannot provide services for a long time, so user services are stopped for a long time.
On the other hand, the data in the standby database will be delayed for a longer time than that in the primary database, causing WAL files in the standby database to accumulate and therefore occupy disk space. - -MogDB provides a parallel redo mechanism that allows multiple threads to work simultaneously during redo. This feature optimizes table-level parallel redo and provides redo performance views to query the redo status. Specific optimization points are as follows: - -- Increase the number of batches of WAL logs handed over by startup threads to reduce performance degradation caused by the flow of WAL records. -- Modify the table distribution policy to distribute redo tasks more evenly across worker threads. -- Observation views: you can view the time spent in each stage of the redo process and the WAL redo status. - -## Related Pages - -**Parameters**: - -| No. | Parameter Description | -| ---- | ------------------------------------------------------------ | -| 1 | [enable_batch_dispatch](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#enable_batch_dispatch): specifies whether to enable "batch optimization + load balancing optimization". | -| 2 | [enable_time_report](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#enable_time_report): specifies whether to count information required by **redo_time_detail()**. | -| 3 | [parallel_recovery_batch](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#parallel_recovery_batch): specifies the quantity of WALs temporarily stored in startup threads in page-level concurrent recovery. | -| 4 | [parallel_recovery_timeout](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#parallel_recovery_timeout): specifies the time period for which WAL records temporarily stored in startup are distributed if there are no WALs distributed in page-level concurrent recovery. | -| 5 | [parallel_recovery_dispatch_algorithm](../../reference-guide/guc-parameters/write-ahead-log/log-replay.md#parallel_recovery_dispatch_algorithm): specifies the startup thread distribution algorithm in page-level concurrent recovery. | - -**Functions**: - -| No. | Function Description | -| ---- | ------------------------------------------------------------ | -| 1 | [redo_stat_detail()](../../reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-3.md#redo_stat_detail()): queries the speed at which a standby database receives, flushes, or applies WALs in a recent time period. It intuitively presents the WAL processing capabilities of the standby database. | -| 2 | [redo_time_detail()](../../reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-3.md#redo_time_detail()): provides data to analyze redo problems. | -| 3 | [dispatch_stat_detail()](../../reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions-3.md#dispatch_stat_detail()): queries the redo load of each worker thread to determine whether the load across worker threads is balanced.
| \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/high-performance.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/high-performance.md deleted file mode 100644 index 62b86049..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/high-performance.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: High Performance -summary: High Performance -author: Guo Huan -date: 2023-05-22 ---- - -# High Performance - -+ **[CBO Optimizer](1-cbo-optimizer.md)** -+ **[LLVM](2-llvm.md)** -+ **[Vectorized Engine](3-vectorized-engine.md)** -+ **[Hybrid Row-Column Store](4-hybrid-row-column-store.md)** -+ **[Adaptive Compression](5-adaptive-compression.md)** -+ **[SQL Bypass](sql-bypass.md)** -+ **[Kunpeng NUMA Architecture Optimization](7-kunpeng-numa-architecture-optimization.md)** -+ **[High Concurrency of Thread Pools](8-high-concurrency-of-thread-pools.md)** -+ **[SMP for Parallel Execution](9-smp-for-parallel-execution.md)** -+ **[Xlog no Lock Flush](10-xlog-no-lock-flush.md)** -+ **[Parallel Page-based Redo For Ustore](11-parallel-page-based-redo-for-ustore.md)** -+ **[Row-Store Execution to Vectorized Execution](12-row-store-execution-to-vectorized-execution.md)** -+ **[Enhancement of Astore Row-Level Compression](astore-row-level-compression.md)** -+ **[BTree Index Compression](btree-index-compression.md)** -+ **[SQL Tracing](tracing-SQL-function.md)** -+ **[Parallel Index Scanning](parallel-index-scan.md)** -+ **[Trace Observation Enhancement](enhancement-of-tracing-backend-key-thread.md)** -+ **[Sorting Operator Optimization](ordering-operator-optimization.md)** -+ **[OCK-accelerated Data Transmission](ock-accelerated-data-transmission.md)** -+ **[OCK SCRLock Accelerate Distributed Lock](ock-scrlock-accelerate-distributed-lock.md)** -+ **[Enhancement of WAL Redo Performance](enhancement-of-wal-redo-performance.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ock-accelerated-data-transmission.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ock-accelerated-data-transmission.md deleted file mode 100644 index cb681249..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ock-accelerated-data-transmission.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: OCK-accelerated Data Transmission -summary: OCK-accelerated Data Transmission -author: Guo Huan -date: 2023-04-04 ---- - -# OCK-accelerated Data Transmission - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -RDMA is used to transmit data and messages between nodes, improving the read consistency on standby nodes. - -## Benefits - -As the data scale and nodes increase, network data transmission between nodes takes a long time, affecting end-to-end database user experience. RDMA can significantly reduce network latency and improve read consistency on standby nodes. - -## Description - -OCK-accelerated data transmission is a lightweight RPC framework implemented based on high-performance RDMA networks. It is used to replace the original TCP/IP message transmission module and transmit data and various messages between nodes, building μs-level competitiveness, significantly reducing CPU resource overhead and network latency, and improving the read consistency on standby nodes. - -## Enhancements - -None - -## Constraints - -The database server must use the CX5 NIC. 
- -## Dependencies - -Primary/Standby shared storage feature. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ock-scrlock-accelerate-distributed-lock.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ock-scrlock-accelerate-distributed-lock.md deleted file mode 100644 index 4441e9cf..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ock-scrlock-accelerate-distributed-lock.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: OCK SCRLock Accelerate Distributed Lock -summary: OCK SCRLock Accelerate Distributed Lock -author: Guo Huan -date: 2023-04-04 ---- - -# OCK SCRLock Accelerate Distributed Lock - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -Use SCRLock to provide distributed locking capabilities and improve distributed locking performance. - -## Benefits - -With the increase in data size and data nodes, acquiring distributed locks across nodes consumes considerable time, which affects the end-to-end database experience. The SCRLock feature significantly reduces remote distributed-lock latency and provides fairness to prevent node starvation. - -## Description - -MogDB resource pooling uses a passive-mode distributed lock based on the TCP protocol, which suffers from high network latency and a complex locking process, affecting the lock acquisition performance of the business. SCRLock provides an active-mode distributed lock based on the RDMA protocol: RDMA accelerates the network to improve the efficiency of acquiring locks, and the active mode brings the natural advantage of a simple process. SCRLock provides lock fairness, which prevents starvation in the cluster, and it provides local lock caching, which reduces the frequency of remote lock acquisition and thus reduces latency. - -## Enhancements - -None - -## Constraints - -The database server must use the CX5 NIC. - -## Dependencies - -Primary/Standby shared storage feature. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ordering-operator-optimization.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ordering-operator-optimization.md deleted file mode 100644 index 6c2d45c9..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/ordering-operator-optimization.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: Sorting Operator Optimization -summary: Sorting Operator Optimization -author: zhang cuiping -date: 2022-11-10 ---- - -# Sorting Operator Optimization - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -This feature improves the performance of the sorting operator in several ways. First, it is optimized for sorting individual columns, using more efficient algorithms and data structures that reduce the computational and storage resources required for sorting. Second, a set of specialized quick-sort functions is introduced; quick sort is a commonly used and efficient sorting algorithm that achieves high sorting performance in the average case.
In addition, incremental sorting is supported, which can utilize a portion of the data that has already been sorted and quickly insert new data into the sorted dataset when it arrives. - -## Benefits - -Compared with the traditional approach of calling comparison functions through pointers, the specialized quick-sort functions reduce call overhead and thus improve performance. Typically, a performance improvement of 2% to 5% can be obtained by using the quick-sort functions. - -The incremental sort method utilizes the ordered arrangement of an index to incrementally sort the remaining fields, which reduces the sorting workload and thus improves performance. Depending on the specific situation, this method can bring a 10- to 100-fold performance improvement. - -Both optimization methods can be used in query and sort operations to improve performance and reduce overhead. The specific performance improvement will be affected by the amount of data, index design, query conditions, and other factors. Overall, however, these optimization methods are designed to improve the performance of query and sort operations so that your business can process data more efficiently. - -## Description - -This feature optimizes the sorting of individual columns and reduces overhead by saving only one Datum structure, avoiding the operation of copying the tuple into sort memory. - -When sorting with quick sort in MogDB, each data type has its own comparison function. To avoid the overhead of calling the comparison function many times, this feature introduces a new set of fast sort functions. The comparison functions are inlined, which improves performance by eliminating the overhead of a large number of comparison function calls. - -In addition, this feature introduces an incremental sort method: when data is already ordered on an index prefix, only the remaining fields are sorted incrementally, reducing the number of fields to sort and thereby improving performance. - -All of these optimizations are very helpful in improving the performance of the sort operation, reducing overhead and making sorting more efficient and faster. - -## Constraints - -In sorting scenarios other than ORDER BY LIMIT, this feature supports the following data types: integer, date, timestamp, uuid, text, varchar, and char. - -## Example - -1. Create a table, insert data, and turn on incremental sorting. - - ```sql - -- Create a table and insert test data - drop table if exists MogDB_incresort_1; - create table MogDB_incresort_1 (id int, pname name, match text); - - create index on MogDB_incresort_1(id); - - insert into MogDB_incresort_1 - values ( - generate_series(1, 20000), - 'player# ' || generate_series(1, 20000), - 'match# ' || generate_series(1, 11) - ); - - vacuum analyze MogDB_incresort_1; - - -- Turn on incremental sorting - set enable_incremental_sort = on; - ``` - -2. Incremental sorting operators are typically generated in partially ordered scenarios with a LIMIT, such as the Index Scan + LIMIT scenario. - - ```sql - -- Incremental sorting using index scanning - MogDB=# explain (costs off) select id, pname from MogDB_incresort_1 where id < 20 order by id, pname limit 20; - QUERY PLAN - ---------------------------------------------------------------------------- - Limit - -> Incremental Sort - Sort Key: id, pname - Presorted Key: id - -> Index Scan using mogdb_incresort_1_id_idx on mogdb_incresort_1 - Index Cond: (id < 20) - (6 rows) - ``` - -3. Replacing the index scan of step 2 with an ordered subquery also allows for incremental sorting.
- - ```sql - -- Incremental sorting using ordered subqueries - MogDB=# explain (costs off) - select players.pname, - random() as lottery_number - from ( - select distinct pname - from MogDB_incresort_1 - group by pname - order by pname - ) as players - order by players.pname, - lottery_number - limit 20; - QUERY PLAN - ----------------------------------------------------------------------- - Limit - -> Incremental Sort - Sort Key: players.pname, (random()) - Presorted Key: players.pname - -> Subquery Scan on players - -> Unique - -> Sort - Sort Key: mogdb_incresort_1.pname - -> HashAggregate - Group By Key: mogdb_incresort_1.pname - -> Seq Scan on mogdb_incresort_1 - (11 rows) - ``` - -## Related Pages - -[enable_incremental_sort](../../reference-guide/guc-parameters/query-planning/optimizer-method-configuration.md#enable_incremental_sort) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/parallel-index-scan.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/parallel-index-scan.md deleted file mode 100644 index 42352d63..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/parallel-index-scan.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Parallel Index Scanning -summary: Parallel Index Scanning -author: zhang cuiping -date: 2022-11-14 ---- - -# Parallel Index Scanning - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -Parallel index scanning is a database query feature that can dramatically improve data access speed. By breaking the query task into multiple parallel subtasks and scanning the index in parallel, you can avoid scanning all data files, improve query efficiency, and speed up data access. - -## Benefits - -The parallel index scanning feature can be expected to yield performance gains of around 25% for the index scan itself, and around 10% for overall execution. - -## Description - -This feature supports parallel index scanning for ordinary and partitioned tables, covering three scan methods: indexscan, indexonlyscan, and bitmapscan. - -- indexscan: When performing a range query on a data table, the index is used to locate the start and end positions; the indexed data is scanned first, and the data file is then scanned in parallel according to the scan result, improving query efficiency. - -- indexonlyscan: During a table scan, when all target columns are contained in the index, only the index data needs to be scanned, reducing the number of files to be scanned; the range of index data determined by the constraints can then be scanned in parallel. - -- bitmapscan: When performing a table scan, the index scan is divided into two phases. First, the index files are scanned in parallel to obtain all the data file pages to be scanned; then the data file pages are scanned in parallel, which reduces the number of random accesses and reads of the data files. - -## Enhancements - -None - -## Constraints - -- This feature requires enabling of the parallel switch (setting `query_dop` to a value greater than 1). -- This feature supports BTree indexes and does not support Ustore and Cstore storage. - -## Example - -Partitioned table indexes are divided into LOCAL indexes and GLOBAL indexes: a LOCAL index corresponds to a specific partition, while a GLOBAL index corresponds to the entire partitioned table.
The following is an example of parallel index scanning for partitioned tables. - -- GLOBAL index - - ```sql - -- 1. Create a partitioned table and insert data. - CREATE TABLE parallel_partition_index_01 - ( - c1 int, - c2 int, - c3 int - ) - PARTITION BY RANGE(c1) - ( - PARTITION P1 VALUES LESS THAN(2000), - PARTITION P2 VALUES LESS THAN(4000), - PARTITION P3 VALUES LESS THAN(6000), - PARTITION P4 VALUES LESS THAN(MAXVALUE) - )enable row movement; - INSERT INTO parallel_partition_index_01 VALUES (generate_series(1, 10000), generate_series(1,10000), generate_series(1, 10000)); - - -- 2. Create an index. - CREATE INDEX index_parallel_partition_index_01 on parallel_partition_index_01(c1) GLOBAL; - - -- 3. Disable other scan types such as seqscan, bitmapscan, indexonlyscan, etc. - SET enable_seqscan = OFF; - SET enable_bitmapscan = OFF; - SET enable_indexonlyscan = OFF; - - -- 4. Enable parallelism and set the synchronization cost (smp_thread_cost) for communication between threads. - -- Note: A low value for the smp_thread_cost parameter can prompt the optimizer to prefer parallelism. - SET query_dop = 2; - SET smp_thread_cost = 0; - - -- 5. Perform a query operation. - SELECT * FROM parallel_partition_index_01 WHERE c1=100; - c1 | c2 | c3 - -----+-----+----- - 100 | 100 | 100 - (1 row) - - -- 6. Execute the EXPLAIN statement to view execution plan information. - EXPLAIN (COSTS OFF) SELECT * FROM parallel_partition_index_01 WHERE c1<1000; - QUERY PLAN - ----------------------------------------------------------------------------------------- - Streaming(type: LOCAL GATHER dop: 1/2) - -> Index Scan using index_parallel_partition_index_01 on parallel_partition_index_01 - Index Cond: (c1 < 1000) - (3 rows) - ``` - -- LOCAL index - - ```sql - -- 1. Create a partitioned table and insert data. - CREATE TABLE parallel_partition_index_01 - ( - c1 int, - c2 int, - c3 int - ) - PARTITION BY RANGE(c1) - ( - PARTITION P1 VALUES LESS THAN(2000), - PARTITION P2 VALUES LESS THAN(4000), - PARTITION P3 VALUES LESS THAN(6000), - PARTITION P4 VALUES LESS THAN(MAXVALUE) - )enable row movement; - INSERT INTO parallel_partition_index_01 VALUES (generate_series(1, 10000), generate_series(1,10000), generate_series(1, 10000)); - - -- 2. Create an index. - CREATE INDEX index_parallel_partition_index_01 on parallel_partition_index_01(c1) LOCAL; - - -- 3. Disable other scan types such as seqscan, bitmapscan, indexonlyscan, etc. - SET enable_seqscan = OFF; - SET enable_bitmapscan = OFF; - SET enable_indexonlyscan = OFF; - - -- 4. Enable parallelism and set the synchronization cost (smp_thread_cost) for communication between threads. - -- Note: A low value for the smp_thread_cost parameter can prompt the optimizer to prefer parallelism. - SET query_dop = 2; - SET smp_thread_cost = 0; - - -- 5. Perform a query operation. - SELECT * FROM parallel_partition_index_01 WHERE c1=100; - c1 | c2 | c3 - -----+-----+----- - 100 | 100 | 100 - (1 row) - - -- 6. Execute the EXPLAIN statement to view execution plan information.
- EXPLAIN (COSTS OFF) SELECT * FROM parallel_partition_index_01 WHERE c1<1000; - QUERY PLAN - ----------------------------------------------------------------------------------------- - Streaming(type: LOCAL GATHER dop: 1/2) - -> Partition Iterator - Iterations: 1 - Selected Partitions: 1 - -> Partitioned Index Scan using index_parallel_partition_index_01 on parallel_partition_index_01 - Index Cond: (c1 < 1000) - (6 rows) - ``` - -## Related Pages - -[smp_thread_cost](../../reference-guide/guc-parameters/query-planning/optimizer-cost-constants.md) diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/parallel-query-optimization.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/parallel-query-optimization.md deleted file mode 100644 index b2002dd9..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/parallel-query-optimization.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Parallel Query Optimization -summary: Parallel Query Optimization -author: Guo Huan -date: 2022-05-10 ---- - -# Parallel Query Optimization - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -MogDB version 5.0 introduces the parallel sort merge join feature, which is mainly used to optimize the execution speed of join operations. When the system configuration allows parallel execution and the execution plan is suitable for it, join operations can use multiple threads to scan and match the associated columns simultaneously, improving the execution efficiency of the join operation. - -## Benefits - -With the parallel sort merge join feature, MogDB version 5.0 is able to better cope with the demands of large-scale data join operations, providing higher query performance and faster execution speed. The introduction of this feature provides users with more powerful data processing capabilities and a more optimized query experience. - -## Description - -Parallel sort merge join is an optimization method to improve efficiency when performing join operations. A traditional sort merge join scans and matches data row by row in sequential order after sorting each of the associated columns of a related table. Although ordering is guaranteed, it is less efficient. Parallel query optimization, on the other hand, provides the ability to execute sort merge join in parallel. By utilizing multiple worker threads to match the associated table data at the same time, resource utilization can be improved, thus enhancing the query performance of sort merge join. - -Specifically, when the system configuration allows parallel execution and the execution plan is suitable for parallel execution, MogDB can utilize multiple threads to scan and match data from the associated columns in parallel. This can speed up the execution of join operations and improve query efficiency. - -## Enhancements - -None - -## Constraints - -- Requires the parallel switch to be turned on, i.e., set the `query_dop` parameter to a value greater than 1. -- Multi-table query operations in JOIN_UNIQUE_OUTER, JOIN_FULL and JOIN_RIGHT scenarios are not supported. - -## Dependencies - -None - -## Example - -1. Create a table and insert data. - - ```sql - CREATE TABLE MogDB_parallel_merge_join(id int); - INSERT INTO MogDB_parallel_merge_join VALUES (generate_series(1, 1000000)); - ``` - -2. Create an index. - - ```sql - CREATE INDEX index_parallel_merge_join ON MogDB_parallel_merge_join(id); - ``` - -3.
Collect statistical information. - - ```sql - analyze MogDB_parallel_merge_join; - ``` - -4. Turn on the parallel query switch and set the `query_dop` parameter to a value greater than 1. - - ```sql - set query_dop = 4; - ``` - -5. Use EXPLAIN to verify that the merge join is executed as a parallel query. - - ```sql - EXPLAIN (COSTS OFF) SELECT * FROM MogDB_parallel_merge_join t1, MogDB_parallel_merge_join t2 WHERE t1.id = t2.id order by t1.id limit 10; - ``` - - A message similar to the following is displayed: - - ```sql - QUERY PLAN - ------------------------------------------------------------------------------------------------------------------ - Limit - -> Sort - Sort Key: t1.id - -> Streaming(type: LOCAL GATHER dop: 1/4) - -> Limit - -> Merge Join - Merge Cond: (t1.id = t2.id) - -> Index Only Scan using index_parallel_merge_join on mogdb_parallel_merge_join t1 - -> Full Index Only Scan using index_parallel_merge_join on mogdb_parallel_merge_join t2 - ``` - -## Related Pages - -[query_dop](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#query_dop) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/sql-bypass.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/sql-bypass.md deleted file mode 100644 index 2bbd968b..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/sql-bypass.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: SQL Bypass -summary: SQL Bypass -author: Guo Huan -date: 2022-05-07 ---- - -# SQL Bypass - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -SQL Bypass is an optimization technique for simple query scenarios that improves query performance and efficiency. By rationally optimizing query logic and utilizing indexes and caching, it speeds up query execution and reduces unnecessary computational overhead. - -## Benefits - -SQL Bypass is an optimization technique designed for simple query scenarios. Compared to traditional execution logic, SQL Bypass is able to improve performance by more than 10% in basic scenarios. - -For example, SQL Bypass can significantly improve query performance when performing equality lookups on 200,000 rows. The actual performance improvement may vary from case to case, but it is usually 10% or more in the basic scenario. - -In PBE mode, SQL Bypass can provide even more significant performance improvements, up to 20% or more. For example, SQL Bypass can further improve query performance when executing queries on 200,000 rows. - -It should be noted that these performance improvements depend on the actual data volume, query conditions, and specific query scenarios. Overall, however, SQL Bypass brings higher performance and efficiency to the query operations of the system. - -## Description - -In a typical OLTP (Online Transaction Processing) scenario, simple queries usually make up a large portion of the total. These queries involve only single tables and simple expressions, and do not involve complex joins, subqueries, aggregate operations, and so on. To accelerate the execution of such simple queries, MogDB 5.0 enhances SQL Bypass.
- -The main idea of the SQL Bypass framework is to perform simple pattern matching on such simple queries in the parse stage, and then enter a special execution path that skips the classic executor frameworks, including the classic frameworks for initialization and execution of operators, expressions, and projections. Instead, the SQL Bypass framework implements a set of concise execution paths and calls the storage interface directly to execute queries, thus greatly accelerating the execution of simple queries. - -By using the SQL Bypass framework, the overhead of the full execution machinery can be avoided, unnecessary computations and data transfers can be reduced, and the performance and efficiency of queries can be improved while meeting the needs of simple queries. This is especially important for OLTP systems, where highly concurrent query requests require fast response and processing. - -## Enhancements - -Added logic that applies scan filter conditions to rows after they are fetched through index conditions, so SQL Bypass is now available for statements that have both index conditions and filter conditions. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) Note: Under the current MogDB execution logic, the index condition is used to retrieve data in the B-tree, and it acts directly on the index scan operator. The scan filter condition is only used to filter the returned data after the scan method is called. - -## Constraints - -SQL Bypass needs to be enabled, i.e., set the `enable_opfusion` parameter to on. - -## Example - -1. Enable SQL Bypass. - - ```sql - set enable_opfusion = on; - ``` - -2. Create a table and insert data. - - ```sql - CREATE TABLE MogDB_sql_bypass_1 (id int, pname name, match text); - INSERT INTO MogDB_sql_bypass_1 - VALUES ( - generate_series(1, 20000), - 'player# ' || generate_series(1, 20000), - 'match# ' || generate_series(1, 11) - ); - ``` - -3. Create an index. - - ```sql - CREATE index on MogDB_sql_bypass_1(id); - ``` - -4. Execute Explain to view the results of the index scan query. - - - Simple index scanning scenario - - ```sql - MogDB=# explain (costs on) select id from MogDB_sql_bypass_1 where id = 1; - QUERY PLAN - ----------------------------------------------------------------------------------------------------------- - [Bypass] - Index Only Scan using mogdb_sql_bypass_1_id_idx on mogdb_sql_bypass_1 (cost=0.00..48.13 rows=11 width=4) - Index Cond: (id = 1) - (3 rows) - ``` - - - Index scanning scenarios with filter conditions - - ```sql - MogDB=# explain (costs off) select id, pname, match from MogDB_sql_bypass_1 where id = 1; - QUERY PLAN - ------------------------------------------------------------------ - [Bypass] - Index Scan using mogdb_sql_bypass_1_id_idx on mogdb_sql_bypass_1 - Index Cond: (id = 1) - (3 rows) - ``` - - - Index scanning in PBE scenarios.
- - ```sql - MogDB=# PREPARE p1 AS SELECT id FROM MogDB_sql_bypass_1 WHERE id = $1; - MogDB=# PREPARE p2 AS SELECT id, pname, match FROM MogDB_sql_bypass_1 WHERE id = $1; - MogDB=# PREPARE p3 AS SELECT id, pname, match FROM MogDB_sql_bypass_1 WHERE id = $1 AND match <> $2; - - MogDB=# explain (costs off) EXECUTE p1(2); - QUERY PLAN - ----------------------------------------------------------------------- - [Bypass] - Index Only Scan using mogdb_sql_bypass_1_id_idx on mogdb_sql_bypass_1 - Index Cond: (id = $1) - (3 rows) - - MogDB=# explain (costs off) EXECUTE p2(2); - QUERY PLAN - ------------------------------------------------------------------ - [Bypass] - Index Scan using mogdb_sql_bypass_1_id_idx on mogdb_sql_bypass_1 - Index Cond: (id = $1) - (3 rows) - - MogDB=# explain (costs off) EXECUTE p3(2, 'match# 7'); - QUERY PLAN - ------------------------------------------------------------------ - [Bypass] - Index Scan using mogdb_sql_bypass_1_id_idx on mogdb_sql_bypass_1 - Index Cond: (id = $1) - Filter: (match <> $2) - (4 rows) - ``` - -## Related Pages - -[enable_opfusion](../../reference-guide/guc-parameters/query-planning/other-optimizer-options.md#enable_opfusion) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/tracing-SQL-function.md b/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/tracing-SQL-function.md deleted file mode 100644 index 0de7ac7f..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/high-performance/tracing-SQL-function.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: SQL Tracing -summary: SQL Tracing -author: Guo Huan -date: 2022-11-15 ---- - -# SQL Tracing - -## Availability - -This feature is available since MogDB 3.1.0. - -## Introduction - -MogDB SQL Trace is an enhanced advanced feature designed to provide the ability to dynamically export and observe SQL execution in the MogDB database. This feature focuses on observability enhancements that make it easier to observe and troubleshoot business-critical systems without downtime. - -## Benefits - -With MogDB SQL Trace, users can monitor and record the SQL execution process in real time, including the execution time of SQL statements, execution plans, I/O operations, locks, and other key information. This information helps users gain insight into SQL performance bottlenecks and potential problems, and provides data support for troubleshooting and performance optimization. - -## Description - -This feature is an enhancement to the gstrace tool. It supports tracing all SQL trace information without stopping the database in production, tracing up to eight sessions or up to eight threads, and exporting the trace information for later analysis and diagnosis. Users can enable, export, and disable the SQL trace function of gstrace. - -The contents of SQL trace export include: - -- Basic information related to machine and database, total execution time of PARSE (parse) and EXEC (execute) statements, as well as the execution time and number of execution entries of the main operators SCAN and SORT AGGREGATE. -- Total time information for OPTIMIZER, i.e., information starting with "OPTIMIZER + #session_id". -- Wait event information, such as I/O and locks (regular locks and lightweight locks). -- Wait status information, such as network communication status, lock waiting status, and file writing status during SQL execution.
-- To record wait event or wait status information, you must enable the real-time wait event collection parameter `enable_instr_track_wait`. - -## Related Pages - -[gstrace](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gstrace.md), [enable_instr_track_wait](../../reference-guide/guc-parameters/wait-events.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/2-workload-diagnosis-report.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/2-workload-diagnosis-report.md deleted file mode 100644 index f555ffb6..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/2-workload-diagnosis-report.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: WDR -summary: WDR -author: Guo Huan -date: 2022-05-07 ---- - -# WDR - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -The workload diagnosis report (WDR) provides database performance diagnosis reports based on the baseline performance and incremental data that reflects performance changes. - -## Benefits - -- The WDR is the main method for diagnosing long-term performance problems. Based on the performance baseline of a snapshot, performance analysis is performed from multiple dimensions, helping DBAs understand the system load, performance of each component, and performance bottlenecks. -- Snapshots are also an important data source for self-diagnosis and self-optimization suggestions on subsequent performance problems. - -## Description - -The WDR generates a performance report between two different time points based on the system performance snapshot data at these time points. The report is used to diagnose database kernel performance faults. - -You can use generate_wdr_report(…) to generate a performance report based on two performance snapshots. - -The WDR depends on the following two components: - -- Snapshot: The performance snapshot can be configured to collect a certain amount of performance data from the kernel at a specified interval and store the data in the user tablespace. Any snapshot can be used as a performance baseline for comparison with other snapshots. -- WDR Reporter: This tool analyzes the overall system performance based on two snapshots, calculates the changes of more specific performance indicators between the two time points, and generates summarized and detailed performance data. For details, see Table 1 and Table 2. - -**Table 1** Summarized diagnosis report - -| Diagnosis Type | Description | -| :------------------------------ | :----------------------------------------------------------- | -| Database Stat | Evaluates the load and I/O status of the current database. Load and I/O are the most important indicators of the TP system.
The statistics include the number of sessions connected to the database, number of committed and rolled back transactions, number of read disk blocks, number of disk blocks found in the cache, number of rows returned, captured, inserted, updated, and deleted through database query, number of conflicts and deadlocks, usage of temporary files, and I/O read/write time. | -| Load Profile | Evaluates the current system load from the time, I/O, transaction, and SQL dimensions.
The statistics include the job running elapse time, CPU time, daily transaction volume, logical and physical read volume, read and write I/O times and size, login and logout counts, SQL and transaction execution volumes, and SQL P80 and P95 response times. | -| Instance Efficiency Percentages | Evaluates the cache efficiency of the current system.
The statistics include the database cache hit ratio. | -| Events | Evaluates the performance of key system kernel resources and key events.
The statistics include the number of times that the key events of the database kernel occur and the waiting time. | -| Wait Classes | Evaluates the performance of key events in the system.
The statistics cover the time the database kernel spends on the main types of wait events, such as **STATUS**, **LWLOCK_EVENT**, **LOCK_EVENT**, and **IO_EVENT**. | -| CPU | Includes the distribution of CPU time across user mode, kernel mode, I/O wait mode, and idle mode. | -| IO Profile | Includes the number of database I/O times, database I/O data volume, number of redo I/O times, and redo I/O volume. | -| Memory Statistics | Includes maximum process memory, used process memory, maximum shared memory, and used shared memory. | - -**Table 2** Detailed diagnosis report - -| Diagnosis Type | Description | -| :--------------------- | :----------------------------------------------------------- | -| Time Model | Evaluates the performance of the current system in the time dimension.
The statistics include time consumed by the system in each phase, including the kernel time, CPU time, execution time, parsing time, compilation time, query rewriting time, plan generation time, network time, and I/O time. | -| SQL Statistics | Diagnoses SQL statement performance problems.
The statistics include normalized SQL performance indicators in multiple dimensions: elapsed time, CPU time, rows returned, tuple reads, executions, physical reads, and logical reads. The indicators can be classified into execution time, number of executions, row activity, and cache I/O. | -| Wait Events | Diagnoses the performance of key system resources and key events in detail.
The statistics include the performance of all key events in a period of time, including the number of events and the time consumed. | -| Cache IO Stats | Diagnoses the performance of user tables and indexes.
The statistics include read and write operations on all user tables and indexes, and the cache hit ratio. | -| Utility status | Diagnoses the background task performance.
The statistics include the performance of background tasks such as replication. | -| Object stats | Diagnoses the performance of database objects.
The statistics include scan activities on user tables and indexes on tables, insert, update, and delete activities, the number of valid rows, and table maintenance status. | -| Configuration settings | Determines whether the configuration has changed.
It is a snapshot that contains all current configuration parameters. | -| SQL detail | Displays information about unique query text. | - -## Enhancements - -None. - -## Constraints - -- The WDR snapshot collects performance data of different databases. If there are a large number of databases or tables in the database instance, it takes a long time to create a WDR snapshot. -- If a WDR snapshot is taken while a large number of DDL statements are being executed, the snapshot may fail. -- If a database is dropped during snapshot creation, the WDR snapshot may fail. - -## Dependencies - -None. - -## Related Pages - -[WDR Snapshot](../../performance-tuning/wdr/wdr.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/3-slow-sql-diagnosis.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/3-slow-sql-diagnosis.md deleted file mode 100644 index c8e008ad..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/3-slow-sql-diagnosis.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: Slow SQL Diagnosis -summary: Slow SQL Diagnosis -author: Guo Huan -date: 2022-05-07 ---- - -# Slow SQL Diagnosis - -## Availability - -This feature is available since MogDB 1.1.0. - -After reconstruction, the following slow SQL views are deprecated: dbe_perf.gs_slow_query_info, dbe_perf.gs_slow_query_history, dbe_perf.global_slow_query_history, and dbe_perf.global_slow_query_info. - -The reconstructed feature is available since MogDB 3.1.0. - -## Introduction - -Slow SQL diagnosis provides necessary information for diagnosing slow SQL statements, helping developers backtrack SQL statements whose execution time exceeds the threshold and diagnose SQL performance bottlenecks. - -## Benefits - -Slow SQL diagnosis provides the detailed information required to diagnose slow SQL statements. You can diagnose performance problems of specific slow SQL statements offline without reproducing the problem. The table-based and function-based APIs help users collect statistics on slow SQL indicators and connect to third-party platforms. - -## Description - -Slow SQL diagnosis records information about all jobs whose execution time exceeds the threshold **log_min_duration_statement**. - -On the primary node, slow SQL statements provide table-based and function-based query APIs. You can query the execution plan, start time, end time, query statement, row activity, kernel time, CPU time, execution time, parsing time, compilation time, query rewriting time, plan generation time, network time, I/O time, network overhead, lock overhead, and wait events. All information is anonymized. - -On the standby node, slow SQL statements provide a dedicated function as the query API. The standby node cannot write performance diagnosis data to the statement\_history table. Therefore, the standby node uses a new method to record data and queries the data through the function API. The information queried through the API is the same as that in the statement\_history table on the primary node. - -## Enhancements - -Optimized slow SQL indicators, security (anonymization), execution plans, and query interfaces.
- -```sql -Primary node: Run the following command to check the execution information about the SQL statements in the database instance: -gsql> select * from dbe_perf.get_global_full_sql_by_timestamp(start_timestamp, end_timestamp); -For example: -MogDB=# select * from DBE_PERF.get_global_full_sql_by_timestamp('2020-12-01 09:25:22', '2020-12-31 23:54:41'); --[ RECORD 1 ]--------+--------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------- -node_name | dn_6001_6002_6003 -db_name | postgres -schema_name | "$user",public -origin_node | 1938253334 -user_name | user_dj -application_name | gsql -client_addr | -client_port | -1 -unique_query_id | 3671179229 -debug_query_id | 72339069014839210 -query | select name, setting from pg_settings where name in (?) -start_time | 2020-12-19 16:19:51.216818+08 -finish_time | 2020-12-19 16:19:51.224513+08 -slow_sql_threshold | 1800000000 -transaction_id | 0 -thread_id | 139884662093568 -session_id | 139884662093568 -n_soft_parse | 0 -n_hard_parse | 1 -query_plan | Datanode Name: dn_6001_6002_6003 - | Function Scan on pg_show_all_settings a (cost=0.00..12.50 rows=5 width=64) - | Filter: (name = '***'::text) -... - -Primary node: Run the following command to check the execution information about the slow SQL statements in the database instance: -gsql> select * from dbe_perf.get_global_slow_sql_by_timestamp(start_timestamp, end_timestamp); -MogDB=# select * from DBE_PERF.get_global_slow_sql_by_timestamp('2020-12-01 09:25:22', '2020-12-31 23:54:41'); --[ RECORD 1 ]--------+--------------------------------------------------------------------------------------------------- -node_name | dn_6001_6002_6003 -db_name | postgres -schema_name | "$user",public -origin_node | 1938253334 -user_name | user_dj -application_name | gsql -client_addr | -client_port | -1 -unique_query_id | 2165004317 -debug_query_id | 72339069014839319 -query | select * from DBE_PERF.get_global_slow_sql_by_timestamp(?, ?); -start_time | 2020-12-19 16:23:20.738491+08 -finish_time | 2020-12-19 16:23:20.773714+08 -slow_sql_threshold | 10000 -transaction_id | 0 -thread_id | 139884662093568 -session_id | 139884662093568 -n_soft_parse | 10 -n_hard_parse | 8 -query_plan | Datanode Name: dn_6001_6002_6003 - | Result (cost=1.01..1.02 rows=1 width=0) - | InitPlan 1 (returns $0) - | -> Seq Scan on pgxc_node (cost=0.00..1.01 rows=1 width=64) - | Filter: (nodeis_active AND ((node_type = '***'::"char") OR (node_type = '***'::"char"))) -... - -Primary node: Check the execution information about the SQL statement on the current node. -gsql> select * from statement_history; -For example: -MogDB=# select * from statement_history; --[ RECORD 1 ]--------+--------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------- -db_name | postgres -schema_name | "$user",public -origin_node | 1938253334 -user_name | user_dj -application_name | gsql -client_addr | -client_port | -1 -unique_query_id | 3671179229 -debug_query_id | 72339069014839210 -query | select name, setting from pg_settings where name in (?) 
-start_time | 2020-12-19 16:19:51.216818+08 -finish_time | 2020-12-19 16:19:51.224513+08 -slow_sql_threshold | 1800000000 -transaction_id | 0 -thread_id | 139884662093568 -session_id | 139884662093568 -n_soft_parse | 0 -n_hard_parse | 1 -query_plan | Datanode Name: dn_6001_6002_6003 - | Function Scan on pg_show_all_settings a (cost=0.00..12.50 rows=5 width=64) - | Filter: (name = '***'::text) - -Standby node: Check the execution information about the SQL statement on the current node. -gsql> select * from dbe_perf.standby_statement_history(only_slow, start_time, end_time); -Example: -MogDB=# select * from dbe_perf.standby_statement_history(false); --[ RECORD 1 ]--------+--------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------- -db_name | postgres -schema_name | "$user",public -origin_node | 1938253334 -user_name | user_dj -application_name | gsql -client_addr | -client_port | -1 -unique_query_id | 3671179229 -debug_query_id | 72339069014839210 -query | select name, setting from pg_settings where name in (?) -start_time | 2020-12-19 16:19:51.216818+08 -finish_time | 2020-12-19 16:19:51.224513+08 -slow_sql_threshold | 1800000000 -transaction_id | 0 -thread_id | 139884662093568 -session_id | 139884662093568 -n_soft_parse | 0 -n_hard_parse | 1 -query_plan | Datanode Name: dn_6001_6002_6003 - | Function Scan on pg_show_all_settings a (cost=0.00..12.50 rows=5 width=64) - | Filter: (name = '***'::text) -``` - -## Constraints - -- The SQL tracing information is based on the normal execution logic. The tracing information may be inaccurate if SQL statements fail to be executed. -- Restarting a node may cause data loss on the node. -- If you exit a session immediately after SQL statements are executed, the session data that is not updated to the system catalog may be lost. -- The number of SQL statements to be collected is specified by a GUC parameter. If the number of SQL statements exceeds the threshold, new SQL statement execution information will not be collected. -- The maximum number of bytes of lock event details collected by a single SQL statement is specified by a GUC parameter. If the number of bytes exceeds the threshold, new lock event details will not be collected. -- The SQL statement information is updated in asynchronous mode. Therefore, after a query statement is executed, the related view function result is slightly delayed. -- When **track\_stmt\_parameter** is set to **off**, the maximum value of the **query** field is determined by the value of **track\_activity\_query\_size**. -- Certain indicator information (such as row activities, cache I/O, and time distribution) depends on the dbe_perf.statement view. If the number of records in the view exceeds the preset size (depending on the GUC parameter instr_unique_sql_count), related indicators may not be collected. -- Functions and views related to the statement\_history table and the **details** column in dbe_perf.standby\_statement\_history on the standby node are in binary format. To parse the detailed information, use the pg\_catalog.statement\_detail\_decode\(details, 'plaintext', true\) function. -- The statement_history table can be queried only in the postgres database. The data in other databases is empty. -- To query the dbe_perf.standby\_statement\_history function on the standby node, you need to switch to the postgres database.
If you query the function in other databases, a message is displayed indicating that the function is unavailable. -- The standby node uses the track\_stmt\_standby\_chain\_size parameter to limit the memory and disk space occupied by recorded data. -- The content of the statement\_history table and the dbe_perf.standby\_statement\_history function on the standby node are controlled by track\_stmt\_stat\_level. The default value is **'OFF,L0'**. The first part of the parameter controls full SQL recording, and the second part controls slow SQL recording. Slow SQL statements are recorded only when the execution time exceeds the value of log\_min\_duration\_statement. - -## Dependencies - -None. - -## Related Pages - -[Logging Time](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-time.md), [Statistics Information Functions](../../reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions.md), [STATEMENT_HISTORY](../../reference-guide/schema/DBE_PERF/query/STATEMENT_HISTORY_query.md), [Global Temporary Table Functions](../../reference-guide/functions-and-operators/global-temporary-table-functions.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/4-session-performance-diagnosis.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/4-session-performance-diagnosis.md deleted file mode 100644 index 5dae0fb3..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/4-session-performance-diagnosis.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: Session Performance Diagnosis -summary: Session Performance Diagnosis -author: Guo Huan -date: 2022-05-07 ---- - -# Session Performance Diagnosis - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -Session performance diagnosis targets session-level performance faults. - -## Benefits - -- Display the latest events that consume the most resources of user sessions. -- Check the wait events associated with the most resource-consuming SQL statements. -- Check the wait events associated with the most resource-consuming sessions. -- Check information about the most resource-consuming users. -- Check the waiting relationship between blocked sessions. - -## Description - -The session performance diagnosis function diagnoses the performance of all active sessions in the system. Because real-time collection of indicators for all active sessions would noticeably impact the user workload, session snapshot technology is used to sample the indicators of active sessions, and statistics on active sessions are derived from the samples. The statistics reflect the basic information, status, and resources of active sessions from the dimensions of client information, execution start time, execution end time, SQL text, wait events, and current database objects. The probabilistically sampled active-session information helps users diagnose which sessions consume more CPU and memory resources, which database objects are hot objects, and which SQL statements consume more key event resources in the system. In this way, users can locate problematic sessions, SQL statements, and database design issues. - -Session sampling data is classified into two levels, as shown in Figure 1. - -1. The first level is real-time information stored in the memory. The active session information in the latest several minutes is displayed, which has the highest precision. -2.
The second level is the persistent historical information stored in disk files. It displays the historical active session information over a long period of time and is sampled from the in-memory data. This level is suitable for long-term statistics and analysis. - -**Figure 1** Session performance diagnosis principle - - ![Session performance diagnosis principle](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/session-performance-diagnosis-1.png) - -Some application scenarios are as follows: - -1. Check the blocking relationship between sessions. - - ```sql - select sessionid, block_sessionid from pg_thread_wait_status; - ``` - -2. Sample information about blocked sessions. - - ```sql - select sessionid, block_sessionid from DBE_PERF.local_active_session; - ``` - -3. Display the final blocked session. - - ```sql - select sessionid, block_sessionid, final_block_sessionid from DBE_PERF.local_active_session; - ``` - -4. Check the wait event that consumes the most resources. - - ```sql - SELECT s.type, s.event, t.count - FROM dbe_perf.wait_events s, ( - SELECT event, COUNT(*) - FROM dbe_perf.local_active_session - WHERE sample_time > now() - 5 / (24 * 60) - GROUP BY event) t WHERE s.event = t.event ORDER BY count DESC; - ``` - -5. Check the events that consume the most session resources in the last five minutes. - - ```sql - SELECT sessionid, start_time, event, count - FROM ( - SELECT sessionid, start_time, event, COUNT(*) - FROM dbe_perf.local_active_session - WHERE sample_time > now() - 5 / (24 * 60) - GROUP BY sessionid, start_time, event) as t ORDER BY SUM(t.count) OVER (PARTITION BY t.sessionid, start_time) DESC, t.event; - ``` - -6. Check the events whose queries consume the most resources in the last five minutes. - - ```sql - SELECT query_id, event, count - FROM ( - SELECT query_id, event, COUNT(*) - FROM dbe_perf.local_active_session - WHERE sample_time > now() - 5 / (24 * 60) - GROUP BY query_id, event) t ORDER BY SUM(t.count) OVER (PARTITION BY t.query_id) DESC, t.event DESC; - ``` - -## Enhancements - -None - -## Constraints - -None - -## Dependencies - -None - -## Related Pages - -[LOCAL_ACTIVE_SESSION](../../reference-guide/schema/DBE_PERF/session-thread/LOCAL_ACTIVE_SESSION.md), [WAIT_EVENTS](../../reference-guide/schema/DBE_PERF/wait-events/WAIT_EVENTS.md), [PG_THREAD_WAIT_STATUS](../../reference-guide/system-catalogs-and-system-views/system-views/PG_THREAD_WAIT_STATUS.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/5-system-kpi-aided-diagnosis.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/5-system-kpi-aided-diagnosis.md deleted file mode 100644 index 74f5caf3..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/5-system-kpi-aided-diagnosis.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: System KPI-aided Diagnosis -summary: System KPI-aided Diagnosis -author: Guo Huan -date: 2022-05-07 ---- - -# System KPI-aided Diagnosis - -## Availability - -This feature is available since MogDB 1.1.0. - -## Introduction - -KPIs are views of key performance indicators for kernel components or the entire system. Based on KPIs, users can learn about the real-time and historical running status of the system.
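For instance, time-model and wait-event KPIs can be inspected directly with SQL. The following is a minimal sketch; the view and column names (dbe_perf.instance_time, dbe_perf.wait_events, total_wait_time) are assumptions based on the DBE_PERF schema referenced under Related Pages below:

```sql
-- Instance-level time model KPI (view name assumed from the DBE_PERF schema).
select * from dbe_perf.instance_time;

-- Top wait-event KPIs by accumulated wait time (column names assumed).
select type, event, total_wait_time
from dbe_perf.wait_events
order by total_wait_time desc
limit 10;
```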
- -## Benefits - -- Summarized system load diagnosis - - Precise alarms for system load exceptions (overload, stall, and SLA exceptions) and precise system load profile - -- Summarized system time model diagnosis - - Instance-level and query-level time model segmentation, diagnosing the root causes of instance and query performance problems - -- Query performance diagnosis - - Database-level query summary, including top SQL, SQL CPU usage, I/O consumption, execution plan, and excessive hard parsing - -- Diagnosis of disk I/O, index, and buffer performance problems - -- Diagnosis of connection and thread pool problems - -- Diagnosis of checkpoint and redo (RTO) performance problems - -- Diagnosis of system I/O, LWLock, and wait performance problems - - Diagnosis of over 60 modules and over 240 key operation performance problems - -- Function-level performance monitoring and diagnosis (by GSTRACE) - - Tracing of over 50 functions at the storage and execution layers - -## Description - -MogDB provides KPIs of 11 categories and 26 sub-categories, covering instances, files, objects, workload, communication, sessions, threads, cache I/O, locks, wait events, and clusters. - -Figure 1 shows the distribution of kernel KPIs. - -**Figure 1** Distribution of kernel KPIs -![distribution-of-kernel-kpis](https://cdn-mogdb.enmotech.com/docs-media/mogdb/characteristic-description/system-kpi-aided-diagnosis-2.png) - -## Enhancements - -None. - -## Constraints - -- Utility statements do not support normalization. Non-DML statements, such as CREATE, DROP, COPY, and VACUUM, are not supported. -- Currently, only the top-level normalized SQL statements are recorded. SQL statements in a stored procedure are not normalized, and only the SQL statements that call the stored procedure are recorded. - -## Dependencies - -None. - -## Related Pages - -[DBE_PERF Schema](../../reference-guide/schema/DBE_PERF/DBE_PERF.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/built-in-stack-tool.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/built-in-stack-tool.md deleted file mode 100644 index 3320ea0a..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/built-in-stack-tool.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Built-in Stack Tool -summary: Built-in Stack Tool -author: Guo Huan -date: 2023-04-04 ---- - -# Built-in Stack Tool - -## Availability - -This feature is available since 5.0.0. - -## Introduction - -The stack tool is used to obtain the call stack of each thread in the database. It helps database O&M personnel locate faults such as deadlock and hang. - -## Benefits - -Provides function-level call stack information to improve the efficiency of database kernel O&M personnel in analyzing and locating faults such as deadlock and hang. - -## Description - -You can use the gs_stack() function or the gs_ctl stack tool to obtain the call stacks of threads in the database. - -1. gs_stack() function - - - Run **select \* from gs_stack(pid)** to obtain the call stack of a specified thread. 
- - ```sql - MogDB=# select * from gs_stack(139663481165568); - gs_stack - -------------------------------------------------------------------- - __poll + 0x2d + - WaitLatchOrSocket(Latch volatile*, int, int, long) + 0x29f + - WaitLatch(Latch volatile*, int, long) + 0x2e + - JobScheduleMain() + 0x90f + - int GaussDbThreadMain<(knl_thread_role)9>(knl_thread_arg*) + 0x456+ - InternalThreadFunc(void*) + 0x2d + - ThreadStarterFunc(void*) + 0xa4 + - start_thread + 0xc5 + - clone + 0x6d + - (1 row) - ``` - - - Run **select \* from gs_stack()** to obtain the call stacks of all threads. - - ```sql - MogDB=# select * from gs_stack(); - -[ RECORD 1 ]------------------------------------------------------------------------------------------------------- - tid | 139670364324352 - lwtid | 308 - stack | __poll + 0x2d - | CommWaitPollParam::caller(int (*)(pollfd*, unsigned long, int), unsigned long) + 0x34 - | int comm_socket_call(CommWaitPollParam*, int (*)(pollfd*, unsigned long - , int)) + 0x28 - | comm_poll(pollfd*, unsigned long, int) + 0xb1 - | ServerLoop() + 0x72b - | PostmasterMain(int, char**) + 0x314e - | main + 0x617 - | __libc_start_main + 0xf5 - | 0x55d38f8db3a7 - [ RECORD 2 ]------------------------------------------------------------------------------------------------------- - tid | 139664851859200 - lwtid | 520 - stack | __poll + 0x2d - | WaitLatchOrSocket(Latch volatile*, int, int, long) + 0x29f - | SysLoggerMain(int) + 0xc86 - | int GaussDbThreadMain<(knl_thread_role)17>(knl_thread_arg*) + 0x45d - | InternalThreadFunc(void*) + 0x2d - | ThreadStarterFunc(void*) + 0xa4 - | start_thread + 0xc5 - | clone + 0x6d - ``` - -2. gs_ctl stack tool - - - Run the following command to obtain the call stack of a specified thread: - - ```bash - gs_ctl stack -D data_dir -I lwtid - ``` - - In the preceding command, **-D data_dir** specifies the data directory of the mogdb process whose call stack needs to be obtained, and **-I lwtid** specifies the lwtid of the target thread. You can run the **ls /proc/pid/task/** command to obtain the lwtid. The procedure is as follows: - - 1. Obtain the mogdb process ID and data directory. - - ```bash - ps -ux | more - USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND - perfadm 308 9.3 10.1 8719348 1649108 ? Sl May20 58:58 /xxx/bin/mogdb -u 92617 -D /xxx/MogDB/cluster/data1/dn1 -M pending - ``` - - 2. Obtain the lwtid based on the process ID. The directory name in the **task** directory is the lwtid. - - ```bash - ls /proc/308/task/ - 1096 505 522 525 529 532 536 539 542 546 549 552 555 558 561 565 569 575 584 833 923 926 929 932 935 938 - ``` - - 3. Obtain the call stack based on the specified lwtid. - - ```bash - gs_ctl stack -D /xxx/MogDB/cluster/data1/dn1 -I 1096 - [2022-05-21 10:52:51.354][24520][][gs_ctl]: gs_stack start: - tid<140409677575616> lwtid<1096> - __poll + 0x2d - CommWaitPollParam::caller(int (*)(pollfd*, unsigned long, int), unsigned long) + 0x34 - int comm_socket_call(CommWaitPollParam*, int (*)(pollfd*, unsigned long, int)) + 0x28 - comm_poll(pollfd*, unsigned long, int) + 0xb1 - ServerLoop() + 0x72b - PostmasterMain(int, char**) + 0x329a - main + 0x617 - __libc_start_main + 0xf5 - 0x55cf616e7647 - [2022-05-21 10:52:51.354][24520][][gs_ctl]: gs_stack finished! - ``` - - - Run the following command to obtain the call stacks of all threads: - - ```bash - gs_ctl stack -D data_dir - ``` - - In the preceding command, **-D data_dir** specifies the data directory of the mogdb process whose call stack needs to be obtained.
The procedure is as follows: - - 1. Obtain the mogdb process ID and data directory. - - ```bash - ps -ux | more - USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND - perfadm 308 9.3 10.1 8719348 1649108 ? Sl May20 58:58 /xxx/bin/mogdb -u 92617 -D /xxx/MogDB/cluster/data1/dn1 -M pending - ``` - - 2. Obtain the call stacks of all threads. - - ```bash - [panhongchang@euler_phy_194 panhongchang]$ gs_ctl stack -D /xxx/MogDB/cluster/data1/dn1 - [2022-05-21 10:59:44.063][34511][][gs_ctl]: gs_stack start: - Thread 0 tid<140409677575616> lwtid<21045> - __poll + 0x2d - CommWaitPollParam::caller(int (*)(pollfd*, unsigned long, int), unsigned long) + 0x34 - int comm_socket_call(CommWaitPollParam*, int (*)(pollfd*, unsigned long, int)) + 0x28 - comm_poll(pollfd*, unsigned long, int) + 0xb1 - ServerLoop() + 0x72b - PostmasterMain(int, char**) + 0x329a - main + 0x617 - __libc_start_main + 0xf5 - 0x55cf616e7647 - - Thread 1 tid<140405343516416> lwtid<21060> - __poll + 0x2d - WaitLatchOrSocket(Latch volatile*, int, int, long) + 0x29f - SysLoggerMain(int) + 0xc86 - int GaussDbThreadMain<(knl_thread_role)17>(knl_thread_arg*) + 0x45d - InternalThreadFunc(void*) + 0x2d - ThreadStarterFunc(void*) + 0xa4 - start_thread + 0xc5 - clone + 0x6d - ``` - - The remaining call stacks are omitted here. - -## Enhancements - -None - -## Constraints - -1. This tool is used only for the mogdb process. Other processes, such as CMS and GTM, are not supported. -2. If you run SQL statements to execute this tool, ensure that the CN and DN processes are running properly and can be connected to execute SQL statements. -3. If gs_ctl is used, CN and DN processes must be responsive. -4. Concurrency is not supported. In the scenario where the call stacks of all threads are obtained, the call stacks of the threads are not captured at the same time point. -5. A maximum of 128 call stack layers are supported. If there are more than 128 call stack layers, only the top 128 layers are retained. -6. The symbol table is not stripped. (In the current release, **strip –d** is used, and only the debug information is removed; the symbol table is not stripped. If **strip –s** is used, only the pointer can be displayed, and the symbol name cannot be displayed.) -7. Only the **monadmin** and **sysadmin** users can execute this tool using SQL statements. -8. The call stack can be obtained only after the thread has registered the SIGURG signal. -9. For code segments that block the operating system's SIGUSR2 signal, the call stack cannot be obtained. If no signal slot has been allocated to the thread, the call stack still cannot be obtained. - -## Dependencies - -None - -## Related Pages - -[Statistics Information Functions](../../reference-guide/functions-and-operators/statistics-information-functions/statistics-information-functions.md), [gs_ctl](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_ctl.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/dcf-module-tracing.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/dcf-module-tracing.md deleted file mode 100644 index 9c3d8a37..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/dcf-module-tracing.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: DCF Module Tracing -summary: DCF Module Tracing -author: Guo Huan -date: 2022-05-22 ---- - -# DCF Module Tracing - -## Availability - -This feature is available since MogDB 5.0.0.
- -## Introduction - -To make MogDB easier to debug when runtime errors occur, MogDB 5.0 introduces the ability to trace calls to the DCF (Distributed Consensus Framework) module. - -DCF is a key module within MogDB that handles tasks such as data consistency and cluster management in a distributed environment. The operation of this module is very important for troubleshooting and problem localization when MogDB experiences a failure or error. - -## Benefits - -When a fault or error occurs, debuggers can view the trace log to see exactly what MogDB did when calling the DCF module, including the call path, call parameters, call return values, and so on. These details allow in-depth analysis of the cause of the failure and quick localization of the problem. In addition, MogDB 5.0 also provides debugging tools and commands for analyzing and interpreting trace logs. These tools help debuggers understand and process trace logs more efficiently, speeding up troubleshooting and repair. By supporting the ability to trace calls to the DCF module, MogDB 5.0 provides debuggers with more accurate and valuable information to help them solve problems when MogDB runs into errors. This not only improves debugging efficiency, but also enhances the stability and reliability of MogDB. - -## Description - -This feature is an enhancement to the gstrace tool. It supports specifying in advance one or more DCF-defined functions for gstrace to trace. With the gstrace dump command, you can obtain, for a user-specified time period, the traced execution paths of those functions (displayed as call stacks), as well as the key data and data structure details traced inside them. Through this method, you can get more targeted tracing results, quickly locate the problem, and improve diagnostic efficiency. - -## Constraints - -DCF mode needs to be enabled, i.e., set the `enable_dcf` parameter to on. - -## Related Pages - -[gstrace](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gstrace.md), [enable_dcf](../../reference-guide/guc-parameters/DCF-parameters-settings.md#enable_dcf), [DCF](../../high-available-guide/high-available-dcf.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/error-when-writing-illegal-characters.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/error-when-writing-illegal-characters.md deleted file mode 100644 index e8f4f514..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/error-when-writing-illegal-characters.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Error When Writing Illegal Characters -summary: Error When Writing Illegal Characters -author: Guo Huan -date: 2023-09-25 ---- - -# Error When Writing Illegal Characters - -## Availability - -This feature is available since MogDB 5.0.2. - -## Introduction - -Supports error reporting for input data that does not conform to encoding rules when the server and client character set encodings are the same. - -## Benefits - -Outputs an illegal-character error message so that users can fix the fault in time and keep the system running normally. - -## Description - -Currently, when the server-side and client-side encodings are the same, data written through JDBC and similar channels is silently modified to '?' by default if illegal characters appear, with no prompt message, resulting in a mismatch between the data actually written and the data expected.
- -MogDB 5.0.2 adds the session-level USERSET parameter emit_illegal_bind_chars, which controls whether an error is reported for illegal characters. The default value of this parameter is off, which is compatible with the behavior of the old version (no error reporting). Change the parameter value to on to enable error reporting. - -## Constraints - -This feature applies only when the server-side and client-side character sets are the same, the statement is executed in PBE mode through a client such as JDBC, and the number of parameters is greater than 0; in that case, illegal characters in the parameters raise an error. The following cases are outside the scope of this feature: - -1. The character sets are the same but the number of parameters is 0; illegal characters in the statement itself already raise an error. -2. Statements executed via gsql and similar tools, where the behavior is already an error. -3. The behavior of the COPY statement, which is controlled by COMPATIBLE_ILLEGAL_CHARS. -4. When the server-side and client-side character sets do not match, whether an error is reported is determined by the conversion function. - -## Examples - -Create a test database and a table for use in the subsequent DML statements. - -```sql -CREATE DATABASE db_gbk TEMPLATE template0 encoding 'GBK' lc_ctype 'zh_CN.GBK' lc_collate 'zh_CN.GBK'; - -CREATE TABLE test (a integer, b character varying); -``` - -Using Go as an example, write code that connects to the database and executes a DML statement. - -```go -package main - -import ( - "database/sql" - "encoding/hex" - _ "github.com/lib/pq" -) - -func main() { - db, err := sql.Open("postgres", "port=5434 user=test password=Qwer1234 dbname=db_gbk sslmode=disable") - checkErr(err) - defer db.Close() - _, err = db.Exec("set client_encoding = 'GBK'") - checkErr(err) - _, err = db.Exec("set emit_illegal_bind_chars = on") - checkErr(err) - s := "8139EF31" - b, _ := hex.DecodeString(s) - _, err = db.Exec("insert into test values ($1, $2)", 1, string(b)) - checkErr(err) -} - -func checkErr(err error) { - if err != nil { - panic(err) - } -} -``` - -An error is reported: - -``` -panic: pq: invalid byte sequence for encoding "GBK": 0x81 0x39 - -goroutine 1 [running]: -main.checkErr(...) - /home/test/Documents/test_go/main.go:25 -main.main() - /home/test/Documents/test_go/main.go:20 +0x225 -``` - -## Related Pages - -[emit_illegal_bind_chars](../../reference-guide/guc-parameters/miscellaneous-parameters.md#emit_illegal_bind_chars) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/extension-splitting.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/extension-splitting.md deleted file mode 100644 index 29f7d271..00000000 --- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/extension-splitting.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Extension Splitting -summary: Extension Splitting -author: Guo Huan -date: 2022-11-21 ---- - -# Extension Splitting - -## Availability - -This feature is available since MogDB 5.0.0. - -## Introduction - -In MogDB 5.0.0, extensions are developed independently. The extension versions matching each server package can be downloaded from the website, facilitating installation of the latest extension packages and independent upgrade of each extension.

Additionally, Dolphin (MySQL compatibility) is integrated into the MogDB server and is not packaged independently. Users can create a database whose compatibility type is B without running any extra command.

## Benefits

MogDB 5.0.0 extensions are released independently so that users can obtain and install them individually. This means you can selectively install and upgrade extensions according to your needs to meet your specific business requirements.

In addition, we have integrated the Dolphin MySQL compatibility extension, which lets users enjoy MySQL compatibility while using MogDB. You can operate MogDB with syntax and functionality similar to MySQL's, without having to learn a new database language and feature set. In this way, you can migrate and manage your data more easily while enjoying MogDB's high performance and reliability.

## Description

**Extension list**:

1. [pg_bulkload](../../developer-guide/extension/pg_bulkload-user-guide.md): data import in batches

2. [pg_prewarm](../../developer-guide/extension/pg_prewarm-user-guide.md): data warm-up

3. [pg_repack](../../developer-guide/extension/pg_repack-user-guide.md): lock-free vacuum

4. [PostGIS](../../developer-guide/extension/postgis-extension/postgis-overview.md): spatial data extension

5. [wal2json](../../developer-guide/extension/wal2json-user-guide.md): logical replication

6. [db_link](../../developer-guide/extension/foreign-data-wrapper/dblink.md): homogeneous database connection

7. [pg_trgm](../../developer-guide/extension/pg_trgm-user-guide.md): full-text search

8. [whale](../../developer-guide/extension/whale.md): Oracle compatibility (MogDB version)

**fdw**:

1. [postgresql_fdw](../../developer-guide/extension/foreign-data-wrapper/3-postgres_fdw.md): supports PostgreSQL/openGauss. It uses the openGauss libpq package.

2. [oracle_fdw](../../developer-guide/extension/foreign-data-wrapper/1-oracle_fdw.md): supports Oracle. It uses the Oracle 19c package and provides both Arm and x86 packages.

3. [mysql_fdw](../../developer-guide/extension/foreign-data-wrapper/2-mysql_fdw.md): supports MySQL. It uses the MySQL 8.0 client package.

**The following extension is released with the server and is not packaged independently**:

1. [Dolphin MySQL compatibility extension](../../developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-overview.md)

## Constraints

1. The built-in extensions Dolphin, whale, db_link, and pg_trgm have been packaged with the MogDB server since MogDB 3.1.0.
2. Hot upgrade of extensions is not supported yet.
3. Uninstalling extensions is not supported yet.
4. Extension naming rule: {Extension name}-{Community version of the extension}-{Version of MogDB server}-{Supported operating system}-{CPU architecture}.tar.gz

## Related Pages

[Extension Acquisition](https://www.mogdb.io/downloads/mogdb/)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/fault-diagnosis.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/fault-diagnosis.md
deleted file mode 100644
index c957bbba..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/fault-diagnosis.md
+++ /dev/null
@@ -1,34 +0,0 @@
---
title: Fault Diagnosis
summary: Fault Diagnosis
author: Zhang Cuiping
date: 2022-06-17
---

# Fault Diagnosis

## Availability

This feature is available since MogDB 3.0.0.

## Introduction

To quickly locate faults, collect system fault information, export fault data, and then rectify the faults, MogDB 3.0 has enhanced the OM functions and the gstrace diagnostic capabilities.

## Benefits

The enhanced fault diagnosis capability helps R&D personnel rectify faults in time and ensures the normal operation of the system.

## Description

The gs_check tool can compare check results across scenarios and output a difference analysis report to help users locate problems quickly.

The gs_watch tool can monitor MogDB processes and, when a process crash is detected, automatically call gs_collector to collect the system status for later analysis.

The gs_gucquery tool can automatically collect, organize, and export GUC values, and compare how GUC values change between different moments.

The gstrace diagnostic capability is enhanced. It supports enabling trace items for one or more components (modules) and functions by module name and function name. It increases the number of gstrace points in the code and improves the expressiveness of gstrace output. It supports exporting the new key data structure PGPROC as well as user session data. It also implements fault injection, including simulating system-call error reports and saving variable contents on write.

## Related Pages

[gs_check](../../reference-guide/tool-reference/server-tools/gs_check.md), [gstrace](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gstrace.md), [gs_watch](../../reference-guide/tool-reference/server-tools/gs_watch.md)
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/light-lock-export-and-analysis.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/light-lock-export-and-analysis.md
deleted file mode 100644
index 17939fab..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/light-lock-export-and-analysis.md
+++ /dev/null
@@ -1,32 +0,0 @@
---
title: Lightweight Lock Export and Analysis
summary: Lightweight Lock Export and Analysis
author: Guo Huan
date: 2022-05-22
---

# Lightweight Lock Export and Analysis

## Availability

This feature is available since MogDB 5.0.0.

## Introduction

MogDB allows lightweight lock data to be exported and analyzed while the business system is running. Lightweight lock data refers to the lock information of each table, row, or column in the database. By exporting and analyzing lightweight lock data, you can obtain information about concurrent access and lock conflicts for performance optimization, troubleshooting, and similar work.
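
For a quick first look at lightweight lock contention before exporting anything, the wait-status view can be queried while the workload is running. A minimal sketch, assuming the openGauss-style `pg_thread_wait_status` view is available in your build:

```sql
-- Count sessions currently waiting on lightweight locks, grouped by wait status
SELECT wait_status, count(*) AS waiters
FROM pg_thread_wait_status
WHERE wait_status LIKE '%lwlock%'
GROUP BY wait_status
ORDER BY waiters DESC;
```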

## Benefits

Supports exporting lightweight lock state information to improve troubleshooting efficiency, without any impact on MogDB's performance.

## Description

With this feature, users can comprehensively analyze the lightweight lock data in the database without downtime, discover potential performance bottlenecks, resolve concurrency conflicts, and so on. This not only improves the efficiency and accuracy of database management, but also ensures the continuous and stable operation of the business system.

## Constraints

None

## Related Pages

[gstrace](../../reference-guide/tool-reference/tools-used-in-the-internal-system/gstrace.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/maintainability.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/maintainability.md
deleted file mode 100644
index 792d95a5..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/maintainability.md
+++ /dev/null
@@ -1,20 +0,0 @@
---
title: Maintainability
summary: Maintainability
author: Guo Huan
date: 2023-05-22
---

# Maintainability

+ **[WDR](2-workload-diagnosis-report.md)**
+ **[Slow SQL Diagnosis](3-slow-sql-diagnosis.md)**
+ **[Session Performance Diagnosis](4-session-performance-diagnosis.md)**
+ **[System KPI-aided Diagnosis](5-system-kpi-aided-diagnosis.md)**
+ **[Fault Diagnosis](fault-diagnosis.md)**
+ **[Extension Splitting](extension-splitting.md)**
+ **[Built-in Stack Tool](built-in-stack-tool.md)**
+ **[SQL PATCH](sql-patch.md)**
+ **[DCF Module Tracing](dcf-module-tracing.md)**
+ **[Error When Writing Illegal Characters](error-when-writing-illegal-characters.md)**
+ **[Support For Pageinspect & Pagehack](pageinspect-pagehack.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/pageinspect-pagehack.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/pageinspect-pagehack.md
deleted file mode 100644
index fb7e0012..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/pageinspect-pagehack.md
+++ /dev/null
@@ -1,254 +0,0 @@
---
title: Support For Pageinspect & Pagehack
summary: Support For Pageinspect & Pagehack
author: Guo Huan
date: 2023-10-07
---

# Support For Pageinspect & Pagehack

## Availability

This feature is available since MogDB 5.0.2.

## Introduction

The new pageinspect and pagehack tools assist in locating related issues. pageinspect is an extension: after installing it, run CREATE EXTENSION to use it. pagehack is an executable tool that must be installed first and is then run from the command line.

## Benefits

The new tools are intended for internal use by front-line maintenance staff, testers, and developers when locating problems.

## Description

### pageinspect

This extension works in the row-store table (astore) scenario and shows the page content of data files and Btree index files. It is an online query tool (the MogDB instance is ALIVE): the information is viewed by executing the relevant commands on the SQL side.

pageinspect is an extension and needs to be installed manually. Before installation, download the released Toolkits package from the [official website download page](https://www.mogdb.io/downloads/mogdb/), unpack it, copy the corresponding files to the specified directories, and change their permissions. Then log in to the database and create the extension.
```bash
# Unpack the toolkit with the root user
tar -xzvf Toolkits-5.0.2-Kylin-arm64.tar.gz

# Use the database instance user (e.g., omm), go to the pageinspect folder under toolkits and copy the appropriate files to the specified directory
cd pageinspect/
cp pageinspect--1.0.sql $GAUSSHOME/share/postgresql/extension
cp pageinspect.control $GAUSSHOME/share/postgresql/extension
cp pageinspect--unpackaged--1.0.sql $GAUSSHOME/share/postgresql/extension
cp pageinspect.so $GAUSSHOME/lib/postgresql

# Modify permissions
cd $GAUSSHOME/share/postgresql/extension
chmod 644 pageinspect--1.0.sql pageinspect.control pageinspect--unpackaged--1.0.sql
cd $GAUSSHOME/lib/postgresql
chmod 755 pageinspect.so

# Log into the database and create extension
gsql -r
CREATE EXTENSION pageinspect;
```

The extension provides several system functions that are queried in a fixed format, with the following syntax:

```sql
-- View the contents of the heap data table
SELECT * FROM heap_page_items(get_raw_page('table_name','main',page_no));
-- View header information of a heap page
SELECT * FROM page_header(get_raw_page('table_name','main',page_no));
-- View meta page information
SELECT * FROM bt_metap('index_name');
-- View the contents of the specified index page number
SELECT * FROM bt_page_items('index_name', page_no);
```

### pagehack

The pagehack tool parses offline files generated by row-store tables and BTree indexes created in MogDB instances. pagehack is a binary executable run directly on the terminal command line; it takes the file to be parsed as a parameter (row-store table and BTree index files are supported) and displays the specific data content of the pages in the file. The tool needs to be installed manually, as follows:

```bash
# Unpack the toolkit with the root user
tar -xzvf Toolkits-5.0.2-Kylin-arm64.tar.gz

# Use the database instance user (e.g., omm), go to the pagehack folder under toolkits and copy the appropriate files to the specified directory
cd pagehack/
cp pagehack $GAUSSHOME/bin
cp pagehack.so $GAUSSHOME/lib/postgresql

# Modify permissions
cd $GAUSSHOME/bin
chmod 755 pagehack
cd $GAUSSHOME/lib/postgresql
chmod 755 pagehack.so
```

Once the installation is complete, execute the following command directly to use the tool:

```bash
pagehack -f [filename]
```

## Examples

### pageinspect

```sql
# Create the pageinspect extension, then create a row-store table and a BTree index, and insert some data into the table.
MogDB=# CREATE EXTENSION pageinspect;
CREATE EXTENSION
MogDB=# CREATE TABLE t(id int, a text);
CREATE TABLE
MogDB=# CREATE INDEX idx_id on t(id);
CREATE INDEX
MogDB=# INSERT INTO t SELECT num, repeat('ABCD', 8) || num from generate_series(1, 500) as num;
INSERT 0 500
MogDB=# SELECT * FROM t LIMIT 32;
 id |                 a
----+------------------------------------
  1 | ABCDABCDABCDABCDABCDABCDABCDABCD1
  2 | ABCDABCDABCDABCDABCDABCDABCDABCD2
  3 | ABCDABCDABCDABCDABCDABCDABCDABCD3
 ......
 32 | ABCDABCDABCDABCDABCDABCDABCDABCD32
(32 rows)

# View the contents of the heap data table
MogDB=# SELECT * FROM heap_page_items(get_raw_page('t','main',0));
 lp  | lp_off | lp_flags | lp_len | t_xmin | t_xmax | t_field3 | t_ctid  | t_infomask2 | t_infomask | t_hoff | t_bits | t_oid
-----+--------+----------+--------+--------+--------+----------+---------+-------------+------------+--------+--------+-------
   1 |   8128 |        1 |     62 |  14111 |      0 |        0 | (0,1)   |           2 |       2306 |     24 |        |
   2 |   8064 |        1 |     62 |  14111 |      0 |        0 | (0,2)   |           2 |       2306 |     24 |        |
   3 |   8000 |        1 |     62 |  14111 |      0 |        0 | (0,3)   |           2 |       2306 |     24 |        |
   4 |   7936 |        1 |     62 |  14111 |      0 |        0 | (0,4)   |           2 |       2306 |     24 |        |
   5 |   7872 |        1 |     62 |  14111 |      0 |        0 | (0,5)   |           2 |       2306 |     24 |        |

# View header information of a heap page
MogDB=# SELECT * FROM page_header(get_raw_page('t','main',0));
    lsn    | tli | flags | lower | upper | special | pagesize | version | prune_xid
-----------+-----+-------+-------+-------+---------+----------+---------+-----------
 0/C4DC510 |   0 |     0 |   516 |   576 |    8192 |     8192 |       6 |     14108
(1 row)

# View meta page information
MogDB=# SELECT * FROM bt_metap('idx_id');
 magic  | version | root | level | fastroot | fastlevel
--------+---------+------+-------+----------+-----------
 340322 |       2 |    3 |     1 |        3 |         1
(1 row)

# View the contents of the specified index page number
MogDB=# SELECT * FROM bt_page_items('idx_id', 1);
 itemoffset |  ctid   | itemlen | nulls | vars |          data
------------+---------+---------+-------+------+-------------------------
          1 | (3,9)   |      16 | f     | f    | 6e 01 00 00 00 00 00 00
          2 | (0,1)   |      16 | f     | f    | 01 00 00 00 00 00 00 00
          3 | (0,2)   |      16 | f     | f    | 02 00 00 00 00 00 00 00
          4 | (0,3)   |      16 | f     | f    | 03 00 00 00 00 00 00 00
          5 | (0,4)   |      16 | f     | f    | 04 00 00 00 00 00 00 00
```

### pagehack

```sql
# As in the previous example, the MogDB database instance has generated row-store table/BTree index data files, and the data has been written to those files (if the data has not yet been flushed to disk, you can force a checkpoint, or create another table and insert data to move the transaction forward).
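
# If the rows were inserted only just now and might not be on disk yet, one option is to force a checkpoint first (an illustrative, optional step; requires sufficient privileges):
MogDB=# CHECKPOINT;
CHECKPOINT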

# Get object OID
MogDB=# \d+
                                 List of relations
 Schema | Name | Type  | Owner |  Size |             Storage              | Description
--------+------+-------+-------+-------+----------------------------------+-------------
 public | t    | table | omm   | 72 kB | {orientation=row,compression=no} |
(1 row)

MogDB=# \d+ t
                          Table "public.t"
 Column |  Type   | Modifiers | Storage  | Stats target | Description
--------+---------+-----------+----------+--------------+-------------
 id     | integer |           | plain    |              |
 a      | text    |           | extended |              |
Indexes:
    "idx_id" btree (id) TABLESPACE pg_default
Has OIDs: no
Options: orientation=row, compression=no

# Upon querying, table t has oid=17092 and index idx_id has oid=17098
MogDB=# select pg_relation_filepath('t');
 pg_relation_filepath
----------------------
 base/15318/17092
(1 row)

MogDB=# select pg_relation_filepath('idx_id');
 pg_relation_filepath
----------------------
 base/15318/17098
(1 row)

# Execute the following command at the command line to parse the heap data of the row-store table
[omm@mogdb-kernel-001 toolkits]$ pagehack -f /opt/mogdb/data/base/15318/17092 | head -n 32
page information of block 0/5
    pd_lsn: 0/C4DC510
    pd_checksum: 0xB4FA, verify success
    pd_flags:
    pd_lower: 516, non-empty
    pd_upper: 576, old
    pd_special: 8192, size 0
    Page size & version: 8192, 6
    pd_xid_base: 14108, pd_multi_base: 0
    pd_prune_xid: 14108

    Heap tuple information on this page

    Tuple #1 is normal: length 62, offset 8128
        t_xmin/t_xmax/t_cid: 14111/0/0
        ctid:(block 0/0, offset 1)
        t_infomask: HEAP_HASVARWIDTH HEAP_XMIN_COMMITTED HEAP_XMAX_INVALID HEAP_HAS_NO_UID
        t_infomask2: Attrs Num: 2
        t_hoff: 24
        t_bits:
        NNNNNNNN

    Tuple #2 is normal: length 62, offset 8064
        t_xmin/t_xmax/t_cid: 14111/0/0
        ctid:(block 0/0, offset 2)
        t_infomask: HEAP_HASVARWIDTH HEAP_XMIN_COMMITTED HEAP_XMAX_INVALID HEAP_HAS_NO_UID
        t_infomask2: Attrs Num: 2
        t_hoff: 24
        t_bits:
        NNNNNNNN

# Parse the data of the BTree index
[omm@mogdb-kernel-001 toolkits]$ pagehack -f /opt/mogdb/data/base/15318/17098 | head -n 32
page information of block 0/4
    pd_lsn: 0/C4EBD58
    pd_checksum: 0xF9AA, verify success
    pd_flags:
    pd_lower: 48, non-empty
    pd_upper: 8168, old
    pd_special: 8168, size 24
    Page size & version: 8192, 5
    pd_xid_base: 8590274914, pd_multi_base: 4294967299
    pd_prune_xid: 8590274914

    Heap tuple information on this page

    Tuple #1 is redirected: length 2, offset 12642
    Tuple #2 is unused

    Tuple #3 is unused

    Tuple #4 is unused

    Tuple #5 is unused

    Tuple #6 is unused
    Summary (6 total): 5 unused, 0 normal, 0 dead

Normal Heap Page, special space is 0

page information of block 1/4
    pd_lsn: 0/C4EBD58
    pd_checksum: 0x95F3, verify success
    pd_flags:
    pd_lower: 1488, non-empty
```
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/sql-patch.md b/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/sql-patch.md
deleted file mode 100644
index 8610c6d1..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/maintainability/sql-patch.md
+++ /dev/null
@@ -1,133 +0,0 @@
---
title: SQL PATCH
summary: SQL PATCH
author: Guo Huan
date: 2023-04-04
---

# SQL PATCH

## Availability

This feature is available since MogDB 5.0.0.

## Introduction

SQL PATCH adjusts the query execution plan without directly modifying users' service statements.
If the execution plan or execution mode of a query statement does not meet expectations, you can create a query patch and use hints to optimize the query plan, or handle errors for specific statements by short-circuiting them.

## Benefits

If performance problems occur due to poor query plans, or services become unavailable due to internal system errors, you can invoke O&M functions in the database to optimize specific scenarios or report errors in advance, avoiding more serious problems and greatly reducing O&M costs.

## Description

SQL PATCH is designed for database administrators (DBAs), O&M personnel, and other roles who need to optimize SQL statements. If performance problems caused by poor plans of service statements are identified through other O&M views or fault-locating methods, you can create an SQL patch to optimize service statements based on hints. Currently, the following hints are supported: number of rows, scanning mode, join mode, join sequence, PBE custom/generic plan selection, statement-level parameter setting, and parameterized path. In addition, if services are unavailable due to internal system errors triggered by specific statements, you can create SQL patches to rectify single-point failures without changing service statements. In this way, errors can be reported in advance to avoid greater loss.

SQL PATCH is implemented based on the unique SQL ID. Therefore, for an SQL patch to take effect, the related O&M parameters (for details, see [Feature Constraints](#Constraints)) must be enabled. The unique SQL ID can be obtained from both the WDR and the slow SQL view. You must specify the unique SQL ID when creating an SQL patch. The following provides a simple example.

Scenario 1: Use SQL PATCH to optimize specific statements based on hints.

```sql
MogDB=# set track_stmt_stat_level = 'L1,L1'; -- Enable full SQL statistics.
SET
MogDB=# select * from hint_t1 t1 where t1.a = 1; -- Execute the SQL statement.
 a | b | c
---+---+---
 1 | 1 | 1
(1 row)
MogDB=# select unique_query_id, query, query_plan from dbe_perf.statement_history where query like '%hint_t1%'; -- Obtain the query plan and unique SQL ID.
-[ RECORD 1 ]---+----------------------------------------------------------------------------------------------
unique_query_id | 2578396627
query           | select * from hint_t1 t1 where t1.a = ?;
query_plan      | Datanode Name: sgnode
                |   Bitmap Heap Scan on hint_t1 t1  (cost=4.33..15.70 rows=10 p-time=0 p-rows=0 width=12)
                |     Recheck Cond: (a = '***')
                |     ->  Bitmap Index Scan on hint_t1_a_idx  (cost=0.00..4.33 rows=10 p-time=0 p-rows=0 width=0)
                |           Index Cond: (a = '***')
                |
                |
MogDB=# select * from dbe_sql_util.create_hint_sql_patch('patch1', 2578396627, 'indexscan(t1)'); -- Specify a hint patch for the specified unique SQL ID.
-[ RECORD 1 ]---------+--
create_hint_sql_patch | t
MogDB=# explain select * from hint_t1 t1 where t1.a = 1; -- Check whether the hint takes effect.
NOTICE: Plan influenced by SQL hint patch
                                    QUERY PLAN
-----------------------------------------------------------------------------------
 [Bypass]
 Index Scan using hint_t1_a_idx on hint_t1 t1  (cost=0.00..32.43 rows=10 width=12)
   Index Cond: (a = 1)
(3 rows)
MogDB=# select * from hint_t1 t1 where t1.a = 1; -- Execute the statement again.
 a | b | c
---+---+---
 1 | 1 | 1
(1 row)
MogDB=# select unique_query_id, query, query_plan from dbe_perf.statement_history where query like '%hint_t1%'; -- The query plan has been changed.
-[ RECORD 1 ]---+--------------------------------------------------------------------------------------------------
unique_query_id | 2578396627
query           | select * from hint_t1 t1 where t1.a = ?;
query_plan      | Datanode Name: sgnode
                |   Bitmap Heap Scan on hint_t1 t1  (cost=4.33..15.70 rows=10 p-time=0 p-rows=0 width=12)
                |     Recheck Cond: (a = '***')
                |     ->  Bitmap Index Scan on hint_t1_a_idx  (cost=0.00..4.33 rows=10 p-time=0 p-rows=0 width=0)
                |           Index Cond: (a = '***')
                |
                |
-[ RECORD 2 ]---+--------------------------------------------------------------------------------------------------
unique_query_id | 2578396627
query           | select * from hint_t1 t1 where t1.a = ?;
query_plan      | Datanode Name: sgnode
                |   Index Scan using hint_t1_a_idx on hint_t1 t1  (cost=0.00..8.27 rows=1 p-time=0 p-rows=0 width=12)
                |     Index Cond: (a = '***')
                |
                |
```

Scenario 2: Run the SQL PATCH command to report an error for a specific statement in advance.

```sql
-- Delete patch 1.
MogDB=# select * from dbe_sql_util.drop_sql_patch('patch1');
 drop_sql_patch
----------------
 t
(1 row)

-- Create an abort patch for the statement of the unique SQL ID.
MogDB=# select * from dbe_sql_util.create_abort_sql_patch('patch2', 2578396627);
 create_abort_sql_patch
------------------------
 t
(1 row)

-- An error is reported in advance when the statement is executed again.
MogDB=# select * from hint_t1 t1 where t1.a = 1;
ERROR: Statement 2578396627 canceled by abort patch patch2
```

## Enhancements

None

## Constraints

1. Patches can be created only by unique SQL ID. If unique SQL IDs conflict, SQL patches used for hint-based optimization may affect performance but do not affect semantic correctness.
2. Only hints that do not change SQL semantics can be used as patches. SQL rewriting is not supported.
3. This tool is not applicable to logical backup and restoration.
4. Patch validity cannot be verified during patch creation. If the patch hint has syntax or semantic errors, query execution is not affected.
5. Only the initial user, O&M administrator, monitoring administrator, and system administrator have the permission to perform this operation.
6. Patches are not shared between databases. When creating SQL patches, you need to connect to the target database.
7. In the centralized deployment scenario where the standby node is readable, the SQL PATCH commands that create, modify, or delete functions must be run on the primary node; on the standby node they report errors.
8. There is a delay in synchronizing an SQL patch to the standby node. The patch takes effect after the standby node replays the related logs.
9. This function does not take effect for SQL statements in stored procedures, because no unique SQL ID is generated for statements inside stored procedures.
10. It is not recommended that an abort patch be used in the database for a long time; it should be used only as a workaround. If the database service is unavailable due to a kernel fault triggered by a specific statement, you must rectify the service fault or upgrade the kernel as soon as possible. After the upgrade, the method of generating unique SQL IDs may change, so the workaround may become invalid.
11. Currently, except for DML statements, unique SQL IDs of SQL statements (such as CREATE TABLE) are generated by hashing the statement text. Therefore, SQL PATCH is sensitive to uppercase and lowercase letters, spaces, and line breaks.
That is, even if two statements with different text are semantically equivalent, you still need to create separate SQL patches for them. For DML operations, an SQL patch can take effect for the same statement with different input parameters, regardless of case and spacing.

## Dependencies

This feature depends on the real-time resource monitoring function. To use this feature, set the **enable_resource_track** parameter to **on** and set **instr_unique_sql_count** to a value greater than 0. If the unique SQL IDs generated for different statements conflict, the SQL patch may incorrectly hit other, unintended statements. Compared with hint patches used for optimization, abort patches have more side effects and should be used with caution.

## Related Pages

[Load Management](../../reference-guide/guc-parameters/load-management.md), [Query](../../reference-guide/guc-parameters/query.md), [Hint Based Tuning](../../performance-tuning/sql-tuning/hint-based-tuning.md)
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/middleware/deploying-a-distributed-database-using-kubernetes.md b/product/en/docs-mogdb/v5.2/characteristic-description/middleware/deploying-a-distributed-database-using-kubernetes.md
deleted file mode 100644
index 819e1cab..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/middleware/deploying-a-distributed-database-using-kubernetes.md
+++ /dev/null
@@ -1,36 +0,0 @@
---
title: Deploying a Distributed Database Using Kubernetes
summary: Deploying a Distributed Database Using Kubernetes
author: Guo Huan
date: 2022-05-10
---

# Deploying a Distributed Database Using Kubernetes

## Availability

This feature is available since MogDB 3.0.0.

## Introduction

Deploys a distributed database in one-click mode.

## Benefits

Quickly builds a distributed database so that the distributed capability can be verified and used.

## Description

Patroni is used to implement planned switchover and automatic failover in case of faults. HAProxy is used to implement read and write load balancing between the primary and standby MogDB nodes. ShardingSphere is used to implement distributed capabilities. All functions are packaged into images, and one-click deployment scripts are provided.

## Enhancements

None.

## Constraints

Only CentOS and openEuler are supported.

## Dependencies

ShardingSphere, Patroni, HAProxy
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/middleware/distributed-analysis-capabilities.md b/product/en/docs-mogdb/v5.2/characteristic-description/middleware/distributed-analysis-capabilities.md
deleted file mode 100644
index 010e67e2..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/middleware/distributed-analysis-capabilities.md
+++ /dev/null
@@ -1,36 +0,0 @@
---
title: Distributed Analysis Capabilities
summary: Distributed Analysis Capabilities
author: zhang cuiping
date: 2022-10-13
---

# Distributed Analysis Capabilities

## Availability

This feature is available since MogDB 3.1.0.

## Introduction

Implements distributed analysis capabilities based on openLookeng, and works with ShardingSphere to form an HTAP database.

## Benefits

openLookeng is used to quickly analyze massive amounts of data.

## Description

openLookeng reuses the database and table sharding capabilities of the ShardingSphere middleware so that openLookeng can obtain massive data for analysis and calculation.

## Enhancements

None

## Constraints

None

## Dependencies

openLookeng middleware and ShardingSphere middleware.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/middleware/distributed-database-capability.md b/product/en/docs-mogdb/v5.2/characteristic-description/middleware/distributed-database-capability.md
deleted file mode 100644
index 4693652d..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/middleware/distributed-database-capability.md
+++ /dev/null
@@ -1,36 +0,0 @@
---
title: Distributed Database Capability
summary: Distributed Database Capability
author: Guo Huan
date: 2022-05-10
---

# Distributed Database Capability

## Availability

This feature is available since MogDB 3.0.0.

## Introduction

This feature uses the ShardingSphere distributed middleware to provide MogDB with distributed database capability. When 16 Kunpeng 920 (128-core) nodes are used for networking (1 x shardingsphere-proxy, 7 x shardingsphere-jdbc, 8 x MogDB), the perfect-sharding performance is greater than 10 million transactions per minute C (tpmC).

## Benefits

A distributed database that logically has no resource restriction can be built over the middleware.

## Description

With the sharding capability of ShardingSphere, multiple MogDB databases can logically form a larger database with distributed transactions and elastic scaling capabilities. It is used in the same way as a single MogDB database.

## Enhancements

None.

## Constraints

None.

## Dependencies

ShardingSphere middleware
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/middleware/middleware.md b/product/en/docs-mogdb/v5.2/characteristic-description/middleware/middleware.md
deleted file mode 100644
index 1a8f29ab..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/middleware/middleware.md
+++ /dev/null
@@ -1,12 +0,0 @@
---
title: Middleware
summary: Middleware
author: Guo Huan
date: 2023-05-22
---

# Middleware

+ **[Distributed Database Capability](distributed-database-capability.md)**
+ **[Deploying a Distributed Database Using Kubernetes](deploying-a-distributed-database-using-kubernetes.md)**
+ **[Distributed Analysis Capabilities](distributed-analysis-capabilities.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/workload-management/high-latency-escape-at-the-infrastructure-layer.md b/product/en/docs-mogdb/v5.2/characteristic-description/workload-management/high-latency-escape-at-the-infrastructure-layer.md
deleted file mode 100644
index 2a02b950..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/workload-management/high-latency-escape-at-the-infrastructure-layer.md
+++ /dev/null
@@ -1,42 +0,0 @@
---
title: High-Latency Escape at the Infrastructure Layer
summary: High-Latency Escape at the Infrastructure Layer
author: Guo Huan
date: 2023-04-04
---

# High-Latency Escape at the Infrastructure Layer

## Availability

This feature is available since MogDB 5.0.0.

## Introduction

If the infrastructure layer is abnormal, the database SQL execution latency increases and, as a result, the memory or thread pool becomes overloaded. To deal with this scenario, MogDB provides an automatic escape capability; a quick way to inspect the thresholds involved is sketched below.
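
A minimal sketch for checking the two escape thresholds named under Description below; the parameter names are taken from this feature, and `SHOW` works for any GUC:

```sql
-- Both escape functions are disabled by default
SHOW resilience_memory_reject_percent;
SHOW resilience_thread_reject_cond;
```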

## Benefits

When the SQL execution latency increases, sessions pile up and the memory or thread pool becomes overloaded, so the database cannot provide services while the infrastructure layer is abnormal. In this scenario, escape makes it possible to recover the database and restore service within a short period of time.

## Description

- When the database memory is overloaded, sessions are killed immediately and new connections are forbidden until the memory recovers enough to provide services. The memory threshold is specified by the **resilience_memory_reject_percent** GUC parameter. By default, this function is disabled.
- When the number of piled-up sessions in the database reaches the upper limit of the thread pool, sessions are killed immediately and new connections are forbidden until the number of sessions decreases to a range the thread pool can accept. Then, services are recovered. The session threshold is specified by the **resilience_thread_reject_cond** GUC parameter. By default, this function is disabled.

## Enhancements

None

## Constraints

- When the escape capability is triggered by memory or thread pool overload, the sessions of users with the **sysadmin** or **monitoradmin** permission are not cleared.
- In upgrade mode, this feature is not triggered.

## Dependencies

None

## Related Pages

[Memory](../../reference-guide/guc-parameters/resource-consumption/memory.md), [Thread Pool](../../reference-guide/guc-parameters/thread-pool.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/characteristic-description/workload-management/workload-management.md b/product/en/docs-mogdb/v5.2/characteristic-description/workload-management/workload-management.md
deleted file mode 100644
index ce00624f..00000000
--- a/product/en/docs-mogdb/v5.2/characteristic-description/workload-management/workload-management.md
+++ /dev/null
@@ -1,10 +0,0 @@
---
title: Workload Management
summary: Workload Management
author: Guo Huan
date: 2023-05-22
---

# Workload Management

- **[High-Latency Escape at the Infrastructure Layer](high-latency-escape-at-the-infrastructure-layer.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-cluster-brain-split-fault.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-cluster-brain-split-fault.md
deleted file mode 100644
index fd5f6336..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-cluster-brain-split-fault.md
+++ /dev/null
@@ -1,273 +0,0 @@
---
title: Manual Rectification of the Brain Split Fault in a Database Cluster
summary: Manual Rectification of the Brain Split Fault in a Database Cluster
author: zhang cuiping
date: 2022-11-22
---

# Manual Rectification of the Brain Split Fault in a Database Cluster

## Symptom

When brain split occurs in a database cluster, one primary instance is stopped and placed in the manually stopped status, ensuring that only one primary instance keeps running normally. You can determine whether a brain split fault has occurred using the following two methods.

- Error log

  The log of the cm_server primary instance includes "split brain failure in db service", as shown in the following.

  ```
  CM_AGENT ERROR: [Primary], line 1529: split brain failure in db service, more dynamic primary and their term(7504) are the most(7504). Due to auto crash recovery is disabled, no need send restart msg to instance(6002) that had been restarted, waiting for manual intervention.
  ```

  Or

  ```
  CM_AGENT ERROR: line 570: split brain failure in db service, instance 6002 local term(7403) is not max term(7504). Due to auto crash recovery is disabled, will not restart current instance, waiting for manual intervention.
  ```

- Cluster status

  Run the `cm_ctl query -Cvp -z ALL` command to query the cluster status, as shown in the following figure.

  ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-1.png)

  **Note**: In extreme scenarios, both database instances will be stopped. In this case, start the instance whose ID is smaller, such as node 6001, so that it can continue to provide services. The start command is as follows.

  ```
  cm_ctl start -n nodeid -D datapath
  ```

## Cause Analysis

If a network partitioning fault occurs, the failover process may be triggered for the following reasons, so that the instance owning more votes can continue to provide services.

- The cms primary cannot perceive the status of the primary instance in the database cluster.
- The standby instance cannot communicate with the primary instance, for example, due to a streaming replication error.
- The connectivity between the standby instance and the (configured) virtual IP address is abnormal.

Once the network partitioning fault is rectified, brain split may occur. Additionally, because the automatic fault rectification function `cms_enable_db_crash_recovery` of the database cluster is disabled, no dual-primary decision is made automatically; instead, one primary instance is stopped so that exactly one primary instance remains available, and a DBA then handles the issue manually to ensure data consistency of the database cluster.

## Procedure

### Tool Download

First, access the [MogDB Downloads](https://www.mogdb.io/downloads/mogdb/) page of the MogDB official website, switch to the **Package Download** tab, choose a version, OS, and CPU, and then choose a package in the `ToolKits-<version>-<OS>-<CPU architecture>.tar.gz` format, as shown in the following figure.

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-1-en.png)

Put the software package in the **bin** directory of MogDB and decompress it. The mog_xlogdump tool is used for data verification. The decompression result is as follows.

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-3.png)

The tool depends on the following parameters.

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-4.png)

As shown in the figure, mog_xlogdump depends on the transaction ID (-x), the OID (-o) of a data table, and the xlog file (-p).

### Tool Usage

The brain split fault causes the logs of the two database instances to diverge from some point in time. If the divergence lasts a long time, tens of thousands of data records in one or more tables may become inconsistent (in an online environment, even if divergence occurs, it usually lasts a short time and can be controlled). In this case, the DBA needs to determine the range of differing data using the mog_xlogdump tool so that the data can be merged.

#### Experimental Data Table

The experimental data table is test_example_01.

(1) Create a table on the primary instance and insert five data records.

```
create table test_example_01(id integer primary key, user_name varchar(10), register_time timestamp);

insert into test_example_01 values(1, 'zhangsan', '2022-09-22 10:57:10');

insert into test_example_01 values(2, 'lisi', '2022-09-22 10:58:10');

insert into test_example_01 values(3, 'wangwu', '2022-09-22 17:03:10');

insert into test_example_01 values(4, 'mazi', '2022-09-22 17:05:10');

insert into test_example_01 values(5, 'laoliu', '2022-09-22 17:09:10');
```

(2) Query the table information before the network partitioning fault occurs.

```
\d+ test_example_01; // Query the table structure.
```

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-5.png)

```
select pg_relation_filenode('test_example_01'); // Query the filenode, that is, the OID.

select oid from pg_class where relname='test_example_01'; // Query the OID of the table.
```

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-6.png)

```
select xmin, xmax, * from test_example_01; // Query the transaction ID of the table, that is, xmin.
select pg_current_xlog_location(); // Query the WAL log file of the primary instance, such as 000000010000000000000004.
```

- wrz-cm-test-01 node

  ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-7.png)

#### Confirm the Data Change Range Based on the Start and End LSN

For tables where it is unclear whether data is inconsistent, the mog_xlogdump tool can find inconsistent data based on the specified start and end LSN, that is, -s and -e. You can run either of the following commands.

```
mog_xlogdump -c <database connection> -s <start lsn> -e <end lsn> <wal log file of the primary instance>

mog_xlogdump -c <database connection> -p <xlog directory> -s <start lsn> -e <end lsn>
```

**Note**:

1. The database connection string rule is `postgres://<user>:<password>@<host>:<port>/<dbname>`. It does not need to be specified for a local connection.
2. Because the OID is not specified, the data obtained in this way contains the data of all tables, which makes later data merging hard.

#### Confirm the Data Change Range Based on Specific Tables (Recommended)

For tables where it is unclear whether data is inconsistent, the mog_xlogdump tool can parse the data of a table with a specified OID based on the -o and -R parameters plus the start and end LSN, that is, -s and -e. You can run either of the following commands.

```
mog_xlogdump -c <database connection> -o <table OID> -R <column types of the table> -s <start lsn> -e <end lsn> <wal log file of the primary instance>

mog_xlogdump -c <database connection> -p <xlog directory> -o <table OID> -R <column types of the table> -s <start lsn> -e <end lsn>
```

**Note**: The database connection string rule is `postgres://<user>:<password>@<host>:<port>/<dbname>`. It does not need to be specified for a local connection.

(1) Query the cluster status to make sure that the cluster is running normally.

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-8.png)

(2) Use a firewall to simulate network isolation between the primary and standby nodes.

Run the following command on any node of the cluster to isolate its network from the other node.

```
iptables -I OUTPUT -d 192.168.122.232 -j DROP; iptables -I INPUT -s 192.168.122.232 -j DROP
```

(3) Query the cluster status to make sure that the CMS and database instance role in each partition is primary.

- wrz-cm-test-01 node

  ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-9.png)

- wrz-cm-test-02 node

  ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-10.png)

##### Perform the Write, Update, and Delete Operations on the Primary Instances in Different Partitions

(1) Perform the write operation on the wrz-cm-test-01 node.

```
insert into test_example_01 values(6, 'xiaoming', '2022-09-22 17:09:10'); // Repeated ID
```

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-11.png)

(2) Perform the write operation on the wrz-cm-test-02 node.

```
insert into test_example_01 values(6, 'xiaoming', '2022-09-22 17:10:10'); // Repeated ID

insert into test_example_01 values(7, 'huluwa', '2022-09-22 17:12:10');
```

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-12.png)

##### Rectify the Network Partitioning Fault

(1) Clear the firewall.

Run the following command on the node used in [the second step of the network partitioning fault simulation](#renyijiedian).

```
iptables -I OUTPUT -d 192.168.122.232 -j ACCEPT; iptables -I INPUT -s 192.168.122.232 -j ACCEPT
```

(2) Query the cluster status to make sure that it is running normally.

The cluster status is consistent with that in the [Symptom](#2) part, that is, the cluster is in the manually stopped status.

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-13.png)

##### Data Recovery and Merging

This procedure depends on logs. Once log data is lost, data recovery will fail. Assume that there is only one table (test_example_01) whose data is changed after network partitioning.

(1) Confirm the LSN information from before the network partitioning fault occurred and from when the brain split occurred.

- Determine the time point at which the network partitioning fault occurs.

  Based on the original cluster status of [the node in simulating the network partitioning fault](#renyijiedian1), the CMS and the database instance role on the wrz-cm-test-01 node are changed to primary after the network partitioning fault occurs. Therefore, the time point when the CMS is changed to a primary instance can be found in the cm_server log of the wrz-cm-test-01 node. With that time point, the LSN corresponding to the last data synchronization between the primary and standby instances while the cluster was running normally can be obtained. (-s indicates the start position for mog_xlogdump to parse the log.)

  The log showing the CMS being switched to the primary instance is as follows.

  ```
  /cm_server-2022-09-23_104436-current.log:2022-09-23 15:16:23.005 tid=1245295 LOG: [DccNotifyStatus] g_dbRole is 1, roleType is 1.
  ```

  The LSN information reported from the CMA node before the CMS is switched to a primary node is as follows.

  ```
  2022-09-23 14:59:27.495 tid=1245411 CM_AGENT LOG: [InstanceIsUnheal], current report instance is 6001, node 1, instId[6001: 6002], node[1: 2], staticRole[2=Standby: 1=Primary], dynamicRole[2=Standby: 5=Down], term[703: 604], lsn[0/4016230: 0/4007840], dbState[2: 0], buildReason[2=Disconnected: 7=Unknown], doubleRestarting[0: 0], disconn_mode[3=prohibit_connection: 1=polling_connection], disconn[:0, :0], local[192.168.122.231:27001, 192.168.122.232:27001], redoFinished[1: 1], arbiTime[104: 100], syncList[cur: (sync list is empty), exp: (sync list is empty), vote: (dynamic status is empty)], groupTerm[604], sync_standby_mode[0: 0: 0].
  ```

- Determine the time point at which brain split occurs in the cluster.

  Based on the brain split fault rectification mechanism, the LSN corresponding to the time point at which the brain split occurs can be replaced with the xlog position of the manually stopped database instance. (-e indicates the end position for mog_xlogdump to parse the log.)

  Under network isolation, start the manually stopped database instance, log in to the instance using gsql, and run the following command to obtain the xlog position.

  ```
  select pg_current_xlog_location();
  ```

(2) Use a firewall to isolate the node where the database instance was manually stopped, and start the database instance on that node.

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-14.png)

Obtain the data of the test_example_01 table and the LSN information from when the brain split fault occurred.

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-15.png)

(3) Use mog_xlogdump to confirm the data change range, and export the data.

Data change on the wrz-cm-test-01 node:

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-16.png)

Data change on the wrz-cm-test-02 node:

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-brain-split-17.png)

Generate the data to be merged into the primary instance of the current database cluster.

```
insert into test_example_01 values(6, 'xiaoming', '2022-09-22 17:10:10'); // IDs conflict; choose the row with the latest time.
insert into test_example_01 values(7, 'huluwa', '2022-09-22 17:12:10');
```

After confirmation, stop the current database instance.

(4) Import the changed data into the primary instance of the database cluster.

Use gsql to log in to the primary instance and execute the above insert statements.

Query the database records after the data is merged.

(5) Add the manually stopped database instance back to the database cluster.

As in the second step, make sure that the database instance on the node is in the stopped status, clear the firewall to cancel network isolation, and start the database instance on the node by running the `cm_ctl start -n nodeid -D datapath` command.

The instance will be added to the database cluster as a standby instance.

(6) Query the cluster status to ensure that all nodes are normal.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-cluster-manual-failover.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-cluster-manual-failover.md
deleted file mode 100644
index ed81242f..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-cluster-manual-failover.md
+++ /dev/null
@@ -1,88 +0,0 @@
---
title: Manual Failover
summary: Manual Failover
author: zhang cuiping
date: 2022-11-22
---

# Manual Failover

## Symptom

Before network partitioning, the status of the two-node cluster is normal: both the CM cluster and the database cluster have one primary node and one standby node running normally.

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-failover-1.png)

After network partitioning, because the CM cluster's automatic failover parameter `cms_enable_failover_on2nodes` is disabled (in a two-node cluster, the number of votes is 2), the standby instance does not take over the services of the primary instance and remains a standby instance, while the primary instance is isolated and changed to a standby instance. As a result, there is no primary instance in the CM cluster, and the database cluster is unavailable, as shown in the following figures.

- Status of the CM cluster on node 1

  ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-failover-2.png)

- Status of the CM cluster on node 2

  ![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-failover-3.png)

## Procedure

Confirm which instance was the primary instance of the database cluster before the fault, and forcibly switch the standby CM instance on that node to primary.

### Confirm the Primary Instance of the Database Cluster

On each node, run the `gs_ctl query -D <data_dir>` command to obtain the database instance role. As shown in the following figure, `Primary` indicates the node where the primary instance is located.

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-failover-4.png)

If there is no primary instance in the database cluster, use gsql to choose the instance whose term/lsn is highest as the primary one, and perform a failover on that node.

**Note**: gsql cannot be used to connect to a database instance in pending status. In that case, logs can be used to determine the original primary instance of the database cluster. For details, see [Judge the Primary/Standby Status of the Database Cluster Based on Logs](#Judge the Primary/Standby Status of the Database Cluster Based on Logs)

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-failover-5.png)

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-failover-6.png)

### Forcibly Switch the CM Instance to the Primary Instance

Once the primary instance is confirmed, forcibly switch the CM instance on the node where the primary instance of the database cluster is located to the primary instance. For details, see the following figure.

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-failover-7.png)

### Cancel Forcible Switchover

After the previous step, there must be a primary instance in the CM cluster, and even a brain split may occur, as shown in the following figure.

![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/common-faults-and-diagnosis-guide/cm-failover-8.png)

**Note**: The forcible switchover needs to be canceled regardless of whether brain split occurs in the CM cluster, as shown in the following figure. The brain split fault is then dealt with: the CM instance with the higher term is chosen as the unique primary instance. For details about how to rectify the brain split fault caused by this mechanism, see [Manual Rectification of the Brain Split Fault](./cm-cluster-brain-split-fault.md).
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-fault.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-fault.md deleted file mode 100644 index d35051d9..00000000 --- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/cm-fault/cm-fault.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: CM Two-Node Fault Location -summary: CM Two-Node Fault Location -author: zhang cuiping -date: 2023-04-07 ---- - -# CM Two-Node Fault Location - -- **[Manual Failover](cm-cluster-manual-failover.md)** -- **[Manual Rectification of the Brain Split Fault in a Database Cluster](cm-cluster-brain-split-fault.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/common-fault-locating-cases.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/common-fault-locating-cases.md deleted file mode 100644 index f8badda7..00000000 --- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/common-fault-locating-cases.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Common Fault Locating Cases -summary: Common Fault Locating Cases -author: zhang cuiping -date: 2023-04-07 ---- - -# Common Fault Locating Cases - -- **[Core Fault Locating](./core-fault-locating/core-fault-locating.md)** -- **[Permission/Session/Data Type Fault Location](./permission-session-data-type/permission-session-data-type.md)** -- **[Service/High Availability/Concurrency Fault Location](./service-ha-concurrency/service-ha-concurrency.md)** -- **[Table/Partition Table Fault Location](./table-partition-table/table-partition-table.md)** -- **[File System/Disk/Memory Fault Location](./file-system-disk-memory/file-system-disk-memory.md)** -- **[SQL Fault Location](./sql-fault/sql-fault.md)** -- **[Index Fault Location](./index-fault/index-fault.md)** -- **[CM Two-Node Fault Location](./cm-fault/cm-fault.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-after-installation-on-x86.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-after-installation-on-x86.md deleted file mode 100644 index 0258da54..00000000 --- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-after-installation-on-x86.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Core Dump Occurs After Installation on x86 -summary: Core Dump Occurs After Installation on x86 -author: Guo Huan -date: 2021-12-09 ---- - -# Core Dump Occurs After Installation on x86 - -## Symptom - -The core dump occurs after the installation of MogDB on x86 architecture machine is completed, and the error in the following figure displays - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/core-dump-occurs-after-installation-on-x86-1.png) - -## Cause Analysis - -The x86 architecture does not include the rdtscp instruction set, and the deployment of MogDB fails. This problem is common in the case of virtualized installation of Linux server on local windows system, but the virtualization version is too low. 
-
-## Procedure
-
-Run the `lscpu | grep rdtscp` command to check whether the rdtscp instruction set is supported.
-
-![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/core-dump-occurs-after-installation-on-x86-2.png)
-
-Support for this instruction set can be enabled through the host's administration settings. Set the cloud host CPU mode to **host-passthrough**, and then reboot.
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-due-to-full-disk-space.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-due-to-full-disk-space.md
deleted file mode 100644
index abd30c48..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-due-to-full-disk-space.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: Core Dump Occurs due to Full Disk Space
-summary: Core Dump Occurs due to Full Disk Space
-author: Guo Huan
-date: 2021-05-24
----
-
-# Core Dump Occurs due to Full Disk Space
-
-## Symptom
-
-While TPC-C is running, the disk becomes full during data injection. As a result, a core dump occurs on the MogDB process, as shown in the following figure.
-
-![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/administrator-guide/core-dump-occurs-due-to-full-disk-space.png)
-
-## Cause Analysis
-
-When the disk is full, Xlog files cannot be written, and the process exits with a panic log.
-
-## Procedure
-
-Externally monitor the disk usage and periodically clean up the disk.
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-due-to-incorrect-settings-of-guc-parameter-log-directory.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-due-to-incorrect-settings-of-guc-parameter-log-directory.md
deleted file mode 100644
index 26d1e0b2..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-due-to-incorrect-settings-of-guc-parameter-log-directory.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Core Dump Occurs Due to Incorrect Settings of GUC Parameter log_directory
-summary: Core Dump Occurs Due to Incorrect Settings of GUC Parameter log_directory
-author: Guo Huan
-date: 2021-05-24
----
-
-# Core Dump Occurs Due to Incorrect Settings of GUC Parameter log_directory
-
-## Symptom
-
-After the database process is started, a core dump occurs and no log is recorded.
-
-## Cause Analysis
-
-The directory specified by the GUC parameter **log_directory** cannot be read, or you do not have permission to access it. As a result, verification fails during database startup, and the process exits with a panic log.
-
-## Procedure
-
-Set **log_directory** to a valid directory. For details, see [log_directory](../../../reference-guide/guc-parameters/error-reporting-and-logging/logging-destination.md#log_directory).
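-As an illustration, the following sketch checks the directory's permissions and points **log_directory** back at a valid location; the data directory, user, group, and path are placeholders only:
-
-```
-# Verify that the configured log directory exists and is accessible
-# to the database user (example paths and names).
-ls -ld /mogdb/data/dbnode/pg_log
-chown omm:dbgrp /mogdb/data/dbnode/pg_log
-chmod 700 /mogdb/data/dbnode/pg_log
-# Reset the parameter to a valid directory, then restart the instance
-gs_guc set -D /mogdb/data/dbnode -c "log_directory='pg_log'"
-```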
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-when-removeipc-is-enabled.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-when-removeipc-is-enabled.md
deleted file mode 100644
index 033ec2ad..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-dump-occurs-when-removeipc-is-enabled.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: Core Dump Occurs when RemoveIPC Is Enabled
-summary: Core Dump Occurs when RemoveIPC Is Enabled
-author: Guo Huan
-date: 2021-05-24
----
-
-# Core Dump Occurs when RemoveIPC Is Enabled
-
-## Symptom
-
-The **RemoveIPC** parameter in the OS configuration is set to **yes**. The database breaks down during running, and the following log information is displayed:
-
-```
-FATAL: semctl(1463124609, 3, SETVAL, 0) failed: Invalid argument
-```
-
-## Cause Analysis
-
-If **RemoveIPC** is set to **yes**, the OS deletes the IPC resources (shared memory and semaphores) when the corresponding user exits. As a result, the IPC resources used by the MogDB server are cleared, causing the database to break down.
-
-## Procedure
-
-Set **RemoveIPC** to **no**. For details, see [Operating System Configuration](../../../installation-guide/installation-preparation/os-configuration.md) in the *Installation Guide*.
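-On systemd-based OSs, this parameter usually lives in **/etc/systemd/logind.conf**. A minimal sketch, assuming that file location (verify it on your distribution):
-
-```
-# Persist RemoveIPC=no and make logind pick it up.
-sed -i 's/^#\?RemoveIPC=.*/RemoveIPC=no/' /etc/systemd/logind.conf
-systemctl restart systemd-logind
-# Confirm the setting
-grep RemoveIPC /etc/systemd/logind.conf
-```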
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-fault-locating.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-fault-locating.md
deleted file mode 100644
index 5620fd56..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/core-fault-locating/core-fault-locating.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Core Fault Locating
-summary: Core Fault Locating
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Core Fault Locating
-
-- **[Core Dump Occurs due to Full Disk Space](core-dump-occurs-due-to-full-disk-space.md)**
-- **[Core Dump Occurs Due to Incorrect Settings of GUC Parameter log_directory](core-dump-occurs-due-to-incorrect-settings-of-guc-parameter-log-directory.md)**
-- **[Core Dump Occurs when RemoveIPC Is Enabled](core-dump-occurs-when-removeipc-is-enabled.md)**
-- **[Core Dump Occurs After Installation on x86](core-dump-occurs-after-installation-on-x86.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/after-you-run-the-du-command.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/after-you-run-the-du-command.md
deleted file mode 100644
index e2e8d4fa..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/after-you-run-the-du-command.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: After You Run the du Command to Query Data File Size In the XFS File System, the Query Result Is Greater than the Actual File Size
-summary: After You Run the du Command to Query Data File Size In the XFS File System, the Query Result Is Greater than the Actual File Size
-author: Guo Huan
-date: 2021-05-24
----
-
-# After You Run the du Command to Query Data File Size In the XFS File System, the Query Result Is Greater than the Actual File Size
-
-## Symptom
-
-After you run the **du** command to query the data file size in the cluster, the query result is probably greater than the actual file size.
-
-```
-du -sh file
-```
-
-## Cause Analysis
-
-The XFS file system has a pre-allocation mechanism. The pre-allocated size is determined by the **allocsize** mount parameter. The file size displayed by the **du** command includes the pre-allocated disk space.
-
-## Procedure
-
-- Select the default value (64 KB) for the XFS file system mount parameter **allocsize** to eliminate the problem.
-
-- Add the **--apparent-size** parameter when using the **du** command to query the actual file size.
-
-  ```
-  du -sh file --apparent-size
-  ```
-
-- If the XFS file system reclaims the pre-allocated space of a file, the **du** command displays the actual file size.
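-As an illustration, the pre-allocation effect can be observed, and the mount option adjusted, as in the following sketch; the device and mount point are placeholders:
-
-```
-# Compare allocated size with apparent size for a data file
-du -sh file
-du -sh file --apparent-size
-# Remount the data file system with the default 64 KB allocsize
-# (example device and mount point only)
-umount /mnt/data1
-mount -o allocsize=64k /dev/sdb1 /mnt/data1
-```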
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/disk-space-usage-reaches-the-threshold.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/disk-space-usage-reaches-the-threshold.md
deleted file mode 100644
index 0e7ce312..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/disk-space-usage-reaches-the-threshold.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-title: Disk Space Usage Reaches the Threshold and the Database Becomes Read-only
-summary: Disk Space Usage Reaches the Threshold and the Database Becomes Read-only
-author: Guo Huan
-date: 2021-05-24
----
-
-# Disk Space Usage Reaches the Threshold and the Database Becomes Read-only
-
-## Symptom
-
-The following error is reported when a non-read-only SQL statement is executed:
-
-```
-ERROR: cannot execute %s in a read-only transaction.
-```
-
-An error is reported when some non-read-only SQL statements (such as insert, update, create table as, create index, alter table, and copy from) are executed:
-
-```
-canceling statement due to default_transaction_read_only is on.
-```
-
-## Cause Analysis
-
-After the disk space usage reaches the threshold, the database enters the read-only mode. In this mode, only read-only statements can be executed.
-
-## Procedure
-
-1. Use either of the following methods to connect to the database in maintenance mode:
-
-    - Method 1
-
-      ```
-      gsql -d mogdb -p 8000 -r -m
-      ```
-
-    - Method 2
-
-      ```
-      gsql -d mogdb -p 8000 -r
-      ```
-
-      After the connection is successful, run the following command:
-
-      ```
-      set xc_maintenance_mode=on;
-      ```
-
-2. Run the **DROP** or **TRUNCATE** statement to delete user tables that are no longer used until the disk space usage falls below the threshold.
-
-    Deleting user tables can only temporarily relieve the insufficient disk space. To permanently solve the problem, expand the disk space.
-
-3. Disable the read-only mode of the database as user **omm**.
-
-    ```
-    gs_guc reload -D /mogdb/data/dbnode -c "default_transaction_read_only=off"
-    ```
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/error-no-space-left-on-device-is-displayed.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/error-no-space-left-on-device-is-displayed.md
deleted file mode 100644
index e87b496e..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/error-no-space-left-on-device-is-displayed.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-title: Error:No space left on device Is Displayed
-summary: Error:No space left on device Is Displayed
-author: Guo Huan
-date: 2021-05-24
----
-
-# "Error:No space left on device" Is Displayed
-
-## Symptom
-
-The following error message is displayed when the cluster is being used:
-
-```
-Error:No space left on device
-```
-
-## Cause Analysis
-
-The disk space is insufficient.
-
-## Procedure
-
-- Run the following command to check the disk usage. The **Avail** column indicates the available disk space, and the **Use%** column indicates the percentage of disk space that has been used.
-
-  ```
-  [root@openeuler123 mnt]# df -h
-  Filesystem                  Size  Used Avail Use% Mounted on
-  devtmpfs                    255G     0  255G   0% /dev
-  tmpfs                       255G   35M  255G   1% /dev/shm
-  tmpfs                       255G   57M  255G   1% /run
-  tmpfs                       255G     0  255G   0% /sys/fs/cgroup
-  /dev/mapper/openeuler-root  196G  8.8G  178G   5% /
-  tmpfs                       255G  1.0M  255G   1% /tmp
-  /dev/sda2                   9.8G  144M  9.2G   2% /boot
-  /dev/sda1                    10G  5.8M   10G   1% /boot/efi
-  ```
-
-  How much free space is required depends on the growth of service data. Suggestions:
-
-  - Check the disk space usage status, ensuring that the remaining space can accommodate more than one year of data growth.
-  - If the disk space usage exceeds 60%, you must clear or expand the disk space.
-
-- Run the following command to check the size of the data directory.
-
-  ```
-  du --max-depth=1 -h /mnt/
-  ```
-
-  The following information is displayed. The first column shows the sizes of directories or files, and the second column shows all the sub-directories or files under the **/mnt/** directory.
-
-  ```
-  [root@MogDB36 mnt]# du --max-depth=1 -h /mnt
-  83G   /mnt/data3
-  71G   /mnt/data2
-  365G  /mnt/data1
-  518G  /mnt
-  ```
-
-- Clean up the disk space. You are advised to periodically back up audit logs to other storage devices. The recommended log retention period is one month. **pg_log** stores database process run logs, which help database administrators locate faults. If you review error logs every day and handle errors promptly, you can delete old error logs.
-
-- Delete useless data. Back up data that is used infrequently or no longer accessed to lower-cost storage media, and then remove the backed-up data to free up disk space.
-
-- If the disk space is still insufficient, expand the disk capacity.
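-Both of the preceding cases come down to watching disk usage before the threshold is hit. A minimal cron-style sketch; the threshold, mount point, and log path are assumptions to adjust for your deployment:
-
-```
-#!/bin/bash
-# Alert when usage of the data file system exceeds 60%, per the
-# suggestion above.
-THRESHOLD=60
-MOUNT=/mnt/data1
-usage=$(df -P "$MOUNT" | awk 'NR==2 {gsub(/%/, ""); print $5}')
-if [ "$usage" -ge "$THRESHOLD" ]; then
-    echo "$(date): ${MOUNT} usage ${usage}% exceeds ${THRESHOLD}%" >> /var/log/disk_watch.log
-fi
-```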
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/file-is-damaged-in-the-xfs-file-system.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/file-is-damaged-in-the-xfs-file-system.md
deleted file mode 100644
index 8b1f081f..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/file-is-damaged-in-the-xfs-file-system.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: File Is Damaged in the XFS File System
-summary: File Is Damaged in the XFS File System
-author: Guo Huan
-date: 2021-05-24
----
-
-# File Is Damaged in the XFS File System
-
-## Symptom
-
-When a cluster is in use, the XFS file system reports errors such as "input/output error" or "structure needs cleaning".
-
-## Cause Analysis
-
-The XFS file system is abnormal.
-
-## Procedure
-
-Try unmounting and remounting the file system to check whether the problem is resolved.
-
-If the problem persists, refer to the file system documentation (for example, for **xfs_repair**) and ask the system administrator to repair the file system. After the file system is repaired, run the **gs_ctl build** command to restore the damaged DNs.
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/file-system-disk-memory.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/file-system-disk-memory.md
deleted file mode 100644
index 5c6fd4cf..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/file-system-disk-memory.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: File System/Disk/Memory Fault Location
-summary: File System/Disk/Memory Fault Location
-author: zhang cuiping
-date: 2023-04-07
----
-
-# File System/Disk/Memory Fault Location
-
-- **[After You Run the du Command to Query Data File Size In the XFS File System, the Query Result Is Greater than the Actual File Size](after-you-run-the-du-command.md)**
-- **[File Is Damaged in the XFS File System](file-is-damaged-in-the-xfs-file-system.md)**
-- **[Insufficient Memory](insufficient-memory.md)**
-- **["Error:No space left on device" Is Displayed](error-no-space-left-on-device-is-displayed.md)**
-- **[When the TPC-C is running and a disk to be injected is full, the TPC-C stops responding](when-the-tpcc-is-running.md)**
-- **[Disk Space Usage Reaches the Threshold and the Database Becomes Read-only](disk-space-usage-reaches-the-threshold.md)**
-- **[Shared Memory Leakage](shared-memory-leak.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/insufficient-memory.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/insufficient-memory.md
deleted file mode 100644
index 1bdc8300..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/insufficient-memory.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Insufficient Memory
-summary: Insufficient Memory
-author: Guo Huan
-date: 2021-05-24
----
-
-# Insufficient Memory
-
-## Symptom
-
-The client or logs contain the error message **memory usage reach the max_dynamic_memory**.
-
-## Cause Analysis
-
-The possible cause is that the value of the GUC parameter **max_process_memory** is too small. This parameter limits the maximum memory that can be used by a MogDB instance.
-
-## Procedure
-
-Use the **gs_guc** tool to adjust the value of **max_process_memory**. Note that you need to restart the instance for the modification to take effect.
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/shared-memory-leak.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/shared-memory-leak.md
deleted file mode 100644
index d2f73e95..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/shared-memory-leak.md
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: Shared Memory Leakage
-summary: Shared Memory Leakage
-author: zhang cuiping
-date: 2023-04-23
----
-
-# Shared Memory Leakage
-
-## Symptom
-
-The following error information is recorded in logs.
-
-```
-This error usually means that PostgreSQL's request for a shared memory segment
-exceeded available memory or swap space, or exceeded your kernel's SHMALL parameter.
-You can either reduce the request size or reconfigure the kernel with larger SHMALL.
-```
-
-## Cause Analysis
-
-Run the `free` command to check the memory usage. It is found that `shared` memory accounts for a large part.
-
-```
-# free -g
-              total        used        free      shared  buff/cache   available
-Mem:             31           1           2          23          27           2
-Swap:             3           3           0
-```
-
-Run the `ipcs` command to check the usage of the shared memory. It is found that there is a large amount of shared memory that is not attached to any process but has not been reclaimed. That is, the value of `nattch` is **0**.
-
-```
-[root@pekpeuler00671 script]# ipcs -m
-
------- Shared Memory Segments --------
-key        shmid      owner      perms      bytes      nattch     status
-0x00000000 65536      gnome-init 777        16384      1          dest
-0x00000000 131073     gnome-init 777        16384      1          dest
-0x00000000 163842     gnome-init 777        3145728    2          dest
-0x00000000 393219     gnome-init 600        524288     2          dest
-0x00000000 425988     gnome-init 600        4194304    2          dest
-0x00000000 458757     gnome-init 777        3145728    2          dest
-0x00f42401 3604486    1001       600        4455342080 0
-0x00f42402 14123015   1003       600        4457177088 0
-0x00f42403 23592968   1005       600        4457177088 0
-0x00f42404 33062921   1007       600        4457177088 0
-0x00f42405 42532874   1009       600        4457177088 0
-0x00f42406 52002827   1011       600        4457177088 0
-0x00f42407 61472780   1013       600        4457177088 0
-0x00f42408 70942733   1015       600        4457177088 0
-0x00f42409 80412686   1017       600        4457177088 0
-0x00f4240a 89882639   1019       600        4457177088 0
-0x00f4240b 99352592   1021       600        4457177088 0
-0x00f4240c 108822545  1023       600        4457177088 0
-0x00f4240d 118292498  1025       600        4457177088 0
-0x00f4240e 127762451  1027       600        4457177088 0
-0x00f4240f 136904724  1029       600        4455342080 0
-0x00f42410 146374677  1031       600        4457177088 0
-0x00f42411 155844630  1033       600        4457177088 0
-0x00f42412 165314583  1035       600        4457177088 0
-0x00f42413 174784536  1037       600        4457177088 0
-```
-
-The cause is that the database process was terminated with `kill -9`, so the `IpcMemoryDelete` function was not invoked to release the shared memory. As a result, the memory leaks.
-
-## Procedure
-
-Run the `ipcrm` command to release the shared memory without owners. For example, to release the shared memory whose `shmid` is `3604486`, run the following command.
-
-```
-ipcrm -m 3604486
-```
\ No newline at end of file
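-Orphaned segments can also be cleaned up in a batch. The following is a sketch, assuming every segment with `nattch` of 0 in the listing belongs to the defunct database processes; verify ownership before deleting anything:
-
-```
-# ipcs -m data columns: key shmid owner perms bytes nattch status.
-# Skip the header lines (field positions assumed as above) and remove
-# every segment with nattch == 0.
-ipcs -m | awk 'NR > 3 && $6 == 0 {print $2}' | while read -r shmid; do
-    echo "removing shared memory segment ${shmid}"
-    ipcrm -m "${shmid}"
-done
-```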
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/when-the-tpcc-is-running.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/when-the-tpcc-is-running.md
deleted file mode 100644
index bd1a3f9d..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/file-system-disk-memory/when-the-tpcc-is-running.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: When the TPC-C is running and a disk to be injected is full, the TPC-C stops responding.
-summary: When the TPC-C is running and a disk to be injected is full, the TPC-C stops responding.
-author: Guo Huan
-date: 2021-05-24
----
-
-# When the TPC-C is running and a disk to be injected is full, the TPC-C stops responding
-
-## Symptom
-
-When TPC-C is running and the disk being injected becomes full, TPC-C stops responding. After the fault is rectified, TPC-C automatically continues to run.
-
-## Cause Analysis
-
-When the disk where the performance log (**gs_profile**) is located is full, the database cannot write the log and waits indefinitely. As a result, TPC-C stops responding. After the disk space insufficiency fault is rectified, performance logs can be written properly, and TPC-C is restored.
-
-## Procedure
-
-Externally monitor the disk usage and periodically clean up the disk.
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/b-tree-index-faults.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/b-tree-index-faults.md
deleted file mode 100644
index c04a36d9..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/b-tree-index-faults.md
+++ /dev/null
@@ -1,68 +0,0 @@
----
-title: B-tree Index Faults
-summary: B-tree Index Faults
-author: Guo Huan
-date: 2021-05-24
----
-
-# B-tree Index Faults
-
-## Symptom
-
-One of the following error messages is occasionally displayed, indicating that an index is corrupted.
-
-```
-ERROR: index 'xxxx_index' contains unexpected zero page
-Or
-ERROR: index 'pg_xxxx_index' contains unexpected zero page
-Or
-ERROR: compressed data is corrupt
-```
-
-## Cause Analysis
-
-This type of error is caused by an index fault. The possible causes are as follows:
-
-- The index is unavailable due to software bugs or hardware faults.
-- The index contains many empty pages or almost empty pages.
-- During concurrent DDL execution, the network is intermittently disconnected.
-- The index failed to be created when indexes were created concurrently.
-- A network fault occurs when a DDL or DML operation is performed.
-
-## Procedure
-
-Run the REINDEX command to rebuild the index.
-
-1. Log in to the host as the OS user **omm**.
-
-2. Run the following command to connect to the database:
-
-    ```
-    gsql -d mogdb -p 8000 -r
-    ```
-
-3. Rebuild the index.
-
-    - During DDL or DML operations, if index problems occur due to software or hardware faults, run the following command to rebuild the index:
-
-      ```
-      REINDEX TABLE tablename;
-      ```
-
-    - If the error message contains **xxxx_index**, the index of a user table is faulty. **xxxx** indicates the name of the user table.
-      Run either of the following commands to rebuild the index:
-
-      ```
-      REINDEX INDEX indexname;
-      ```
-
-      Or
-
-      ```
-      REINDEX TABLE tablename;
-      ```
-
-    - If the error message contains **pg_xxxx_index**, the index of the system catalog is faulty. Run the following command to rebuild the index:
-
-      ```
-      REINDEX SYSTEM databasename;
-      ```
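-From the shell, the rebuild in the procedure above might look like the following sketch; the database name, port, and object names are placeholders:
-
-```
-# Rebuild a single user index, all indexes of a table, or all system
-# catalog indexes of a database (placeholders throughout).
-gsql -d mogdb -p 8000 -c "REINDEX INDEX xxxx_index;"
-gsql -d mogdb -p 8000 -c "REINDEX TABLE tablename;"
-gsql -d mogdb -p 8000 -c "REINDEX SYSTEM mogdb;"
-```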
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/index-fault.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/index-fault.md
deleted file mode 100644
index ff575843..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/index-fault.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Index Fault Location
-summary: Index Fault Location
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Index Fault Location
-
-- **[When a User Specifies Only an Index Name to Modify the Index, A Message Indicating That the Index Does Not Exist Is Displayed](when-a-user-specifies-only-an-index-name.md)**
-- **[Reindexing Fails](reindexing-fails.md)**
-- **[B-tree Index Faults](b-tree-index-faults.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/reindexing-fails.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/reindexing-fails.md
deleted file mode 100644
index d8920008..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/reindexing-fails.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-title: Reindexing Fails
-summary: Reindexing Fails
-author: Guo Huan
-date: 2021-05-24
----
-
-# Reindexing Fails
-
-## Symptom
-
-When an index of the desc table is damaged, a series of operations cannot be performed. The error information may be as follows:
-
-```
-index \"%s\" contains corrupted page at block
- %u" ,RelationGetRelationName(rel),BufferGetBlockNumber(buf), please reindex it.
-```
-
-## Cause Analysis
-
-In practice, indexes may be corrupted due to software or hardware faults. For example, an index may be damaged if disk space is insufficient or if pages are corrupted after an index split.
-
-## Procedure
-
-If the faulty index of a column-store table is named **pg_cudesc_xxxxx_index**, the desc index table is damaged. Find the OID and table corresponding to the primary table based on the desc index table name, and run the following statement to recreate the cudesc index.
-
-```
-REINDEX INTERNAL TABLE name;
-```
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/when-a-user-specifies-only-an-index-name.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/when-a-user-specifies-only-an-index-name.md
deleted file mode 100644
index 58f381dd..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/index-fault/when-a-user-specifies-only-an-index-name.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: When a User Specifies Only an Index Name to Modify the Index, A Message Indicating That the Index Does Not Exist Is Displayed
-summary: When a User Specifies Only an Index Name to Modify the Index, A Message Indicating That the Index Does Not Exist Is Displayed
-author: Guo Huan
-date: 2021-05-24
----
-
-# When a User Specifies Only an Index Name to Modify the Index, A Message Indicating That the Index Does Not Exist Is Displayed
-
-## Symptom
-
-When a user specifies only an index name to modify the index, a message indicating that the index does not exist is displayed. The following provides an example:
-
-```
--- Create a partitioned table index HR_staffS_p1_index1, without specifying index partitions.
-CREATE INDEX HR_staffS_p1_index1 ON HR.staffS_p1 (staff_ID) LOCAL;
--- Create a partitioned table index HR_staffS_p1_index2, with index partitions specified.
-CREATE INDEX HR_staffS_p1_index2 ON HR.staffS_p1 (staff_ID) LOCAL
-(
-PARTITION staff_ID1_index,
-PARTITION staff_ID2_index TABLESPACE example3,
-PARTITION staff_ID3_index TABLESPACE example4
-) TABLESPACE example;
--- Change the tablespace of index partition staff_ID2_index to example1. A message is displayed, indicating that the index does not exist.
-ALTER INDEX HR_staffS_p1_index2 MOVE PARTITION staff_ID2_index TABLESPACE example1;
-```
-
-## Cause Analysis
-
-The possible reason is that the user is in the public schema instead of the hr schema.
-
-```
--- Run the following command to validate the inference. It is found that the calling is successful.
-ALTER INDEX hr.HR_staffS_p1_index2 MOVE PARTITION staff_ID2_index TABLESPACE example1;
--- Change the schema of the current session to hr.
-ALTER SESSION SET CURRENT_SCHEMA TO hr;
--- Run the following command to modify the index:
-ALTER INDEX HR_staffS_p1_index2 MOVE PARTITION staff_ID2_index TABLESPACE example1;
-```
-
-## Procedure
-
-Add a schema reference to a table, index, or view. The format is as follows:
-
-```
-schema.table
-```
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/an-error-occurs-during-integer-conversion.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/an-error-occurs-during-integer-conversion.md
deleted file mode 100644
index e3537084..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/an-error-occurs-during-integer-conversion.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: An Error Occurs During Integer Conversion
-summary: An Error Occurs During Integer Conversion
-author: Guo Huan
-date: 2021-05-24
----
-
-# An Error Occurs During Integer Conversion
-
-## Symptom
-
-The following error is reported during integer conversion:
-
-```
-Invalid input syntax for integer: "13."
-```
-
-## Cause Analysis
-
-Some data cannot be converted to the target data type.
-
-## Procedure
-
-Gradually narrow down the scope of the SQL statement to determine the data that cannot be converted.
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/different-data-is-displayed.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/different-data-is-displayed.md
deleted file mode 100644
index 2a916055..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/different-data-is-displayed.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: Different Data Is Displayed for the Same Table Queried By Multiple Users
-summary: Different Data Is Displayed for the Same Table Queried By Multiple Users
-author: Guo Huan
-date: 2021-05-24
----
-
-# Different Data Is Displayed for the Same Table Queried By Multiple Users
-
-## Symptom
-
-Two users log in to the same database human_resource and run the following statement separately to query the areas table, but obtain different results.
-
-```
-select count(*) from areas;
-```
-
-## Cause Analysis
-
-1. Check whether tables with the same name are actually the same table. In a relational database, a table is identified by three elements: **database**, **schema**, and **table**. In this issue, **database** is **human_resource** and **table** is **areas**.
-2. Check whether the schemas of the same-name tables are consistent. Log in as users **omm** and **user01** separately. It is found that **search_path** is **public** and **$user**, respectively. As **omm** is the cluster administrator, a schema having the same name as user **omm** is not created by default. That is, all tables are created in **public** if no schema is specified. However, when a common user, such as **user01**, is created, the same-name schema (**user01**) is created by default. That is, all tables are created in **user01** if no schema is specified.
-3. If different users access different data in the same table, check whether objects in the table have different access policies for different users.
-
-## Procedure
-
-- For the query of tables with the same name in different schemas, add the schema reference to the queried table. The format is as follows:
-
-  ```
-  schema.table
-  ```
-
-- If different access policies result in different query results of the same table, you can query the **pg_rlspolicy** system catalog to determine the specific access rules.
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/forcibly-terminating-a-session.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/forcibly-terminating-a-session.md
deleted file mode 100644
index 6b04991a..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/forcibly-terminating-a-session.md
+++ /dev/null
@@ -1,62 +0,0 @@
----
-title: Forcibly Terminating a Session
-summary: Forcibly Terminating a Session
-author: Guo Huan
-date: 2021-05-24
----
-
-# Forcibly Terminating a Session
-
-## Symptom
-
-In some cases, the administrator must forcibly terminate abnormal sessions to keep the system healthy.
-
-## Procedure
-
-1. Log in to the host as the OS user **omm**.
-
-2. Run the following command to connect to the database:
-
-    ```
-    gsql -d mogdb -p 8000
-    ```
-
-    **mogdb** is the name of the database, and **8000** is the port number.
-
-3. Find the thread ID of the faulty session from the current active session view.
-
-    ```
-    SELECT datid, pid, state, query FROM pg_stat_activity;
-    ```
-
-    A command output similar to the following is displayed, where the pid value indicates the thread ID of the session.
-
-    ```
-    datid |       pid       | state  | query
-    ------+-----------------+--------+-------
-    13205 | 139834762094352 | active |
-    13205 | 139834759993104 | idle   |
-    (2 rows)
-    ```
-
-4. Terminate the session using its thread ID.
-
-    ```
-    SELECT pg_terminate_backend(139834762094352);
-    ```
-
-    If information similar to the following is displayed, the session is successfully terminated:
-
-    ```
-    pg_terminate_backend
-    ---------------------
-    t
-    (1 row)
-    ```
-
-    If a command output similar to the following is displayed, the user attempted to terminate the current session, in which case the connection is reset rather than the session being terminated.
-
-    ```
-    FATAL: terminating connection due to administrator command
-    FATAL: terminating connection due to administrator command The connection to the server was lost. Attempting reset: Succeeded.
-    ```
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/permission-session-data-type.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/permission-session-data-type.md
deleted file mode 100644
index 474081c4..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/permission-session-data-type/permission-session-data-type.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Permission/Session/Data Type Fault Location
-summary: Permission/Session/Data Type Fault Location
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Permission/Session/Data Type Fault Location
-
-- **[Forcibly Terminating a Session](forcibly-terminating-a-session.md)**
-- **[Different Data Is Displayed for the Same Table Queried By Multiple Users](different-data-is-displayed.md)**
-- **[An Error Occurs During Integer Conversion](an-error-occurs-during-integer-conversion.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/performance-deterioration.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/performance-deterioration.md
deleted file mode 100644
index f78722bb..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/performance-deterioration.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: Performance Deterioration Caused by Dirty Page Flushing Efficiency During TPC-C High-Concurrency Long-Term Stable Running
-summary: Performance Deterioration Caused by Dirty Page Flushing Efficiency During TPC-C High-Concurrency Long-Term Stable Running
-author: zhang cuiping
-date: 2022-10-24
----
-
-# Performance Deterioration Caused by Dirty Page Flushing Efficiency During TPC-C High-Concurrency Long-Term Stable Running
-
-## Symptom
-
-TPC-C performance deteriorates due to low dirty-page flushing efficiency during high-concurrency, long-term stable running. The details are as follows: the initial performance is high.
-As the running time increases, the value of **tpmTOTAL** in the database decreases, the CPU usage of the WalWriter thread reaches 100%, and the other CPUs are almost idle. In the WDR report, the wait time for dirty page flushing accounts for the highest proportion.
-
-## Cause
-
-Generally, you can analyze the specific cause by checking the process status and operating system resource usage (such as CPU and I/O), or further analyze the root cause based on the WDR report. In this scenario, dirty page flushing is inefficient.
-
-## Solution
-
-1. Reduce the concurrency or increase the value of **shared_buffers**.
-
-2. Adjust the dirty page parameters. In scenarios where doublewrite is enabled, you can decrease **page_writer_sleep** and increase **max_io_capacity** to improve dirty page elimination efficiency, as shown in the sketch after this section.
-
-3. Switch to high-performance disks (such as NVMe disks).
-
-    The resources occupied by the database must meet the service requirements. In a high-concurrency test, you need to add resources to ensure that database services are available.
\ No newline at end of file
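-A sketch of the adjustments in item 2, using the parameter names given above with example values only; defaults and valid ranges vary by version, so verify them before applying:
-
-```
-# Flush dirty pages more aggressively: shorter page-writer sleep,
-# higher I/O capacity ceiling (illustrative values, doublewrite on;
-# data directory is a placeholder).
-gs_guc reload -D /mogdb/data/dbnode -c "page_writer_sleep=100"
-gs_guc reload -D /mogdb/data/dbnode -c "max_io_capacity=1GB"
-```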
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/primary-node-is-hung-in-demoting.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/primary-node-is-hung-in-demoting.md
deleted file mode 100644
index 36409ffe..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/primary-node-is-hung-in-demoting.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: Primary Node Is Hung in Demoting During a Switchover
-summary: Primary Node Is Hung in Demoting During a Switchover
-author: Guo Huan
-date: 2021-05-24
----
-
-# Primary Node Is Hung in Demoting During a Switchover
-
-## Symptom
-
-In a cluster deployed with one primary and multiple standby DNs, if system resources are insufficient and a switchover occurs, a node is hung in demoting.
-
-## Cause Analysis
-
-If system resources are insufficient, the third-party management thread cannot be created. As a result, the managed sub-threads cannot exit, and the primary node is hung in demoting.
-
-## Procedure
-
-Perform the following operation only in the preceding scenario. Run the following command to stop the process of the primary node so that the standby node can be promoted to primary:
-
-```
-kill -9 PID
-```
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/query-failure.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/query-failure.md
deleted file mode 100644
index 012cf926..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/query-failure.md
+++ /dev/null
@@ -1,81 +0,0 @@
----
-title: A Query Error Is Reported Due to Predicate Pushdown
-summary: A Query Error Is Reported Due to Predicate Pushdown
-author: zhang cuiping
-date: 2023-04-07
----
-
-# A Query Error Is Reported Due to Predicate Pushdown
-
-## Symptom
-
-When a predicate is pushed down in a plan, an error should not be reported according to the query execution sequence in the SQL standard. However, an error occurs during the execution.
-
-```sql
-MogDB=# select * from tba;
- a
----
- -1
-  2
-(2 rows)
-
-MogDB=# select * from tbb;
- b
----
- -1
-  1
-(2 rows)
-
-MogDB=# select * from tba join tbb on a > b where b > 0 and sqrt(a) > 1;
-ERROR: cannot take square root of a negative number
-```
-
-According to the SQL standard, the query executes as follows:
-
-1. Execute the FROM clause to ensure that all data meets the `a > b` condition.
-2. Execute the WHERE clause with the `b > 0` condition. If the result is `true`, `a > 0` can be deduced and the execution continues. If the result is `false`, the subsequent conditions are short-circuited and will not be executed.
-3. Execute the WHERE clause with the `sqrt(a) > 1` condition.
-
-However, an error is reported, indicating that the input parameter is a negative value.
-
-## Cause Analysis
-
-```sql
-MogDB=# explain (costs off) select * from tba join tbb on a > b where b > 0 and sqrt(a) > 1;
-            QUERY PLAN
-----------------------------------
- Nest loop
-   Join Filter: (a > b)
-   ->  Seq Scan on public.tba
-         Filter: (sqrt(a) > 1)
-   ->  Materialize
-         ->  Seq Scan on public.tbb
-               Filter: (b > 0)
-(7 rows)
-```
-
-As shown in the execution plan, the original `a > b`, `b > 0`, and `sqrt(a) > 1` conditions are split and pushed down to different operators. As a result, the conditions are not executed in sequence. In addition, the current equivalence class inference supports only equal sign (=) inference and cannot automatically supplement `a > 0`. As a result, an error is reported during the query.
-
-## Procedure
-
-Predicate pushdown can greatly improve query performance, and this special short-circuit and derivation scenario is not considered in most database optimizers. Therefore, you are advised to modify the query statement and manually add `a > 0` under the related conditions.
-
-```sql
-MogDB=# select * from tba join tbb on a > b where b > 0 and a > 0 and sqrt(a) > 1;
- a | b
----+---
- 2 | 1
-(1 row)
-
-MogDB=# explain (costs off) select * from tba join tbb on a > b where b > 0 and a > 0 and sqrt(a) > 1;
-              QUERY PLAN
---------------------------------------
- Nest loop
-   Join Filter: (a > b)
-   ->  Seq Scan on public.tba
-         Filter: (a > 0 and sqrt(a) > 1)
-   ->  Materialize
-         ->  Seq Scan on public.tbb
-               Filter: (b > 0)
-(7 rows)
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/service-ha-concurrency.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/service-ha-concurrency.md
deleted file mode 100644
index 2d7445bb..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/service-ha-concurrency.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: Service/High Availability/Concurrency Fault Location
-summary: Service/High Availability/Concurrency Fault Location
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Service/High Availability/Concurrency Fault Location
-
-- **[Standby Node in the Need Repair (WAL) State](standby-node-in-the-need-repair-state.md)**
-- **[Service Startup Failure](service-startup-failure.md)**
-- **[Primary Node Is Hung in Demoting During a Switchover](primary-node-is-hung-in-demoting.md)**
-- **["too many clients already" Is Reported or Threads Failed To Be Created in High Concurrency Scenarios](too-many-clients-already.md)**
-- **[Performance Deterioration Caused by Dirty Page Flushing Efficiency During TPC-C High-Concurrency Long-Term Stable Running](performance-deterioration.md)**
-- **[A Query Error Is Reported Due to Predicate Pushdown](query-failure.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/service-startup-failure.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/service-startup-failure.md
deleted file mode 100644
index ee94999f..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/service-startup-failure.md
+++ /dev/null
@@ -1,88 +0,0 @@
----
-title: Service Startup Failure
-summary: Service Startup Failure
-author: Guo Huan
-date: 2021-05-24
----
-
-# Service Startup Failure
-
-## Symptom
-
-The service startup failed.
-
-## Cause Analysis
-
-- Parameters are set to improper values, resulting in insufficient system resources in the database cluster, or parameter settings do not meet the internal restrictions in the cluster.
-- The status of some DNs is abnormal.
-- Permissions to modify directories are insufficient. For example, users do not have sufficient permissions for the **/tmp** directory or the data directory in the cluster.
-- The configured port has been occupied.
-- The system firewall is enabled.
-- The trust relationship between servers of the database in the cluster is abnormal.
-
-## Procedure
-
-- Check whether the parameter configurations are improper or violate internal constraints.
-
-  - Log in to the node that cannot be started. Check the run logs and check whether resources are insufficient or whether the parameter configurations violate internal constraints.
For example, if the message "Out of memory" or the following error information is displayed, resources are insufficient and the startup fails, or the configuration parameters violate internal constraints.
-
-    ```
-    FATAL: hot standby is not possible because max_connections = 10 is a lower setting than on the master server (its value was 100)
-    ```
-
-  - Check whether the GUC parameters are set to proper values. For example, check parameters such as **shared_buffers**, **effective_cache_size**, and **bulk_write_ring_size**, which consume many resources, or **max_connections**, which cannot simply be set to a value smaller than its previous value. For details about how to view and set GUC parameters, see Configuring Running Parameters.
-
-- Check whether the status of some DNs is abnormal. Check the status of each primary and standby instance in the current cluster using **gs_om -t status --detail**.
-
-  - If the status of all the instances on a host is abnormal, replace the host.
-
-  - If the status of an instance is **Unknown**, **Pending**, or **Down**, log in to the node where the instance resides as a cluster user to view the instance log and identify the cause. For example:
-
-    ```
-    2014-11-27 14:10:07.022 CST 140720185366288 FATAL: database "postgres" does not exist 2014-11-27 14:10:07.022 CST 140720185366288 DETAIL: The database subdirectory "base/ 13252" is missing.
-    ```
-
-    If the preceding information is displayed in a log, files stored in the data directory where the DN resides are damaged, and normal queries cannot be executed on this instance.
-
-- Check whether users have sufficient directory permissions. For example, users may not have sufficient permissions for the **/tmp** directory or the data directory in the cluster.
-
-  - Determine the directory for which users have insufficient permissions.
-  - Run the **chmod** command to modify directory permissions as required. The database user must have read/write permissions for the **/tmp** directory. To modify permissions for data directories, refer to the settings of data directories with sufficient permissions.
-
-- Check whether the configured ports have been occupied.
-
-  - Log in to the node that cannot be started and check whether the instance process exists.
-
-  - If the instance process does not exist, view the instance log to check the exception reasons. For example:
-
-    ```
-    2014-10-17 19:38:23.637 CST 139875904172320 LOG: could not bind IPv4 socket at the 0 time: Address already in use 2014-10-17 19:38:23.637 CST 139875904172320 HINT: Is another postmaster already running on port 40005? If not, wait a few seconds and retry.
-    ```
-
-    If the preceding information is displayed in a log, the TCP port on the DN has been occupied, and the instance cannot be started.
-
-    ```
-    2015-06-10 10:01:50 CST 140329975478400 [SCTP MODE] WARNING: (sctp bind) bind(socket=9, [addr:0.0.0.0,port:1024]):Address already in use -- attempt 10/10 2015-06-10 10:01:50 CST 140329975478400 [SCTP MODE] ERROR: (sctp bind) Maximum bind() attempts. Die now...
-    ```
-
-    If the preceding information is displayed in a log, the SCTP port on the DN has been occupied, and the instance cannot be started.
-
-- Run **sysctl -a** to view the **net.ipv4.ip_local_port_range** parameter.
If the port configured for an instance falls within the range of ports randomly assigned by the system, modify the value of **net.ipv4.ip_local_port_range**, ensuring that all instance port numbers in the XML file are outside this range (see the sketch following this section). Check whether a port has been occupied:
-
-  ```
-  netstat -anop | grep Port number
-  ```
-
-  The following is an example:
-
-  ```
-  [root@MogDB36 ~]# netstat -anop | grep 15970
-  tcp        0      0 127.0.0.1:15970   0.0.0.0:*   LISTEN   3920251/mogdb   off (0.00/0/0)
-  tcp6       0      0 ::1:15970         :::*        LISTEN   3920251/mogdb   off (0.00/0/0)
-  unix  2  [ ACC ]  STREAM  LISTENING  197399441  3920251/mogdb  /tmp/.s.PGSQL.15970
-  unix  3  [ ]      STREAM  CONNECTED  197461142  3920251/mogdb  /tmp/.s.PGSQL.15970
-  ```
-
-- Check whether the system firewall is enabled.
-
-- Check whether the mutual trust relationship is abnormal. Reconfigure the mutual trust relationship between servers of the database in the cluster.
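-The port checks above can be consolidated into a quick sketch; the port number and replacement range below are examples only:
-
-```
-# Inspect the ephemeral port range and check an instance port
-# (15970 is an example) for collisions or existing listeners.
-sysctl net.ipv4.ip_local_port_range
-netstat -anop | grep 15970
-# If instance ports fall inside the range, move the range away
-sysctl -w net.ipv4.ip_local_port_range="26000 65535"
-```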
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/standby-node-in-the-need-repair-state.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/standby-node-in-the-need-repair-state.md
deleted file mode 100644
index c8947b21..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/standby-node-in-the-need-repair-state.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Standby Node in the Need Repair (WAL) State
-summary: Standby Node in the Need Repair (WAL) State
-author: Guo Huan
-date: 2021-05-24
----
-
-# Standby Node in the **Need Repair (WAL)** State
-
-## Symptom
-
-The **Need Repair (WAL)** fault occurs on a standby node of MogDB.
-
-## Cause Analysis
-
-The primary and standby DB instances are disconnected due to network faults or insufficient disk space. As a result, logs are not synchronized between the primary and standby DB instances, and the database cluster fails to start.
-
-## Procedure
-
-Run the **gs_ctl build -D** command to rebuild the faulty node. For details, see the [build parameter](../../../reference-guide/tool-reference/tools-used-in-the-internal-system/gs_ctl.md#6) in the *MogDB Tool Reference*.
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/too-many-clients-already.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/too-many-clients-already.md
deleted file mode 100644
index 59e5b1d1..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/service-ha-concurrency/too-many-clients-already.md
+++ /dev/null
@@ -1,50 +0,0 @@
----
-title: too many clients already Is Reported or Threads Failed To Be Created in High Concurrency Scenarios
-summary: too many clients already Is Reported or Threads Failed To Be Created in High Concurrency Scenarios
-author: Guo Huan
-date: 2021-05-24
----
-
-# "too many clients already" Is Reported or Threads Failed To Be Created in High Concurrency Scenarios
-
-## Symptom
-
-When a large number of SQL statements are executed concurrently, the error message "sorry, too many clients already" is displayed, or an error is reported indicating that threads cannot be created or processes cannot be forked.
-
-## Cause Analysis
-
-These errors are caused by insufficient OS threads. Check **ulimit -u** in the OS. If the value is too small (for example, less than 32768), the errors are caused by the OS limitation.
-
-## Procedure
-
-Run **ulimit -u** to obtain the value of **max user processes** in the OS.
-
-```
-[root@MogDB36 mnt]# ulimit -u
-unlimited
-```
-
-Use the following formula to calculate the minimum value:
-
-```
-value = max(32768, number of instances x 8192)
-```
-
-The number of instances refers to the total number of instances on the node.
-
-To set the minimum value, add the following two lines to the **/etc/security/limits.conf** file:
-
-```
-* hard nproc [value]
-* soft nproc [value]
-```
-
-The file to be modified varies based on the OS. For versions later than CentOS 6, modify the **/etc/security/limits.d/90-nofile.conf** file in the same way.
-
-Alternatively, you can run the following command to change the value. However, the change becomes invalid upon OS restart. To solve this problem, you can add **ulimit -u [value]** to the global environment variable file **/etc/profile**.
-
-```
-ulimit -u [value]
-```
-
-In high concurrency mode, enable the thread pool to control thread resources in the database.
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/analyzing-the-status-of-a-query-statement.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/analyzing-the-status-of-a-query-statement.md
deleted file mode 100644
index 08a8c1b8..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/analyzing-the-status-of-a-query-statement.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: Analyzing the Status of a Query Statement
-summary: Analyzing the Status of a Query Statement
-author: Guo Huan
-date: 2021-05-24
----
-
-# Analyzing the Status of a Query Statement
-
-## Symptom
-
-Some query statements are executed for an excessively long time in the system. You need to analyze the status of these query statements.
-
-## Procedure
-
-1. Log in to the host as the OS user **omm**.
-
-2. Run the following command to connect to the database:
-
-    ```
-    gsql -d mogdb -p 8000
-    ```
-
-    **mogdb** is the name of the database, and **8000** is the port number.
-
-3. Set the parameter **track_activities** to **on**.
-
-    ```
-    SET track_activities = on;
-    ```
-
-    The database collects the running information about active queries only if the parameter is set to **on**.
-
-4. View the running query statements. The **pg_stat_activity** view is used as an example here.
-
-    ```
-    SELECT datname, usename, state, query FROM pg_stat_activity;
-     datname | usename | state  | query
-    ---------+---------+--------+-------
-     mogdb   | omm     | idle   |
-     mogdb   | omm     | active |
-    (2 rows)
-    ```
-
-    If the **state** column is **idle**, the connection is idle and waiting for the user to enter a command. To identify only active query statements, run the following command:
-
-    ```
-    SELECT datname, usename, state, query FROM pg_stat_activity WHERE state != 'idle';
-    ```
-
-5. Analyze whether a query statement is in the active or blocked state. Run the following command to view a query statement in the blocked state:
-
-    ```
-    SELECT datname, usename, state, query FROM pg_stat_activity WHERE waiting = true;
-    ```
-
-    The query statement is displayed. It is requesting a lock resource that may be held by another session, and is waiting for the lock resource to be released by that session.
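-The procedure above can also run non-interactively. A sketch, assuming database `mogdb` on port 8000 as in the examples:
-
-```
-# Enable collection for this session, then list active and waiting
-# statements in one pass.
-gsql -d mogdb -p 8000 <<'SQL'
-SET track_activities = on;
-SELECT datname, usename, state, query FROM pg_stat_activity WHERE state != 'idle';
-SELECT datname, usename, state, query FROM pg_stat_activity WHERE waiting = true;
-SQL
-```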
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/analyzing-whether-a-query-statement-is-blocked.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/analyzing-whether-a-query-statement-is-blocked.md
deleted file mode 100644
index 226fa3ff..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/analyzing-whether-a-query-statement-is-blocked.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-title: Analyzing Whether a Query Statement Is Blocked
-summary: Analyzing Whether a Query Statement Is Blocked
-author: Guo Huan
-date: 2021-05-24
----
-
-# Analyzing Whether a Query Statement Is Blocked
-
-## Symptom
-
-During database running, query statements are blocked in some service scenarios. As a result, the query statements are executed for an excessively long time.
-
-## Cause Analysis
-
-A query statement uses a lock to protect the data objects that it wants to access. If the data objects have been locked by another session, the query statement is blocked and waits for that session to complete its operation and release the lock. The data objects requiring locks include tables and tuples.
-
-## Procedure
-
-1. Log in to the host as the OS user **omm**.
-
-2. Run the following command to connect to the database:
-
-    ```
-    gsql -d mogdb -p 8000
-    ```
-
-    **mogdb** is the name of the database, and **8000** is the port number.
-
-3. Find the thread ID of the faulty session from the current active session view.
-
-    ```
-    SELECT w.query AS waiting_query, w.pid AS w_pid, w.usename AS w_user, l.query AS locking_query, l.pid AS l_pid, l.usename AS l_user, t.schemaname || '.' || t.relname AS tablename FROM pg_stat_activity w JOIN pg_locks l1 ON w.pid = l1.pid AND NOT l1.granted JOIN pg_locks l2 ON l1.relation = l2.relation AND l2.granted JOIN pg_stat_activity l ON l2.pid = l.pid JOIN pg_stat_user_tables t ON l1.relation = t.relid WHERE w.waiting = true;
-    ```
-
-4. Terminate the session using its thread ID.
-
-    ```
-    SELECT pg_terminate_backend(139834762094352);
-    ```
-
-    If information similar to the following is displayed, the session is successfully terminated:
-
-    ```
-    pg_terminate_backend
-    ---------------------
-    t
-    (1 row)
-    ```
-
-    If a command output similar to the following is displayed, the user attempted to terminate the current session, in which case the connection is reset rather than the session being terminated.
-
-    ```
-    FATAL: terminating connection due to administrator command
-    FATAL: terminating connection due to administrator command The connection to the server was lost. Attempting reset: Succeeded.
-   ```
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/lock-wait-timeout-is-displayed.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/lock-wait-timeout-is-displayed.md
deleted file mode 100644
index 19447292..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/lock-wait-timeout-is-displayed.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-title: Lock wait timeout Is Displayed When a User Executes an SQL Statement
-summary: Lock wait timeout Is Displayed When a User Executes an SQL Statement
-author: Guo Huan
-date: 2021-05-24
----
-
-# "Lock wait timeout" Is Displayed When a User Executes an SQL Statement
-
-## Symptom
-
-"Lock wait timeout" is displayed when a user executes an SQL statement.
-
-```
-ERROR: Lock wait timeout: thread 140533638080272 waiting for ShareLock on relation 16409 of database 13218 after 1200000.122 ms
-ERROR: Lock wait timeout: thread 140533638080272 waiting for AccessExclusiveLock on relation 16409 of database 13218 after 1200000.193 ms
-```
-
-## Cause Analysis
-
-Lock waiting times out in the database.
-
-## Procedure
-
-- After detecting such errors, the database automatically retries the SQL statements. The number of retries is controlled by **max_query_retry_times**.
-- To analyze the cause of the lock wait timeout, find the SQL statements that time out in the **pg_locks** and **pg_stat_activity** system catalogs.
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/low-query-efficiency.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/low-query-efficiency.md
deleted file mode 100644
index 479c9e35..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/low-query-efficiency.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: Low Query Efficiency
-summary: Low Query Efficiency
-author: Guo Huan
-date: 2021-05-24
----
-
-# Low Query Efficiency
-
-## Symptom
-
-A query task that used to complete in a few milliseconds now takes several seconds, and one that used to take several seconds now takes as long as half an hour.
-
-## Procedure
-
-Perform the following procedure to locate the cause.
-
-1. Run the **analyze** command to analyze the database.
-
-   The **analyze** command updates statistics such as data sizes and attributes in all tables. You are advised to perform this operation under a light job load. If the query efficiency is improved or restored after the command execution, the **autovacuum** process does not function well and requires further analysis.
-
-2. Check whether the query statement returns unnecessary information.
-
-   For example, if a query statement queries all records in a table but only the first 10 records are used, a table with 50 records is queried quickly. However, if a table contains 50000 records, the query efficiency decreases. If an application requires only part of the data but the query statement returns all of it, add a LIMIT clause to the query statement to restrict the number of returned records. In this way, the database optimizer can optimize space and improve query efficiency.
-
-3. Check whether the query statement still has a low response even when it is executed alone.
-
-   Run the query statement when there are no or only a few other query requests in the database, and observe the query efficiency. If the efficiency is high, the previous issue is possibly caused by a heavily loaded host in the database system or by resource contention with other queries running at the same time.
-
-4. Run the same query statement repeatedly and check the query efficiency.
-
-   One major cause of low query efficiency is that the required information is not cached in the memory or has been evicted by other query requests because memory resources are insufficient. This can be verified by running the same query statement repeatedly and observing that the query efficiency increases gradually.
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/slow-response-to-a-query-statement.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/slow-response-to-a-query-statement.md
deleted file mode 100644
index fba12ae2..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/slow-response-to-a-query-statement.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-title: Slow Response to a Query Statement
-summary: Slow Response to a Query Statement
-author: Guo Huan
-date: 2021-05-24
----
-
-# Slow Response to a Query Statement
-
-## Symptom
-
-After a query statement has been executed, no response is returned for a long time.
-
-## Cause Analysis
-
-- The query statement is complex and requires a long time for execution.
-
-- The query statement is blocked.
-
-## Procedure
-
-1. Log in to the host as the OS user **omm**.
-
-2. Run the following command to connect to the database:
-
-   ```
-   gsql -d mogdb -p 8000
-   ```
-
-   **mogdb** is the name of the database, and **8000** is the port number.
-
-3. Check for the query statements that are executed for a long time in the system.
-
-   ```sql
-   SELECT EXTRACT(DAY FROM (current_timestamp - query_start)) * 24 * 60 + EXTRACT(HOUR FROM (current_timestamp - query_start)) * 60 + EXTRACT(MINUTE FROM (current_timestamp - query_start)) AS runtime, datname, usename, query FROM pg_stat_activity WHERE state != 'idle' ORDER BY 1 desc;
-
-   -- In B-compatible mode, run the following statement:
-   SELECT timestampdiff(minutes, query_start, current_timestamp) AS runtime, datname, usename, query FROM pg_stat_activity WHERE state != 'idle' ORDER BY 1 desc;
-   ```
-
-   Query statements are returned, sorted by execution duration in descending order. The first record is the query statement that takes the longest time to execute.
-
-   Alternatively, you can use the [TIMESTAMPDIFF](../../../reference-guide/functions-and-operators/date-and-time-processing-functions-and-operators.md) function to filter for statements whose difference between **current_timestamp** and **query_start** exceeds a threshold, so as to identify the query statements that have been executed for longer than this threshold. The first parameter of **timestampdiff** is the time difference unit. For example, execute the following statement to query the statements whose execution lasts more than 2 minutes:
-
-   ```sql
-   SELECT query FROM pg_stat_activity WHERE (EXTRACT(DAY FROM (current_timestamp - query_start)) * 24 * 60 + EXTRACT(HOUR FROM (current_timestamp - query_start)) * 60 + EXTRACT(MINUTE FROM (current_timestamp - query_start))) > 2;
-
-   -- In B-compatible mode, run the following statement:
-   SELECT query FROM pg_stat_activity WHERE timestampdiff(minutes, query_start, current_timestamp) > 2;
-   ```
-
-4. Analyze the status of the query statements that were run for a long time.
-
-   - If the query statement is normal, wait until the execution of the query statement is complete.
-   - If the query statement is blocked, rectify the fault by referring to [Analyzing Whether a Query Statement Is Blocked](analyzing-whether-a-query-statement-is-blocked.md).
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/sql-fault.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/sql-fault.md
deleted file mode 100644
index b040fb60..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/sql-fault/sql-fault.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: SQL Fault Location
-summary: SQL Fault Location
-author: zhang cuiping
-date: 2023-04-07
----
-
-# SQL Fault Location
-
-- **["Lock wait timeout" Is Displayed When a User Executes an SQL Statement](lock-wait-timeout-is-displayed.md)**
-- **[Analyzing Whether a Query Statement Is Blocked](analyzing-whether-a-query-statement-is-blocked.md)**
-- **[Low Query Efficiency](low-query-efficiency.md)**
-- **[Slow Response to a Query Statement](slow-response-to-a-query-statement.md)**
-- **[Analyzing the Status of a Query Statement](analyzing-the-status-of-a-query-statement.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/an-error-is-reported-when-the-table-partition-is-modified.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/an-error-is-reported-when-the-table-partition-is-modified.md
deleted file mode 100644
index c5710586..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/an-error-is-reported-when-the-table-partition-is-modified.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-title: An Error Is Reported When the Table Partition Is Modified
-summary: An Error Is Reported When the Table Partition Is Modified
-author: Guo Huan
-date: 2021-05-24
----
-
-# An Error Is Reported When the Table Partition Is Modified
-
-## Symptom
-
-When **ALTER TABLE PARTITION** is performed, the following error message is displayed:
-
-```
-ERROR: start value of partition "XX" NOT EQUAL up-boundary of last partition.
-```
-
-## Cause Analysis
-
-If the **ALTER TABLE PARTITION** statement involves both the DROP PARTITION operation and the ADD PARTITION operation, MogDB always performs the DROP PARTITION operation before the ADD PARTITION operation regardless of their order. However, performing DROP PARTITION before ADD PARTITION causes a partition gap. As a result, an error is reported.
-
-## Procedure
-
-To prevent partition gaps, set **START** in ADD PARTITION to the value of **START** of the partition specified in DROP PARTITION. The following is an example:
-
-```
--- Create a partitioned table partitiontest.
-mogdb=# CREATE TABLE partitiontest
-(
-c_int integer,
-c_time TIMESTAMP WITHOUT TIME ZONE
-)
-PARTITION BY range (c_int)
-(
-partition p1 start(100)end(108),
-partition p2 start(108)end(120)
-);
--- An error is reported when the following statements are used:
-mogdb=# ALTER TABLE partitiontest ADD PARTITION p3 start(120)end(130), DROP PARTITION p2;
-ERROR: start value of partition "p3" NOT EQUAL up-boundary of last partition.
-mogdb=# ALTER TABLE partitiontest DROP PARTITION p2,ADD PARTITION p3 start(120)end(130);
-ERROR: start value of partition "p3" NOT EQUAL up-boundary of last partition.
--- Change them as follows:
-mogdb=# ALTER TABLE partitiontest ADD PARTITION p3 start(108)end(130), DROP PARTITION p2;
-mogdb=# ALTER TABLE partitiontest DROP PARTITION p2,ADD PARTITION p3 start(108)end(130);
-```
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/table-partition-table.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/table-partition-table.md
deleted file mode 100644
index 88f78c73..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/table-partition-table.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Table/Partition Table Fault Location
-summary: Table/Partition Table Fault Location
-author: zhang cuiping
-date: 2023-04-23
----
-
-# Table/Partition Table Fault Location
-
-- **[Table Size Does not Change After VACUUM FULL Is Executed on the Table](table-size-does-not-change.md)**
-- **[An Error Is Reported When the Table Partition Is Modified](an-error-is-reported-when-the-table-partition-is-modified.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/table-size-does-not-change.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/table-size-does-not-change.md
deleted file mode 100644
index ce32f744..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-cases/table-partition-table/table-size-does-not-change.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: Table Size Does not Change After VACUUM FULL Is Executed on the Table
-summary: Table Size Does not Change After VACUUM FULL Is Executed on the Table
-author: Guo Huan
-date: 2021-05-24
----
-
-# Table Size Does not Change After VACUUM FULL Is Executed on the Table
-
-## Symptom
-
-After a user runs the **VACUUM FULL** command to clear a table, the table size does not change.
-
-## Cause Analysis
-
-Assume the table is named **table_name**. Possible causes are as follows:
-
-- No data has been deleted from the **table_name** table. Therefore, the execution of **VACUUM FULL table_name** does not cause the table size to change.
-- Concurrent transactions exist during the execution of **VACUUM FULL table_name**. As a result, recently deleted data may be skipped when the table is cleared.
-
-## Procedure
-
-For the second possible cause, use either of the following methods:
-
-- Wait until all concurrent transactions are complete, and run the **VACUUM FULL table_name** command again.
-
-- If the table size still does not change, ensure that no service operations are performed on the table, and then execute the following SQL statement to obtain the current transaction ID (XID):
-
-  ```
-  select txid_current();
-  ```
-
-  Then, run the following command to check the active transaction list:
-
-  ```
-  select txid_current_snapshot();
-  ```
-
-  If any XID in the active transaction list is smaller than the current transaction XID, stop the database and then start it. Run **VACUUM FULL** to clear the table again.
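-
-  To verify whether **VACUUM FULL** actually reclaimed space, you can compare the table size before and after the operation. The following is a minimal sketch (an illustrative addition) that relies only on the standard **pg_size_pretty**/**pg_total_relation_size** functions and the **n_dead_tup** counter in the **pg_stat_user_tables** view; **table_name** is a placeholder:
-
-  ```
-  -- Check the physical size and the number of dead tuples before cleaning.
-  SELECT pg_size_pretty(pg_total_relation_size('table_name')) AS total_size, n_dead_tup
-  FROM pg_stat_user_tables WHERE relname = 'table_name';
-
-  VACUUM FULL table_name;
-
-  -- Run the same query again. If n_dead_tup was already 0 before cleaning,
-  -- there is no space to reclaim and the size is expected to stay unchanged.
-  SELECT pg_size_pretty(pg_total_relation_size('table_name')) AS total_size, n_dead_tup
-  FROM pg_stat_user_tables WHERE relname = 'table_name';
-  ```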
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-methods.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-methods.md
deleted file mode 100644
index edd8ab0d..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-fault-locating-methods.md
+++ /dev/null
@@ -1,283 +0,0 @@
----
-title: Common Fault Locating Methods
-summary: Common Fault Locating Methods
-author: Guo Huan
-date: 2021-05-24
----
-
-# Common Fault Locating Methods
-
-## **Locating OS Faults**
-
-If all instances on a node are abnormal, an OS fault may have occurred.
-
-Use one of the following methods to check whether any OS fault occurs:
-
-- Log in to the node using SSH or other remote login tools. If the login fails, run the **ping** command to check the network status.
-
-  - If no response is returned, the server is down or being restarted, or its network connection is abnormal.
-
-    The restart takes a long time (about 20 minutes) if the system crashes due to an OS kernel panic. Try to connect to the host every 5 minutes. If the connection still fails after 20 minutes, the server is down or the network connection is abnormal. In this case, contact the administrator to locate the fault on site.
-
-  - If ping operations succeed but SSH login fails or commands cannot be executed, the server does not respond to external connections, possibly because system resources are insufficient (for example, CPU or I/O resources are overloaded). In this case, try again. If the fault persists for 5 minutes, contact the administrator for further fault locating on site.
-
-- If login is successful but responses are slow, check the system running status: collect system information and check the system version, hardware, parameter settings, and login users. The following are common commands for reference:
-
-  - Use the **who** command to check online users.
-
-    ```
-    [root@MogDB36 ~]# who
-    root     pts/0        2020-11-07 16:32 (10.70.223.238)
-    wyc      pts/1        2020-11-10 09:54 (10.70.223.222)
-    root     pts/2        2020-10-10 14:20 (10.70.223.238)
-    root     pts/4        2020-10-09 10:14 (10.70.223.233)
-    root     pts/5        2020-10-09 10:14 (10.70.223.233)
-    root     pts/7        2020-10-31 17:03 (10.70.223.222)
-    root     pts/9        2020-10-20 10:03 (10.70.220.85)
-    ```
-
-  - Use the **cat /etc/openEuler-release** and **uname -a** commands to check the system version and kernel information.
-
-    ```
-    [root@MogDB36 ~]# cat /etc/openEuler-release
-    openEuler release 20.03 (LTS)
-    [root@MogDB36 ~]# uname -a
-    Linux MogDB36 4.19.90-2003.4.0.0036.oe1.aarch64 #1 SMP Mon Mar 23 19:06:43 UTC 2020 aarch64 aarch64 aarch64 GNU/Linux
-    [root@MogDB36 ~]#
-    ```
-
-  - Use the **sysctl -a** (run this command as user **root**) and **cat /etc/sysctl.conf** commands to obtain system parameter information.
-
-  - Use the **cat /proc/cpuinfo** and **cat /proc/meminfo** commands to obtain CPU and memory information.
-
-    ```
-    [root@MogDB36 ~]# cat /proc/cpuinfo
-    processor       : 0
-    BogoMIPS        : 200.00
-    Features        : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp asimdhp cpuid asimdrdm jscvt fcma dcpop asimddp asimdfhm
-    CPU implementer : 0x48
-    CPU architecture: 8
-    CPU variant     : 0x1
-    CPU part        : 0xd01
-    CPU revision    : 0
-    [root@MogDB36 ~]# cat /proc/meminfo
-    MemTotal:       534622272 kB
-    MemFree:        253322816 kB
-    MemAvailable:   369537344 kB
-    Buffers:          2429504 kB
-    Cached:         253063168 kB
-    SwapCached:             0 kB
-    Active:          88570624 kB
-    Inactive:       171801920 kB
-    Active(anon):     4914880 kB
-    Inactive(anon):  67011456 kB
-    Active(file):    83655744 kB
-    Inactive(file): 104790464 kB
-    ```
-
-  - Use the **top -H** command to query the CPU usage and check whether the CPU usage is high due to a specific process. If it is, use the **gdb** or **gstack** command to print the stack trace of this process and check whether this process is in an infinite loop.
-
-  - Use the **iostat -x 1 3** command to query the I/O usage and check whether the I/O usage of the current disk is full. View the ongoing jobs to determine whether to handle the jobs with high I/O usage.
-
-  - Use the **vmstat 1 3** command to query the memory usage in the current system and use the **top** command to obtain the processes with unexpectedly high memory usage.
-
-  - View the OS logs (**/var/log/messages**) or dmesg information as user **root** to check whether errors have occurred in the OS.
-
-  - The watchdog of an OS is a mechanism to ensure that the OS runs properly or exits from the infinite loop or deadlock state. If the watchdog times out (the default value is 60s), the system resets.
-
-## **Locating Network Faults**
-
-When the database runs normally, the network layer is transparent to upper-layer users. However, during the long-term operation of a database cluster, network exceptions or errors may occur. Common exceptions caused by network faults are as follows:
-
-- Network error reported due to database startup failure.
-- Abnormal status, for example, all instances on a host are in the **UnKnown** state, or all services are switched over to standby instances.
-- Network connection failure.
-- Network disconnection reported during a database SQL query.
-- Process response failures during database connection or query execution.
-
-When a network fault occurs in a database, locate and analyze the fault by using network-related Linux command tools (such as **ping**, **ifconfig**, **netstat**, and **lsof**) and process stack viewers (such as **gdb** and **gstack**) based on database log information. This section lists common network faults and describes how to analyze and locate them.
-
-Common faults are as follows:
-
-- Network error reported due to a startup failure
-
-  **Symptom 1**: The log contains the following error information. The port may be listened on by another process.
-
-  ```
-  LOG: could not bind socket at the 10 time, is another postmaster already running on port 54000?
-  ```
-
-  **Solution**: Run the following command to check the process that listens on the port. Replace the port number with the actual one.
-
-  ```
-  [root@MogDB36 ~]# netstat -anop | grep 15970
-  tcp        0      0 127.0.0.1:15970         0.0.0.0:*               LISTEN      3920251/mogdb        off (0.00/0/0)
-  tcp6       0      0 ::1:15970               :::*                    LISTEN      3920251/mogdb        off (0.00/0/0)
-  unix  2      [ ACC ]     STREAM     LISTENING     197399441 3920251/mogdb        /tmp/.s.PGSQL.15970
-  unix  3      [ ]         STREAM     CONNECTED     197461142 3920251/mogdb        /tmp/.s.PGSQL.15970
-  ```
-
-  Forcibly stop the process that is occupying the port or change the listening port of the database based on the query result.
-
-  **Symptom 2**: When the **gs_om -t status -detail** command is used to query the status, the command output shows that the connection between the primary and standby nodes is not established.
-
-  **Solution**: In openEuler, run the **systemctl status firewalld.service** command to check whether the firewall is enabled on this node. If it is enabled, run the **systemctl stop firewalld.service** command to disable it.
-
-  ```
-  [root@MogDB36 mnt]# systemctl status firewalld.service
-  ● firewalld.service - firewalld - dynamic firewall daemon
-    Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
-    Active: inactive (dead)
-      Docs: man:firewalld(1)
-  ```
-
-  The command varies according to the operating system. You can run the corresponding command to view and modify the configuration.
-
-- The database is abnormal.
-
-  **Symptom**: The following problems occur on a node:
-
-  - All instances are in the **Unknown** state.
-  - All primary instances are switched to standby instances.
-  - Errors "Connection reset by peer" and "Connection timed out" are frequently displayed.
-
-  **Solution**
-
-  - If you cannot connect to the faulty server through SSH, run the **ping** command on other servers to send data packets to the faulty server. If the ping operation succeeds, the connection fails because resources such as memory, CPUs, and disks on the faulty server are used up.
-
-  - Connect to the faulty server through SSH and run the **/sbin/ifconfig eth ?** command every other second (replace the question mark (?) with the number indicating the position of the NIC). Check the value changes of **dropped** and **errors**. If they increase rapidly, the NIC or NIC driver may be faulty.
-
-    ```
-    [root@MogDB36 ~]# ifconfig enp125s0f0
-    enp125s0f0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
-            inet 10.90.56.36  netmask 255.255.255.0  broadcast 10.90.56.255
-            inet6 fe80::7be7:8038:f3dc:f916  prefixlen 64  scopeid 0x20<link>
-            ether 44:67:47:7d:e6:84  txqueuelen 1000  (Ethernet)
-            RX packets 129344246  bytes 228050833914 (212.3 GiB)
-            RX errors 0  dropped 647228  overruns 0  frame 0
-            TX packets 96689431  bytes 97279775245 (90.5 GiB)
-            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
-    ```
-
-  - Check whether the following parameters are correctly configured:
-
-    ```
-    net.ipv4.tcp_retries1 = 3
-    net.ipv4.tcp_retries2 = 15
-    ```
-
-- Network connection failure.
-
-  **Symptom 1**: A node fails to connect to other nodes, and the "Connection refused" error is reported in the log.
-
-  **Solution**
-
-  - Check whether the port is incorrectly configured, which would make the port used for connection different from the listening port of the peer end. Check whether the port number recorded in the **postgresql.conf** configuration file of the faulty node is the same as the listening port number of the peer end. (A query for confirming the listening configuration from inside the database follows this list.)
-  - Check whether the peer listening port is normal (for example, by running the **netstat -anp** command).
-  - Check whether the peer process exists.
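-
-  Besides the OS-side checks above, the listening configuration can be confirmed from inside the database. The following is a minimal sketch (an illustrative addition, not part of the original procedure) that queries the standard **pg_settings** view; it assumes the node can still be reached through some working connection:
-
-  ```
-  SELECT name, setting FROM pg_settings WHERE name IN ('port', 'listen_addresses');
-  ```
-
-  Compare the returned **port** value with the port number that the connecting side actually uses.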
-
-  **Symptom 2**: When SQL operations are performed on the database, the connection descriptor fails to be obtained. The following error information is displayed:
-
-  ```
-  WARNING: 29483313: incomplete message from client:4905,9
-  WARNING: 29483313: failed to receive connDefs at the time:1.
-  ERROR: 29483313: failed to get pooled connections
-  ```
-
-  In the logs, locate and view the content before the preceding error messages, which are generated due to incorrect primary/standby information. The error details are displayed as follows.
-
-  ```
-  FATAL: dn_6001_6002: can not accept connection in pending mode.
-  FATAL: dn_6001_6002: the database system is starting up
-  FATAL: dn_6009_6010: can not accept connection in standby mode.
-  ```
-
-  **Solution**
-
-  - Run the **gs_om -t status -detail** command to query the status and check whether a primary/standby switchover has occurred. Reset the instance status.
-  - In addition, check whether a core dump or restart has occurred on the node that fails to be connected. Check the om log to see whether a restart occurred.
-
-- Network disconnection reported during a database SQL query.
-
-  **Symptom 1**: The query fails, and the following error information is displayed:
-
-  ```
-  ERROR: dn_6065_6066: Failed to read response from Datanodes. Detail: Connection reset by peer. Local: dn_6065_6066 Remote: dn_6023_6024
-  ERROR: Failed to read response from Datanodes Detail: Remote close socket unexpectedly
-  ERROR: dn_6155_6156: dn_6151_6152: Failed to read vector response from Datanodes
-  ```
-
-  If the connection fails, the error information may be as follows:
-
-  ```
-  ERROR: Distribute Query unable to connect 10.145.120.79:14600 [Detail:stream connect connect() fail: Connection timed out
-  ERROR: Distribute Query unable to connect 10.144.192.214:12600 [Detail:receive accept response fail: Connection timed out
-  ```
-
-  **Solution**
-
-  1. Use **gs_check** to check whether the network configuration meets requirements. For network check, see "Tool Reference > Server Tools > [gs_check](../reference-guide/tool-reference/server-tools/gs_check.md)" in the *Reference Guide*.
-  2. Check whether a process core dump, restart, or switchover occurs.
-  3. If problems still exist, contact network technical engineers.
-
-## **Locating Disk Faults**
-
-Common disk faults include insufficient disk space, bad blocks of disks, and unmounted disks. Disk faults such as unmounted disks damage the file system. The cluster management mechanism identifies this kind of fault and stops the instance, and the instance status is **Unknown**. However, disk faults such as insufficient disk space do not damage the file system. The cluster management mechanism cannot identify this kind of fault, and service processes exit abnormally when accessing a faulty disk. Failures cover database startup, checksum verification, page read and write operations, and page verification.
-
-- For faults that result in file system damage, the instance status is **Unknown** when you view the host status. Perform the following operations to locate the disk fault:
-
-  - Check the logs. If the logs contain information similar to "data path disc writable test failed", the file system is damaged.
-
-  - A possible cause of file system damage is an unmounted disk. Run the **ls -l** command to check whether the disk directory permission is abnormal.
-
-  - Another possible cause is that the disk has bad blocks.
In this case, the OS rejects read and write operations to protect the file system. You can use a bad block check tool, for example, **badblocks**, to check whether bad blocks exist.
-
-    ```
-    [root@openeuler123 mnt]# badblocks /dev/sdb1 -s -v
-    Checking blocks 0 to 2147482623
-    Checking for bad blocks (read-only test): done
-    Pass completed, 0 bad blocks found. (0/0/0 errors)
-    ```
-
-- For faults that do not damage the file system, the service process will report an exception and exit when it accesses the faulty disk. Perform the following operations to locate the disk fault:
-
-  View the logs. The logs contain read and write errors, such as "No space left on device" and "invalid page header in block 122838 of relation base/16385/152715". Run the **df -h** command to check the disk space. If the disk usage is 100% as shown below, the read and write errors are caused by insufficient disk space:
-
-  ```
-  [root@openeuler123 mnt]# df -h
-  Filesystem                  Size  Used Avail Use% Mounted on
-  devtmpfs                    255G     0  255G   0% /dev
-  tmpfs                       255G   35M  255G   1% /dev/shm
-  tmpfs                       255G   57M  255G   1% /run
-  tmpfs                       255G     0  255G   0% /sys/fs/cgroup
-  /dev/mapper/openeuler-root  196G  8.8G  178G   5% /
-  tmpfs                       255G  1.0M  255G   1% /tmp
-  /dev/sda2                   9.8G  144M  9.2G   2% /boot
-  /dev/sda1                    10G  5.8M   10G   1% /boot/efi
-  /dev/mapper/openeuler-home  1.5T   69G  1.4T   5% /home
-  tmpfs                        51G     0   51G   0% /run/user/0
-  tmpfs                        51G     0   51G   0% /run/user/1004
-  /dev/sdb1                   2.0T  169G  1.9T   9% /data
-  ```
-
-## **Locating Database Faults**
-
-- Logs. Database logs record the operations (starting, running, and stopping) on servers. Database users can view logs to quickly locate fault causes and rectify the faults accordingly.
-
-- Views. A database provides different views to display its internal status. When locating a fault, you can use the following views:
-
-  - **pg_stat_activity**: shows the status of each session on the current instance.
-  - **pg_thread_wait_status**: shows the wait events of each thread on the current instance.
-  - **pg_locks**: shows the status of locks on the current instance.
-
-- Core files. Abnormal termination of a database process will trigger a core dump. A core dump file helps locate faults and determine fault causes. Once a core dump occurs during process running, collect the core file immediately for further analysis and fault locating. Note the following:
-
-  - The OS performance is affected, especially when errors occur frequently.
-
-  - The OS disk space will be occupied by core files. Therefore, after core files are discovered, locate and rectify the errors as soon as possible. The OS is delivered with a core dump mechanism. If this mechanism is enabled, core files are generated for each core dump, which has an impact on the OS performance and disk space.
-
-  - Set the path for generating core files by modifying the **/proc/sys/kernel/core_pattern** file.
-
-    ```
-    [root@openeuler123 mnt]# cat /proc/sys/kernel/core_pattern
-    /data/jenkins/workspace/MogDBInstall/dbinstall/cluster/corefile/core-%e-%p-%t
-    ```
diff --git a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-faults-and-identification.md b/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-faults-and-identification.md
deleted file mode 100644
index 3eebe188..00000000
--- a/product/en/docs-mogdb/v5.2/common-faults-and-identification/common-faults-and-identification.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Common Faults and Identification
-summary: Common Faults and Identification
-author: zhang cuiping
-date: 2023-04-07
----
-
-# Common Faults and Identification
-
-- **[Common Fault Locating Methods](common-fault-locating-methods.md)**
-- **[Common Fault Locating Cases](./common-fault-locating-cases/common-fault-locating-cases.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/communication-matrix.md b/product/en/docs-mogdb/v5.2/communication-matrix.md
deleted file mode 100644
index f11ad9f3..00000000
--- a/product/en/docs-mogdb/v5.2/communication-matrix.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-title: Communication Matrix
-summary: Communication Matrix
-author: zhang cuiping
-date: 2022-10-24
----
-
-# Communication Matrix
-
-**Table 1** Communication matrix
-
-| Source Device | Source IP | Source Port | Destination Device | Destination IP | Destination Port (Listening) | Protocol | Port Description | Listening Port Configurable (Yes/No) | Authentication Mode | Encryption Mode | Plane | Introduced In | Special Scenario | Remarks |
-| :---------------------------- | :--------------------------------------------------------- | :---------- | :------------------ | :----------------------------------------------------------- | :-------------------------------------------------------- | :--------- | :----------------------------------------------------------- | :----------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | :---------------- | :------------ | :--------------- | :------- |
-| Client | IP address of the node where the source device is located. | 1024–65535 | DN | IP address of the node where the DN is located. | dataPortBase | TCP | [Function] Port for the client to send connection requests.
[Description] The port number ranges from 1024 to 65529. The actual value is equal to the value of the GUC parameter **port**.
[Enabled by default after installation] Yes | Yes | Method 1: username/password, based on SHA-256 or SM3 authentication.
Method 2: username/password, based on MD5 authentication (This method is not supported by default. It is reserved for compatibility with open-source third-party tools and is not recommended.)
Method 3: certificate authentication | SSL encryption
SSL encryption
Data is encrypted using SSL. | User plane | MogDB 1.0.0 | None | SQL | -| Internal tool | IP address of the node where the cluster is located. | 1024–65535 | DN | IP address of the node where the DN is located. | dataPortBase+1 | TCP | [Function] Port for the internal tool to send connection requests.
[Description] The port number ranges from 1025 to 65530. The actual value is equal to the value of the GUC parameter **port** plus 1.
[Enabled by default after installation] Yes | Yes | Method 1: username/password, based on SHA-256, SM3, or MD5 authentication (MD5 is not supported by default. It is reserved for compatibility with open-source third-party tools and is not recommended.)
Method 2: Local trust authentication (only for initial users whose usernames are the same as that of the OS user who runs the database) | SSL encryption | Maintenance plane | MogDB 1.0.0 | None | Storage | -| Primary and standby DNs | IP address of the node where the source device is located. | 1024–65535 | DN | IP address of the node where the DN is located. | haPort | TCP | [Function] Port for replication between primary and standby DNs. The standby DN connects to the primary DN.
[Description] The port number ranges from 1025 to 65530. The actual value is equal to the value of **localport** in the connection string of the GUC parameter **replconninfo**. The default value is the value of **port** plus 1, which is the same as the value of **dataPortBase** plus 1.
[Enabled by default after installation] Yes | Yes | IP address authentication or IP address + Kerberos authentication | SSL encryption | Maintenance plane | MogDB 1.0.0 | None | Storage | -| Client | IP address of the node where the source device is located. | 1024-65535 | DN | IP address of the node where the DN is located. | haPort | TCP | [Function] Port for connecting to a DN to extract logical logs.
[Description] The port number ranges from 1025 to 65530. The actual value is equal to the value of **localport** in the connection string of the GUC parameter **replconninfo**. The default value is the value of **port** plus 1, which is the same as the value of **dataPortBase** plus 1.
[Enabled by default after installation] Yes | Yes | Username/Password, based on SHA-256, SM3, or MD5 authentication (MD5 is not supported by default. It is reserved for compatibility with open-source third-party tools and is not recommended.) | SSL encryption | User plane | MogDB 1.0.0 | None | Storage | -| Standby DN | IP address of the node where the source device is located. | 1024–65535 | DN | IP address of the node where the DN is located. | remote heartbeat port | TCP | [Function] Port for the heartbeat connection request between the primary and standby DNs.
[Description] The port number ranges from 1029 to 65535. The actual value is equal to the value of **remoteheartbeatport** in the connection string of the GUC parameter **replconninfo**. The default value is the value of **port** plus 5.
[Enabled by default after installation] Yes | Yes | IP address authentication | Data is not encrypted. | Maintenance plane | MogDB 1.0.0 | None | Storage | -| Primary and standby DNs | IP address of the node where the source device is located. | 1024–65535 | DN | IP address of the node where the DN is located. | dcf_config Port | TCP | [Function] Port for processing connection and message requests between the primary and standby DNs.
[Description] The port number ranges from 1024 to 65535. The source port number is a random port number. The destination port is subject to the port number set in the configuration file.
[Enabled by default after installation] Yes when the DCF mode is enabled. | Yes | IP address authentication + SSL certificate authentication | SSL encryption | Maintenance plane | MogDB 3.0.0 | None | DCF | -| CM Agent/cm_ctl | IP address of the node where the source device is located. | 1024–65535 | CM Server | IP address of the node where the CM Server is located. | cmServerPortBase | TCP | [Function] Port for processing CM Agent and cm_ctl connection requests.
[Description] The port number ranges from 1024 to 65534, and the default value is **5000**.
[Enabled by default after installation] Yes | Yes | IP address authentication, IP address + Kerberos authentication, or IP address authentication + SSL certificate authentication | SSL encryption | Maintenance plane | MogDB 3.0.0 | None | CM | -| Kerberos client (DN/CM Agent) | IP address of the node where the source device is located. | 1024–65535 | Kerberos | IP address of the node where the Kerberos service is located. | 21732 | UDP | [Function] Port for listening on the Kerberos KDC service, which provides the authentication capability between nodes in a cluster. (This port is enabled after the Kerberos authentication is enabled.)
[Description] The default value is **21732**.
[Enabled by default after installation] User-defined | No | User name+password or keytab file authentication | AES-256 algorithm is used for encryption. | Maintenance plane | MogDB 1.0.0 | None | Security |
-| CMServer | IP address of the node where the source device is located. | 1024–65535 | CM Server | IP address of the node where the CM Server is located. | cmServerPortHa (cmServerPortBase+1) | TCP | [Function] Port for internal communication between CMSs.
[Description] The port number ranges from 1024 to 65535. The source port number is a random port number. If the destination port number is not set in the XML file, the default port number is the value of **cmServerPortBase** plus 1. If the destination port number is set, the value is used.
[Enabled by default after installation] Yes | Yes | IP address authentication or IP address authentication + SSL certificate authentication | SSL encryption | Maintenance plane | MogDB 3.0.0 | None | CM | -| Prometheus server | IP address of the node where the source device is located. | 1024–65535 | Prometheus exporter | IP address of the node where the exporter is located. | Specified by the exporter parameter **–web.listen-port**. | HTTPS/HTTP | [Function] Port for the open-source monitoring system Prometheus to collect and process monitoring information.
[Description] The default value is **9187** for openGauss-exporter and **8181** for reprocessing-exporter.
[Enabled by default after installation] No. The port is user-defined. | Yes | Prometheus server supports SSL certificate authentication, but Prometheus exporter does not support certificate authentication. | SSL encryption | User plane | MogDB 3.0.0 | None | AI | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/1-1-stored-procedure.md b/product/en/docs-mogdb/v5.2/developer-guide/1-1-stored-procedure.md deleted file mode 100644 index 3c3d400a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/1-1-stored-procedure.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Stored Procedure -summary: Stored Procedure -author: Guo Huan -date: 2021-03-04 ---- - -# Stored Procedure - -In MogDB, business rules and logics are saved as stored procedures. - -A stored procedure is a combination of SQL and PL/pgSQL. Stored procedures can move the code that executes business rules from applications to databases. Therefore, the code storage can be used by multiple programs at a time. - -For details about how to create and call a stored procedure, see [CREATE PROCEDURE](../reference-guide/sql-syntax/CREATE-PROCEDURE.md). - -The application methods for PL/pgSQL functions mentioned in [User-defined Functions](user-defined-functions.md) are similar to those for stored procedures. For details, please refer to [PL/pgSQL-SQL Procedural Language](../developer-guide/plpgsql/1-1-plpgsql-overview.md) section, unless otherwise specified, the contents apply to stored procedures and user-defined functions. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/1-introduction-to-autonomous-transaction.md b/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/1-introduction-to-autonomous-transaction.md deleted file mode 100644 index 8b7308b5..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/1-introduction-to-autonomous-transaction.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Autonomous Transaction -summary: Autonomous Transaction -author: Zhang Cuiping -date: 2021-05-10 ---- - -# Autonomous Transaction - -An autonomous transaction is an independent transaction that is started during the execution of a primary transaction. Committing and rolling back an autonomous transaction does not affect the data that has been committed by the primary transaction. In addition, an autonomous transaction is not affected by the primary transaction. - -Autonomous transactions are defined in stored procedures, functions, and anonymous blocks, and are declared using the **PRAGMA AUTONOMOUS_TRANSACTION** keyword. 
- -+ **[Stored Procedure Supporting Autonomous Transaction](3-stored-procedure-supporting-autonomous-transaction.md)** -+ **[Anonymous Block Supporting Autonomous Transaction](anonymous-block-supporting-autonomous-transaction.md)** -+ **[Function Supporting Autonomous Transaction](2-function-supporting-autonomous-transaction.md)** -+ **[Restrictions](4-restrictions.md)** diff --git a/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/2-function-supporting-autonomous-transaction.md b/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/2-function-supporting-autonomous-transaction.md deleted file mode 100644 index 50bc79d4..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/2-function-supporting-autonomous-transaction.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Function Supporting Autonomous Transaction -summary: Function Supporting Autonomous Transaction -author: Zhang Cuiping -date: 2021-05-10 ---- - -# Function Supporting Autonomous Transaction - -An autonomous transaction can be defined in a function. The identifier of an autonomous transaction is **PRAGMA AUTONOMOUS_TRANSACTION**. The syntax of an autonomous transaction is the same as that of creating a function. The following is an example. - -```sql -create table t4(a int, b int, c text); - -CREATE OR REPLACE function autonomous_32(a int ,b int ,c text) RETURN int AS -DECLARE - PRAGMA AUTONOMOUS_TRANSACTION; -BEGIN - insert into t4 values(a, b, c); - return 1; -END; -/ -CREATE OR REPLACE function autonomous_33(num1 int) RETURN int AS -DECLARE - num3 int := 220; - tmp int; - PRAGMA AUTONOMOUS_TRANSACTION; -BEGIN - num3 := num3/num1; - return num3; -EXCEPTION - WHEN division_by_zero THEN - select autonomous_32(num3, num1, sqlerrm) into tmp; - return 0; -END; -/ - -select autonomous_33(0); - -select * from t4; -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/3-stored-procedure-supporting-autonomous-transaction.md b/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/3-stored-procedure-supporting-autonomous-transaction.md deleted file mode 100644 index b44707cb..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/3-stored-procedure-supporting-autonomous-transaction.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Stored Procedure Supporting Autonomous Transaction -summary: Stored Procedure Supporting Autonomous Transaction -author: Zhang Cuiping -date: 2021-05-10 ---- - -# Stored Procedure Supporting Autonomous Transaction - -An autonomous transaction can be defined in a stored procedure. The identifier of an autonomous transaction is **PRAGMA AUTONOMOUS_TRANSACTION**. The syntax of an autonomous transaction is the same as that of creating a stored procedure. The following is an example. - -```sql --- Create a table. -create table t2(a int, b int); -insert into t2 values(1,2); -select * from t2; - --- Create a stored procedure that contains an autonomous transaction. -CREATE OR REPLACE PROCEDURE autonomous_4(a int, b int) AS -DECLARE - num3 int := a; - num4 int := b; - PRAGMA AUTONOMOUS_TRANSACTION; -BEGIN - insert into t2 values(num3, num4); -END; -/ --- Create a common stored procedure that invokes an autonomous transaction stored procedure. -CREATE OR REPLACE PROCEDURE autonomous_5(a int, b int) AS -DECLARE -BEGIN - insert into t2 values(666, 666); - autonomous_4(a,b); - rollback; -END; -/ --- Invoke a common stored procedure. -select autonomous_5(11,22); --- View the table result. 
-select * from t2 order by a; -``` - -In the preceding example, a stored procedure containing an autonomous transaction is finally executed in a transaction block to be rolled back, which directly illustrates a characteristic of the autonomous transaction, that is, rollback of the primary transaction does not affect content that has been committed by the autonomous transaction. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/4-restrictions.md b/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/4-restrictions.md deleted file mode 100644 index e15ba978..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/4-restrictions.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Restrictions -summary: Restrictions -author: Zhang Cuiping -date: 2021-05-10 ---- - -# Restrictions - -- A trigger function does not support autonomous transactions. - -- In the autonomous transaction block of a function or stored procedure, static SQL statements do not support variable transfer. - - ```sql - -- Autonomous transactions do not support the execution of the following functions. The SQL statement contains the variable i. - CREATE OR REPLACE FUNCTION autonomous_easy_2(i int) RETURNS integer - LANGUAGE plpgsql - AS $$ - DECLARE - PRAGMA AUTONOMOUS_TRANSACTION; - BEGIN - START TRANSACTION; - INSERT INTO test1 VALUES (i, 'test'); - COMMIT; - RETURN 42; - END; - $$; - -- To use the parameter transfer, use the dynamic statement EXECUTE to replace variables. The following is an example: - CREATE OR REPLACE FUNCTION autonomous_easy(i int) RETURNS integer - LANGUAGE plpgsql - AS $$ - DECLARE - PRAGMA AUTONOMOUS_TRANSACTION; - BEGIN - START TRANSACTION; - EXECUTE 'INSERT INTO test1 VALUES (' || i::integer || ', ''test'')'; - COMMIT; - RETURN 42; - END; - $$; - ``` - -- Autonomous transactions do not support nesting. - - > **NOTICE:** In a function that contains an autonomous transaction, it is not allowed to explicitly execute another function or stored procedure that contains an autonomous transaction through **PERFORM**, **SELECT**, or **CALL**. However, another function or stored procedure that contains an autonomous transaction can be explicitly called in the last **RETURN**. - -- A function containing an autonomous transaction does not support the return value of parameter transfer. - - ```sql - -- In the following example, the return value ret is not transferred and only null is returned. - create or replace function at_test2(i int) returns text - LANGUAGE plpgsql - as $$ - declare - ret text; - pragma autonomous_transaction; - begin - START TRANSACTION; - insert into at_tb2 values(1, 'before s1'); - if i > 10 then - rollback; - else - commit; - end if; - select val into ret from at_tb2 where id=1; - return ret; - end; - $$; - ``` - -- A stored procedure or function that contains an autonomous transaction does not support exception handling. - -- A trigger function does not support autonomous transactions. 
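-
-For the nesting restriction above, the allowed pattern (an explicit call only in the last **RETURN**) can be illustrated by the following minimal sketch. This is an illustrative addition: the function names are placeholders, and both functions declare **PRAGMA AUTONOMOUS_TRANSACTION**.
-
-```sql
-create or replace function at_inner() returns integer
-LANGUAGE plpgsql
-as $$
-declare
-    pragma autonomous_transaction;
-begin
-    return 1;
-end;
-$$;
-
-create or replace function at_outer() returns integer
-LANGUAGE plpgsql
-as $$
-declare
-    pragma autonomous_transaction;
-begin
-    -- Invoking at_inner() here through PERFORM, SELECT, or CALL would
-    -- violate the nesting restriction described above.
-    return at_inner();  -- Allowed: the call appears only in the last RETURN.
-end;
-$$;
-```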
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/anonymous-block-supporting-autonomous-transaction.md b/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/anonymous-block-supporting-autonomous-transaction.md deleted file mode 100644 index 483a9b1a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/autonomous-transaction/anonymous-block-supporting-autonomous-transaction.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Anonymous Block Supporting Autonomous Transaction -summary: Anonymous Block Supporting Autonomous Transaction -author: Guo Huan -date: 2021-10-15 ---- - -# Anonymous Block Supporting Autonomous Transaction - -An autonomous transaction can be defined in an anonymous block. The identifier of an autonomous transaction is **PRAGMA AUTONOMOUS_TRANSACTION**. The syntax of an autonomous transaction is the same as that of creating an anonymous block. The following is an example. - -```sql -MogDB=# create table t1(a int ,b text); -CREATE TABLE - -START TRANSACTION; -DECLARE - PRAGMA AUTONOMOUS_TRANSACTION; -BEGIN - - insert into t1 values(1,'you are so cute,will commit!'); -END; -/ -MogDB=# insert into t1 values(1,'you will rollback!'); -INSERT 0 1 -MogDB=# rollback; -ROLLBACK - -MogDB=# select * from t1; -a | b ----+------------------------------ -1 | you are so cute,will commit! -(1 row) -``` - -In the preceding example, an anonymous block containing an autonomous transaction is finally executed before a transaction block to be rolled back, which directly illustrates a characteristic of the autonomous transaction, that is, rollback of the primary transaction does not affect content that has been committed by the autonomous transaction. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/1-development-based-on-jdbc-overview.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/1-development-based-on-jdbc-overview.md deleted file mode 100644 index 74f232a2..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/1-development-based-on-jdbc-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2021-04-26 ---- - -# Overview - -Java Database Connectivity (JDBC) is a Java API for running SQL statements. It provides unified access interfaces for different relational databases, based on which applications process data. MogDB supports JDBC 4.0 and requires JDK 1.8 for code compiling. It does not support JDBC-ODBC bridge. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/10-example-common-operations.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/10-example-common-operations.md deleted file mode 100644 index bada212f..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/10-example-common-operations.md +++ /dev/null @@ -1,280 +0,0 @@ ---- -title: Example Common Operations -summary: Example Common Operations -author: Guo Huan -date: 2021-04-26 ---- - -# Example: Common Operations - -**Example 1:** - -The following illustrates how to develop applications based on MogDB JDBC interfaces. - -```java -//DBtest.java -// This example illustrates the main processes of JDBC-based development, covering database connection creation, table creation, and data insertion. 
- -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.CallableStatement; -import java.sql.Types; - -public class DBTest { - - // Create a database connection. - public static Connection GetConnection(String username, String passwd) { - String driver = "org.opengauss.Driver"; - String sourceURL = "jdbc:opengauss://localhost:8000/postgres"; - Connection conn = null; - try { - // Load the database driver. - Class.forName(driver).newInstance(); - } catch (Exception e) { - e.printStackTrace(); - return null; - } - - try { - // Create a database connection. - conn = DriverManager.getConnection(sourceURL, username, passwd); - System.out.println("Connection succeed!"); - } catch (Exception e) { - e.printStackTrace(); - return null; - } - - return conn; - }; - - // Run a common SQL statement to create table customer_t1. - public static void CreateTable(Connection conn) { - Statement stmt = null; - try { - stmt = conn.createStatement(); - - // Run a common SQL statement. - int rc = stmt - .executeUpdate("CREATE TABLE customer_t1(c_customer_sk INTEGER, c_customer_name VARCHAR(32));"); - - stmt.close(); - } catch (SQLException e) { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException e1) { - e1.printStackTrace(); - } - } - e.printStackTrace(); - } - } - - // Run a prepared statement to insert data in batches. - public static void BatchInsertData(Connection conn) { - PreparedStatement pst = null; - - try { - // Generate a prepared statement. - pst = conn.prepareStatement("INSERT INTO customer_t1 VALUES (?,?)"); - for (int i = 0; i < 3; i++) { - // Add parameters. - pst.setInt(1, i); - pst.setString(2, "data " + i); - pst.addBatch(); - } - // Perform batch processing. - pst.executeBatch(); - pst.close(); - } catch (SQLException e) { - if (pst != null) { - try { - pst.close(); - } catch (SQLException e1) { - e1.printStackTrace(); - } - } - e.printStackTrace(); - } - } - - // Run a prepared statement to update data. - public static void ExecPreparedSQL(Connection conn) { - PreparedStatement pstmt = null; - try { - pstmt = conn - .prepareStatement("UPDATE customer_t1 SET c_customer_name = ? WHERE c_customer_sk = 1"); - pstmt.setString(1, "new Data"); - int rowcount = pstmt.executeUpdate(); - pstmt.close(); - } catch (SQLException e) { - if (pstmt != null) { - try { - pstmt.close(); - } catch (SQLException e1) { - e1.printStackTrace(); - } - } - e.printStackTrace(); - } - } - - -// Run a stored procedure. - public static void ExecCallableSQL(Connection conn) { - CallableStatement cstmt = null; - try { - // 存储过程TESTPROC需提前创建。 - cstmt=conn.prepareCall("{? = CALL TESTPROC(?,?,?)}"); - cstmt.setInt(2, 50); - cstmt.setInt(1, 20); - cstmt.setInt(3, 90); - cstmt.registerOutParameter(4, Types.INTEGER); // Register an OUT parameter of the integer type. - cstmt.execute(); - int out = cstmt.getInt(4); // Obtain the OUT parameter. - System.out.println("The CallableStatment TESTPROC returns:"+out); - cstmt.close(); - } catch (SQLException e) { - if (cstmt != null) { - try { - cstmt.close(); - } catch (SQLException e1) { - e1.printStackTrace(); - } - } - e.printStackTrace(); - } - } - - - /** - *Main process. Call static methods one by one. - * @param args - */ - public static void main(String[] args) { - // Create a database connection. - Connection conn = GetConnection("tester", "Password1234"); - - // Create a table. - CreateTable(conn); - - // Insert data in batches. 
- BatchInsertData(conn); - - // Run a prepared statement to update data. - ExecPreparedSQL(conn); - - // Run a stored procedure. - ExecCallableSQL(conn); - - // Close the connection to the database. - try { - conn.close(); - } catch (SQLException e) { - e.printStackTrace(); - } - - } - -} -``` - -**Example 2 High Client Memory Usage** - -In this example, **setFetchSize** adjusts the memory usage of the client by using the database cursor to obtain server data in batches. It may increase network interaction and deteriorate some performance. - -The cursor is valid within a transaction. Therefore, disable automatic commit and then manually commit the code. - -```java -// Disable automatic commit. -conn.setAutoCommit(false); -Statement st = conn.createStatement(); - -// Open the cursor and obtain 50 lines of data each time. -st.setFetchSize(50); -ResultSet rs = st.executeQuery("SELECT * FROM mytable"); - -while (rs.next()) -{ - System.out.print("a row was returned."); -} -conn.commit(); -rs.close(); - -// Disable the server cursor. -st.setFetchSize(0); -rs = st.executeQuery("SELECT * FROM mytable"); - -while (rs.next()) -{ - System.out.print("many rows were returned."); -} -conn.commit(); -rs.close(); - -// Close the statement. -st.close(); -conn.close(); -``` - -Run the following command to enable automatic commit: - -```java -conn.setAutoCommit(true); -``` - -**Example 3 Example of Common Data Types** - -```java -//Example of the bit type. Note that the value range of the bit type is [0,1]. -Statement st = conn.createStatement(); -String sqlstr = "create or replace function fun_1()\n" + - "returns bit AS $$\n" + - "select col_bit from t_bit limit 1;\n" + - "$$\n" + - "LANGUAGE SQL;"; -st.execute(sqlstr); -CallableStatement c = conn.prepareCall("{ ? = call fun_1() }"); -//Register the output type, which is a bit string. -c.registerOutParameter(1, Types.BIT); -c.execute(); -//Use the Boolean type to obtain the result. -System.out.println(c.getBoolean(1)); - -// Example of using the money type -// Example of using a column of the money type in the table structure. -st.execute("create table t_money(col1 money)"); -PreparedStatement pstm = conn.prepareStatement("insert into t_money values(?)"); -// Use PGobject to assign a value. The value range is [-92233720368547758.08,92233720368547758.07]. -PGobject minMoney = new PGobject(); -minMoney.setType("money"); -minMoney.setValue("-92233720368547758.08"); -pstm.setObject(1, minMoney); -pstm.execute(); -// Use PGMoney to assign a value. The value range is [-9999999.99,9999999.99]. -pstm.setObject(1,new PGmoney(9999999.99)); -pstm.execute(); - -// Example of using the function whose return value is money. -st.execute("create or replace function func_money() " + - "return money " + - "as declare " + - "var1 money; " + - "begin " + - " select col1 into var1 from t_money limit 1; " + - " return var1; " + - "end;"); -CallableStatement cs = conn.prepareCall("{? 
-cs.registerOutParameter(1, Types.DOUBLE);
-cs.execute();
-cs.getObject(1);
-```
-
-**Example 4 Obtaining the Driver Version**
-
-```java
-Driver.getGSVersion();
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/11-example-retrying-sql-queries-for-applications.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/11-example-retrying-sql-queries-for-applications.md
deleted file mode 100644
index b8946013..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/11-example-retrying-sql-queries-for-applications.md
+++ /dev/null
@@ -1,205 +0,0 @@
----
-title: Example Retrying SQL Queries for Applications
-summary: Example Retrying SQL Queries for Applications
-author: Guo Huan
-date: 2021-04-26
----
-
-# Example Retrying SQL Queries for Applications
-
-If the primary database node is faulty and cannot be recovered within 10 seconds, the standby database node automatically switches to the active state to keep MogDB running. Jobs that are running during the switchover will fail, while jobs that start after the switchover are not affected. To prevent upper-layer services from being affected by the failover, refer to the following example to construct an SQL retry mechanism at the service layer.
-
-```java
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-
-class ExitHandler extends Thread {
-    private Statement cancel_stmt = null;
-
-    public ExitHandler(Statement stmt) {
-        super("Exit Handler");
-        this.cancel_stmt = stmt;
-    }
-    public void run() {
-        System.out.println("exit handle");
-        try {
-            this.cancel_stmt.cancel();
-        } catch (SQLException e) {
-            System.out.println("cancel query failed.");
-            e.printStackTrace();
-        }
-    }
-}
-
-public class SQLRetry {
-    // Create a database connection.
-    public static Connection GetConnection(String username, String passwd) {
-        String driver = "org.opengauss.Driver";
-        String sourceURL = "jdbc:opengauss://10.131.72.136:8000/postgres";
-        Connection conn = null;
-        try {
-            // Load the database driver.
-            Class.forName(driver).newInstance();
-        } catch (Exception e) {
-            e.printStackTrace();
-            return null;
-        }
-
-        try {
-            // Create a database connection.
-            conn = DriverManager.getConnection(sourceURL, username, passwd);
-            System.out.println("Connection succeed!");
-        } catch (Exception e) {
-            e.printStackTrace();
-            return null;
-        }
-
-        return conn;
-    }
-
-    // Run a common SQL statement. Create the jdbc_test1 table.
-    public static void CreateTable(Connection conn) {
-        Statement stmt = null;
-        try {
-            stmt = conn.createStatement();
-
-            Runtime.getRuntime().addShutdownHook(new ExitHandler(stmt));
-
-            // Run a common SQL statement.
-            int rc2 = stmt
-                    .executeUpdate("DROP TABLE if exists jdbc_test1;");
-
-            int rc1 = stmt
-                    .executeUpdate("CREATE TABLE jdbc_test1(col1 INTEGER, col2 VARCHAR(10));");
-
-            stmt.close();
-        } catch (SQLException e) {
-            if (stmt != null) {
-                try {
-                    stmt.close();
-                } catch (SQLException e1) {
-                    e1.printStackTrace();
-                }
-            }
-            e.printStackTrace();
-        }
-    }
-
-    // Run a prepared statement to insert data in batches.
-    public static void BatchInsertData(Connection conn) {
-        PreparedStatement pst = null;
-
-        try {
-            // Generate a prepared statement.
-            pst = conn.prepareStatement("INSERT INTO jdbc_test1 VALUES (?,?)");
-            for (int i = 0; i < 100; i++) {
-                // Add parameters.
-                pst.setInt(1, i);
-                pst.setString(2, "data " + i);
-                pst.addBatch();
-            }
-            // Perform batch processing.
-            pst.executeBatch();
-            pst.close();
-        } catch (SQLException e) {
-            if (pst != null) {
-                try {
-                    pst.close();
-                } catch (SQLException e1) {
-                    e1.printStackTrace();
-                }
-            }
-            e.printStackTrace();
-        }
-    }
-
-    // Run a prepared statement to query data.
-    private static boolean QueryRedo(Connection conn) {
-        PreparedStatement pstmt = null;
-        boolean retValue = false;
-        try {
-            pstmt = conn
-                    .prepareStatement("SELECT col1 FROM jdbc_test1 WHERE col2 = ?");
-
-            pstmt.setString(1, "data 10");
-            ResultSet rs = pstmt.executeQuery();
-
-            while (rs.next()) {
-                System.out.println("col1 = " + rs.getString("col1"));
-            }
-            rs.close();
-
-            pstmt.close();
-            retValue = true;
-        } catch (SQLException e) {
-            System.out.println("catch...... retValue " + retValue);
-            if (pstmt != null) {
-                try {
-                    pstmt.close();
-                } catch (SQLException e1) {
-                    e1.printStackTrace();
-                }
-            }
-            e.printStackTrace();
-        }
-
-        System.out.println("finish......");
-        return retValue;
-    }
-
-    // Configure the number of retry attempts for a query statement that fails to execute.
-    public static void ExecPreparedSQL(Connection conn) throws InterruptedException {
-        int maxRetryTime = 50;
-        int time = 0;
-        String result = null;
-        do {
-            time++;
-            try {
-                System.out.println("time:" + time);
-                boolean ret = QueryRedo(conn);
-                if (ret == false) {
-                    System.out.println("retry, time:" + time);
-                    Thread.sleep(10000); // Wait for the failover to complete before retrying.
-                    ret = QueryRedo(conn);
-                }
-                if (ret) {
-                    result = "success"; // Stop retrying once the query succeeds.
-                }
-            } catch (Exception e) {
-                e.printStackTrace();
-            }
-        } while (null == result && time < maxRetryTime);
-
-    }
-
-    /**
-     * Main process. Call static methods one by one.
-     * @param args
-     * @throws InterruptedException
-     */
-    public static void main(String[] args) throws InterruptedException {
-        // Create a database connection.
-        Connection conn = GetConnection("testuser", "test@123");
-
-        // Create a table.
-        CreateTable(conn);
-
-        // Insert data in batches.
-        BatchInsertData(conn);
-
-        // Run a prepared statement to query data, with retries upon failure.
-        ExecPreparedSQL(conn);
-
-        // Close the connection to the database.
-        try {
-            conn.close();
-        } catch (SQLException e) {
-            e.printStackTrace();
-        }
-
-    }
-
-}
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/12-example-importing-and-exporting-data-through-local-files.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/12-example-importing-and-exporting-data-through-local-files.md
deleted file mode 100644
index fd8e0637..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/12-example-importing-and-exporting-data-through-local-files.md
+++ /dev/null
@@ -1,119 +0,0 @@
----
-title: Example Importing and Exporting Data Through Local Files
-summary: Example Importing and Exporting Data Through Local Files
-author: Guo Huan
-date: 2021-04-26
----
-
-# Example Importing and Exporting Data Through Local Files
-
-When Java is used for secondary development based on MogDB, you can use the CopyManager interface to export data from the database to a local file or import a local file to the database by streaming. The file can be in CSV or TEXT format.
-
-The sample program is as follows. Load the MogDB JDBC driver before running it.
-
-```java
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.io.IOException;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.sql.SQLException;
-import org.opengauss.copy.CopyManager;
-import org.opengauss.core.BaseConnection;
-
-public class Copy {
-
-    public static void main(String[] args)
-    {
-        String urls = new String("jdbc:opengauss://10.180.155.74:8000/postgres"); // Database URL
-        String username = new String("jack");                 // Username
-        String password = new String("Enmo@123");             // Password
-        String tablename = new String("migration_table");     // Table information
-        String tablename1 = new String("migration_table_1");  // Table information
-        String driver = "org.opengauss.Driver";
-        Connection conn = null;
-
-        try {
-            Class.forName(driver);
-            conn = DriverManager.getConnection(urls, username, password);
-        } catch (ClassNotFoundException e) {
-            e.printStackTrace(System.out);
-        } catch (SQLException e) {
-            e.printStackTrace(System.out);
-        }
-
-        // Export the query result of SELECT * FROM migration_table to the local file d:/data.txt.
-        try {
-            copyToFile(conn, "d:/data.txt", "(SELECT * FROM migration_table)");
-        } catch (SQLException e) {
-            e.printStackTrace();
-        } catch (IOException e) {
-            e.printStackTrace();
-        }
-
-        // Import data from the d:/data.txt file to the migration_table_1 table.
-        try {
-            copyFromFile(conn, "d:/data.txt", tablename1);
-        } catch (SQLException e) {
-            e.printStackTrace();
-        } catch (IOException e) {
-            e.printStackTrace();
-        }
-
-        // Export the data from the migration_table_1 table to the d:/data1.txt file.
-        try {
-            copyToFile(conn, "d:/data1.txt", tablename1);
-        } catch (SQLException e) {
-            e.printStackTrace();
-        } catch (IOException e) {
-            e.printStackTrace();
-        }
-    }
-
-    // Use copyIn to import data from a file to the database.
-    public static void copyFromFile(Connection connection, String filePath, String tableName)
-            throws SQLException, IOException {
-
-        FileInputStream fileInputStream = null;
-
-        try {
-            CopyManager copyManager = new CopyManager((BaseConnection) connection);
-            fileInputStream = new FileInputStream(filePath);
-            copyManager.copyIn("COPY " + tableName + " FROM STDIN", fileInputStream);
-        } finally {
-            if (fileInputStream != null) {
-                try {
-                    fileInputStream.close();
-                } catch (IOException e) {
-                    e.printStackTrace();
-                }
-            }
-        }
-    }
-
-    // Use copyOut to export data from the database to a file.
- public static void copyToFile(Connection connection, String filePath, String tableOrQuery) - throws SQLException, IOException { - - FileOutputStream fileOutputStream = null; - - try { - CopyManager copyManager = new CopyManager((BaseConnection)connection); - fileOutputStream = new FileOutputStream(filePath); - copyManager.copyOut("COPY " + tableOrQuery + " TO STDOUT", fileOutputStream); - } finally { - if (fileOutputStream != null) { - try { - fileOutputStream.close(); - } catch (IOException e) { - e.printStackTrace(); - } - } - } - } -} -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/13-example-2-migrating-data-from-a-my-database-to-mogdb.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/13-example-2-migrating-data-from-a-my-database-to-mogdb.md deleted file mode 100644 index 9916d2e6..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/13-example-2-migrating-data-from-a-my-database-to-mogdb.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Example 2 Migrating Data from a MY Database to MogDB -summary: Example 2 Migrating Data from a MY Database to MogDB -author: Guo Huan -date: 2021-04-26 ---- - -# Example 2 Migrating Data from a MY Database to MogDB - -The following example shows how to use CopyManager to migrate data from MY to MogDB. - -```java -import java.io.StringReader; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; - -import org.opengauss.copy.CopyManager; -import org.opengauss.core.BaseConnection; - -public class Migration{ - - public static void main(String[] args) { - String url = new String("jdbc:opengauss://10.180.155.74:8000/postgres"); // Database URL - String user = new String("jack"); // MogDB username - String pass = new String("Enmo@123"); // MogDB password - String tablename = new String("migration_table"); // Table information - String delimiter = new String("|"); // Delimiter - String encoding = new String("UTF8"); // Character set - String driver = "org.opengauss.Driver"; - StringBuffer buffer = new StringBuffer(); // Buffer to store formatted data - - try { - // Obtain the query result set of the source database. - ResultSet rs = getDataSet(); - - // Traverse the result set and obtain records row by row. - // The values of columns in each record are separated by the specified delimiter and end with a linefeed, forming strings. - // Add the strings to the buffer. - while (rs.next()) { - buffer.append(rs.getString(1) + delimiter - + rs.getString(2) + delimiter - + rs.getString(3) + delimiter - + rs.getString(4) - + "\n"); - } - rs.close(); - - try { - // Connect to the target database. - Class.forName(driver); - Connection conn = DriverManager.getConnection(url, user, pass); - BaseConnection baseConn = (BaseConnection) conn; - baseConn.setAutoCommit(false); - - // Initialize the table. - String sql = "Copy " + tablename + " from STDIN DELIMITER " + "'" + delimiter + "'" + " ENCODING " + "'" + encoding + "'"; - - // Commit data in the buffer. 
-                CopyManager cp = new CopyManager(baseConn);
-                StringReader reader = new StringReader(buffer.toString());
-                cp.copyIn(sql, reader);
-                baseConn.commit();
-                reader.close();
-                baseConn.close();
-            } catch (ClassNotFoundException e) {
-                e.printStackTrace(System.out);
-            } catch (SQLException e) {
-                e.printStackTrace(System.out);
-            }
-
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
-    }
-
-    //********************************
-    // Return the query result set from the source database.
-    //********************************
-    private static ResultSet getDataSet() {
-        ResultSet rs = null;
-        try {
-            Class.forName("com.MY.jdbc.Driver").newInstance();
-            Connection conn = DriverManager.getConnection("jdbc:MY://10.119.179.227:3306/jack?useSSL=false&allowPublicKeyRetrieval=true", "jack", "Enmo@123");
-            Statement stmt = conn.createStatement();
-            rs = stmt.executeQuery("select * from migration_table");
-        } catch (SQLException e) {
-            e.printStackTrace();
-        } catch (Exception e) {
-            e.printStackTrace();
-        }
-        return rs;
-    }
-}
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/14-example-logic-replication-code.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/14-example-logic-replication-code.md
deleted file mode 100644
index b7c9d88b..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/14-example-logic-replication-code.md
+++ /dev/null
@@ -1,181 +0,0 @@
----
-title: Example Logic Replication Code
-summary: Example Logic Replication Code
-author: Guo Huan
-date: 2021-04-26
----
-
-# Example Logic Replication Code
-
-The following example demonstrates how to use the logical replication function through the JDBC APIs.
-
-For logical replication, in addition to the configuration items described in section [Logical Decoding](../../logical-replication/logical-decoding/1-logical-decoding.md), the following configuration items are available for streaming decoding tools such as JDBC:
-
-1. Decoding thread concurrency
-
-   Set **parallel-decode-num** to specify the number of decoder threads for parallel decoding. The value is an integer ranging from 1 to 20. The value **1** indicates that decoding is performed based on the original serial logic; any other value enables parallel decoding. The default value is **1**. When this parameter is set to **1**, do not configure the following options: **decode-style**, **sending-batch**, and **parallel-queue-size**.
-
-2. Decoding format
-
-   Configure **decode-style** to specify the decoding format. The value can be **'j'**, **'t'**, or **'b'** of the char type, indicating the JSON, text, or binary format, respectively. The default value is **'b'**, indicating binary decoding. This option takes effect only when parallel decoding is enabled; binary decoding is supported only in the parallel decoding scenario. For the JSON and text formats, when decoding results are sent in batches, the uint32 formed by the first four bytes of each decoded statement indicates the total number of bytes of the statement (excluding the four bytes occupied by the uint32 itself; **0** indicates that the decoding of this batch ends), and the following 8-byte uint64 indicates the corresponding LSN (**begin** corresponds to **first\_lsn**, **commit** corresponds to **end\_lsn**, and other values correspond to the LSN of the statement).
-
-   > **NOTE:**
-   > The binary encoding rules are as follows:
-   >
-   > 1. The first four bytes represent the total number of bytes of the decoding result that follows the statement-level delimiter letter P (excluded) or the batch end character F (excluded). If the value is **0**, the decoding of this batch ends.
-   > 2. The next eight bytes (uint64) indicate the corresponding LSN (**begin** corresponds to **first\_lsn**, **commit** corresponds to **end\_lsn**, and other values correspond to the LSN of the statement).
-   > 3. The following 1-byte letter can be **B**, **C**, **I**, **U**, or **D**, representing BEGIN, COMMIT, INSERT, UPDATE, or DELETE.
-   > 4. If the letter described in step 3 is **B**:
-   >    a. The following eight bytes (uint64) indicate the CSN.
-   >    b. The following eight bytes (uint64) indicate first_lsn.
-   >    c. (Optional) If the next 1-byte letter is **T**, the following four bytes (uint32) indicate the length of the commit timestamp of the transaction. The following characters of that length are the timestamp character string.
-   >    d. Because a decoded statement may still follow, a 1-byte letter **P** or **F** is used as a separator between statements. **P** indicates that there are still decoded statements in this batch, and **F** indicates that this batch is complete.
-   > 5. If the letter described in step 3 is **C**:
-   >    a. (Optional) If the next 1-byte letter is **X**, the following eight bytes (uint64) indicate the XID.
-   >    b. (Optional) If the next 1-byte letter is **T**, the following four bytes (uint32) indicate the timestamp length. The following characters of that length are the timestamp character string.
-   >    c. When logs are sent in batches, decoding results of other transactions may still exist after a COMMIT log is decoded. If the next 1-byte letter is **P**, the batch still needs to be decoded. If the letter is **F**, the batch decoding ends.
-   > 6. If the letter described in step 3 is **I**, **U**, or **D**:
-   >    a. The following two bytes (uint16) indicate the length of the schema name.
-   >    b. The schema name is read based on the preceding length.
-   >    c. The following two bytes (uint16) indicate the length of the table name.
-   >    d. The table name is read based on the preceding length.
-   >    e. (Optional) If the next 1-byte letter is **N**, it indicates a new tuple. If the letter is **O**, it indicates an old tuple. In this case, the new tuple is sent first.
-   >    i. The following two bytes (uint16) indicate the number of columns to be decoded for the tuple, which is recorded as **attrnum**.
-   >    ii. The following procedure is repeated **attrnum** times.
-   >    (1). The next two bytes (uint16) indicate the length of the column name.
-   >    (2). The column name is read based on the preceding length.
-   >    (3). The following four bytes (uint32) indicate the OID of the current column type.
-   >    (4). The next four bytes (uint32) indicate the length of the value (stored in the character string format) in the current column. If the value is **0xFFFFFFFF**, it indicates null. If the value is **0**, it indicates a character string whose length is 0.
-   >    (5). The column value is read based on the preceding length.
-   >    f. Because a decoded statement may still follow, the next 1-byte letter is read: **P** indicates that the batch still needs to be decoded, and **F** indicates that decoding of the batch ends.
-
-3. Decoding only on the standby node
-
-   Configure the **standby-connection** option to specify whether to perform decoding only on the standby node. The value is of the Boolean type (**0** or **1**). The value **true** (or **1**) indicates that only the standby node can be connected for decoding; when the primary node is connected for decoding, an error is reported and the system exits. The value **false** (or **0**) indicates that there is no restriction. The default value is **false** (**0**).
-
-4. Batch sending
-
-   Configure **sending-batch** to specify whether to send results in batches. The value is an integer, either 0 or 1. The value **0** indicates that decoding results are sent one by one. The value **1** indicates that decoding results are sent in batches once their accumulated size reaches 1 MB. The default value is **0**. This parameter can be set only during parallel decoding. When batch sending is enabled and the decoding format is 'j' or 't', each original decoded statement is preceded by a uint32 indicating the length of the decoding result (excluding the uint32 itself) and a uint64 indicating the LSN corresponding to the current decoding result.
-
-5. Length of the parallel decoding queue
-
-   Configure **parallel-queue-size** to specify the length of the queue for interaction among parallel logical decoding threads. The value ranges from 2 to 1024 and must be a power of 2. The default value is **128**. The queue length is positively correlated with the memory usage during decoding.
-
-6. Memory threshold for logical decoding
-
-   The **max-txn-in-memory** configuration item specifies the memory threshold for caching the intermediate decoding result of a single transaction, in MB. The value ranges from 0 to 100. The default value is **0**, indicating that the memory usage is not controlled. The **max-reorderbuffer-in-memory** configuration item specifies the memory threshold for caching the intermediate decoding results of all transactions, in GB. The value ranges from 0 to 100. The default value is **0**, indicating that the memory usage is not controlled. When the memory usage exceeds the threshold, intermediate decoding results are written into a temporary file during decoding, which affects logical decoding performance.
-
-7. Logical decoding sending timeout threshold
-
-   The **sender-timeout** configuration item specifies the heartbeat timeout threshold between the kernel and the client. If no message is received from the client within the period, logical decoding stops and disconnects from the client. The unit is ms, and the value range is [0,2147483647]. The default value depends on the value of **logical\_sender\_timeout**.
-
-The decoding performance (Xlog consumption) is greater than or equal to 100 MB/s in the following standard parallel decoding scenario: 16-core CPU, 128 GB memory, network bandwidth greater than 200 MB/s, 10 to 100 columns per table, 0.1 KB to 1 KB of data per row, DML operations are mainly INSERT operations, the number of statements in a single transaction is less than 4096, **parallel-decode-num** is set to **8**, the decoding format is **'b'**, and batch sending is enabled. To ensure that the decoding performance meets the requirements and to minimize the impact on services, you are advised to set up only one parallel decoding connection on a standby node and ensure that CPU, memory, and bandwidth resources are sufficient.
-
-Note: The logical replication class PGReplicationStream is non-thread-safe. Concurrent calls may cause data exceptions.
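-
-For client-side consumers of the binary format, the following minimal sketch shows how the per-statement framing described above could be read from a ByteBuffer. It is illustrative only and not part of the driver API: it assumes the uint32 length covers the LSN, the operation tag, and the payload, and it skips the type-specific payload rather than parsing it.
-
-```java
-import java.nio.ByteBuffer;
-
-public class BinaryFrameSketch {
-    // Reads one batch framed as: uint32 length (0 = end of batch), uint64 LSN,
-    // 1-byte tag (B/C/I/U/D), payload, then a 1-byte separator:
-    // 'P' (more statements follow) or 'F' (batch finished).
-    public static void readBatch(ByteBuffer buf) {
-        while (buf.remaining() >= 4) {
-            int length = buf.getInt();   // Total bytes of this decoded statement.
-            if (length == 0) {
-                break;                   // End of this batch.
-            }
-            long lsn = buf.getLong();    // begin -> first_lsn, commit -> end_lsn, else statement LSN.
-            byte tag = buf.get();        // 'B', 'C', 'I', 'U' or 'D'.
-            byte[] payload = new byte[length - Long.BYTES - 1];
-            buf.get(payload);            // Type-specific fields; parsing omitted here.
-            System.out.printf("tag=%c lsn=%d payloadBytes=%d%n", (char) tag, lsn, payload.length);
-            if (!buf.hasRemaining()) {
-                break;
-            }
-            byte sep = buf.get();        // Statement separator.
-            if (sep == 'F') {
-                break;                   // Batch complete.
-            }
-        }
-    }
-}
-```
-
-The complete JDBC demo follows.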
-
-```java
-// Logical replication function example: file name, LogicalReplicationDemo.java
-// Prerequisite: Add the IP address of the JDBC user machine to the database whitelist. Add the following content to pg_hba.conf:
-// Assume that the IP address of the JDBC user machine is 10.10.10.10.
-// host all all 10.10.10.10/32 sha256
-// host replication all 10.10.10.10/32 sha256
-
-import org.opengauss.PGProperty;
-import org.opengauss.jdbc.PgConnection;
-import org.opengauss.replication.LogSequenceNumber;
-import org.opengauss.replication.PGReplicationStream;
-
-import java.nio.ByteBuffer;
-import java.sql.DriverManager;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;
-
-public class LogicalReplicationDemo {
-    public static void main(String[] args) {
-        String driver = "org.opengauss.Driver";
-        // Configure the IP address and haPort number of the database. By default, the port number is the port number of the connected DN plus 1.
-        String sourceURL = "jdbc:opengauss://$ip:$port/postgres";
-        // The default name of the logical replication slot is replication_slot.
-        // Test mode: Create a logical replication slot.
-        int TEST_MODE_CREATE_SLOT = 1;
-        // Test mode: Enable logical replication (the logical replication slot must already exist).
-        int TEST_MODE_START_REPL = 2;
-        // Test mode: Delete a logical replication slot.
-        int TEST_MODE_DROP_SLOT = 3;
-        // Enable different test modes.
-        int testMode = TEST_MODE_START_REPL;
-        // Replication connection.
-        PgConnection conn = null;
-
-        try {
-            Class.forName(driver);
-        } catch (Exception e) {
-            e.printStackTrace();
-            return;
-        }
-
-        try {
-            Properties properties = new Properties();
-            PGProperty.USER.set(properties, "user");
-            PGProperty.PASSWORD.set(properties, "passwd");
-            // For logical replication, the following three attributes are mandatory.
-            PGProperty.ASSUME_MIN_SERVER_VERSION.set(properties, "9.4");
-            PGProperty.REPLICATION.set(properties, "database");
-            PGProperty.PREFER_QUERY_MODE.set(properties, "simple");
-            conn = (PgConnection) DriverManager.getConnection(sourceURL, properties);
-            System.out.println("connection success!");
-
-            if (testMode == TEST_MODE_CREATE_SLOT) {
-                conn.getReplicationAPI()
-                        .createReplicationSlot()
-                        .logical()
-                        .withSlotName("replication_slot") // If the character string contains uppercase letters, they are automatically converted to lowercase letters.
-                        .withOutputPlugin("mppdb_decoding")
-                        .make();
-            } else if (testMode == TEST_MODE_START_REPL) {
-                // Create a replication slot before enabling this mode.
-                LogSequenceNumber waitLSN = LogSequenceNumber.valueOf("6F/E3C53568");
-                PGReplicationStream stream = conn
-                        .getReplicationAPI()
-                        .replicationStream()
-                        .logical()
-                        .withSlotName("replication_slot")
-                        .withSlotOption("include-xids", false)
-                        .withSlotOption("skip-empty-xacts", true)
-                        .withStartPosition(waitLSN)
-                        .withSlotOption("parallel-decode-num", 10) // Decoding thread concurrency.
-                        .withSlotOption("white-table-list", "public.t1,public.t2") // Whitelist.
-                        .withSlotOption("standby-connection", true) // Forcible standby decoding.
-                        .withSlotOption("decode-style", "t") // Decoding format.
-                        .withSlotOption("sending-batch", 1) // Decoding results are sent in batches.
-                        .withSlotOption("max-txn-in-memory", 100) // The memory threshold for flushing a single decoding transaction to disk is 100 MB.
-                        .withSlotOption("max-reorderbuffer-in-memory", 50) // The total memory threshold for flushing all in-progress decoding transactions to disk is 50 GB.
-                        .start();
-                while (true) {
-                    ByteBuffer byteBuffer = stream.readPending();
-
-                    if (byteBuffer == null) {
-                        TimeUnit.MILLISECONDS.sleep(10L);
-                        continue;
-                    }
-
-                    int offset = byteBuffer.arrayOffset();
-                    byte[] source = byteBuffer.array();
-                    int length = source.length - offset;
-                    System.out.println(new String(source, offset, length));
-
-                    // If the LSN needs to be flushed, call the following APIs based on the service requirements:
-                    // LogSequenceNumber lastRecv = stream.getLastReceiveLSN();
-                    // stream.setFlushedLSN(lastRecv);
-                    // stream.forceUpdateStatus();
-                }
-            } else if (testMode == TEST_MODE_DROP_SLOT) {
-                conn.getReplicationAPI()
-                        .dropReplicationSlot("replication_slot");
-            }
-        } catch (Exception e) {
-            e.printStackTrace();
-            return;
-        }
-    }
-}
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/14.1-example-parameters-for-connecting-to-the-database-in-different-scenarios.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/14.1-example-parameters-for-connecting-to-the-database-in-different-scenarios.md
deleted file mode 100644
index 1d5542d6..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/14.1-example-parameters-for-connecting-to-the-database-in-different-scenarios.md
+++ /dev/null
@@ -1,60 +0,0 @@
----
-title: Parameters for Connecting to the Database in Different Scenarios
-summary: Parameters for Connecting to the Database in Different Scenarios
-author: Zhang Cuiping
-date: 2021-10-11
----
-
-# Example: Parameters for Connecting to the Database in Different Scenarios
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** In the following example, **host:port** represents a node, where **host** indicates the name or IP address of the server where the database resides, and **port** indicates the port number of the server where the database resides.
-
-## DR
-
-A customer has two database instances. Database instance A is the production database instance, and database instance B is the DR database instance. When the customer performs a DR switchover, database instance A is demoted to the DR database instance, and database instance B is promoted to the production database instance. To avoid restarting or re-releasing the application because of configuration file changes, the customer can write both database instances A and B into the connection string when initializing the configuration file. If the primary database instance cannot be connected, the driver attempts to connect to the DR database instance. For example, database instance A consists of *node1*, *node2*, and *node3*, and database instance B consists of *node4*, *node5*, and *node6*.
-
-The URL can be configured as follows:
-
-```
-jdbc:opengauss://node1,node2,node3,node4,node5,node6/database?priorityServers=3
-```
-
-## Load Balancing
-
-A customer has a centralized database instance that consists of one primary node and two standby nodes, that is, *node1*, *node2*, and *node3*. *node1* is the primary node, and *node2* and *node3* are the standby nodes.
-If the customer wants to distribute the connections established by the same application evenly across the three nodes, the URL can be configured as follows:
-
-```
-jdbc:opengauss://node1,node2,node3/database?loadBalanceHosts=true
-```
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** When **loadBalanceHosts** is used, if the connection is established on a standby DN, write operations cannot be performed. If read and write operations are required, do not set this parameter.
-
-## Log Diagnosis
-
-If a customer encounters slow data import or errors that are difficult to analyze, the trace log function can be enabled for diagnosis. The URL can be configured as follows:
-
-```
-jdbc:opengauss://node1/database?loggerLevel=trace&loggerFile=jdbc.log
-```
-
-## High Performance
-
-A customer may execute the same SQL statement multiple times with different input parameters. To improve execution efficiency, the **prepareThreshold** parameter can be enabled to avoid repeatedly generating execution plans. The URL can be configured as follows:
-
-```
-jdbc:opengauss://node1/database?prepareThreshold=5
-```
-
-A customer queries 10 million data records at a time. To prevent memory overflow caused by all the records being returned at once, the **defaultRowFetchSize** parameter can be used. The URL can be configured as follows:
-
-```
-jdbc:opengauss://node1/database?defaultRowFetchSize=50000
-```
-
-A customer needs to insert 10 million data records in batches. To improve efficiency, the **batchMode** parameter can be used. The URL can be configured as follows:
-
-```
-jdbc:opengauss://node1/database?batchMode=true
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/1-java-sql-Connection.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/1-java-sql-Connection.md
deleted file mode 100644
index 0598e5ef..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/1-java-sql-Connection.md
+++ /dev/null
@@ -1,67 +0,0 @@
----
-title: java.sql.Connection
-summary: java.sql.Connection
-author: Guo Huan
-date: 2021-05-17
----
-
-# java.sql.Connection
-
-This section describes **java.sql.Connection**, the interface for connecting to a database.
- -**Table 1** Support status for java.sql.Connection - -| Method Name | Return Type | JDBC 4 Is Supported Or Not | -| :----------------------------------------------------------- | :------------------------------- | :------------------------- | -| abort(Executor executor) | void | Yes | -| clearWarnings() | void | Yes | -| close() | void | Yes | -| commit() | void | Yes | -| createArrayOf(String typeName, Object[] elements) | Array | Yes | -| createBlob() | Blob | Yes | -| createClob() | Clob | Yes | -| createSQLXML() | SQLXML | Yes | -| createStatement() | Statement | Yes | -| createStatement(int resultSetType, int resultSetConcurrency) | Statement | Yes | -| createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) | Statement | Yes | -| getAutoCommit() | Boolean | Yes | -| getCatalog() | String | Yes | -| getClientInfo() | Properties | Yes | -| getClientInfo(String name) | String | Yes | -| getHoldability() | int | Yes | -| getMetaData() | DatabaseMetaData | Yes | -| getNetworkTimeout() | int | Yes | -| getSchema() | String | Yes | -| getTransactionIsolation() | int | Yes | -| getTypeMap() | Map<String,Class<?>> | Yes | -| getWarnings() | SQLWarning | Yes | -| isClosed() | Boolean | Yes | -| isReadOnly() | Boolean | Yes | -| isValid(int timeout) | boolean | Yes | -| nativeSQL(String sql) | String | Yes | -| prepareCall(String sql) | CallableStatement | Yes | -| prepareCall(String sql, int resultSetType, int resultSetConcurrency) | CallableStatement | Yes | -| prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) | CallableStatement | Yes | -| prepareStatement(String sql) | PreparedStatement | Yes | -| prepareStatement(String sql, int autoGeneratedKeys) | PreparedStatement | Yes | -| prepareStatement(String sql, int[] columnIndexes) | PreparedStatement | Yes | -| prepareStatement(String sql, int resultSetType, int resultSetConcurrency) | PreparedStatement | Yes | -| prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) | PreparedStatement | Yes | -| prepareStatement(String sql, String[] columnNames) | PreparedStatement | Yes | -| releaseSavepoint(Savepoint savepoint) | void | Yes | -| rollback() | void | Yes | -| rollback(Savepoint savepoint) | void | Yes | -| setAutoCommit(boolean autoCommit) | void | Yes | -| setClientInfo(Properties properties) | void | Yes | -| setClientInfo(String name,String value) | void | Yes | -| setHoldability(int holdability) | void | Yes | -| setNetworkTimeout(Executor executor, int milliseconds) | void | Yes | -| setReadOnly(boolean readOnly) | void | Yes | -| setSavepoint() | Savepoint | Yes | -| setSavepoint(String name) | Savepoint | Yes | -| setSchema(String schema) | void | Yes | -| setTransactionIsolation(int level) | void | Yes | -| setTypeMap(Map<String,Class<?>> map) | void | Yes | - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> The AutoCommit mode is used by default within the interface. If you disable it by running **setAutoCommit(false)**, all the statements executed later will be packaged in explicit transactions, and you cannot execute statements that cannot be executed within transactions. 
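-
-A minimal sketch of the behavior described in the notice (assuming `conn` is an open connection and the table from the earlier examples exists; both are placeholders):
-
-```java
-conn.setAutoCommit(false);  // Later statements join one explicit transaction.
-Statement stmt = conn.createStatement();
-stmt.executeUpdate("UPDATE customer_t1 SET c_customer_name = 'tx demo' WHERE c_customer_sk = 1");
-conn.commit();              // The update becomes visible only after commit.
-stmt.close();
-conn.setAutoCommit(true);   // Restore the default AutoCommit mode.
-```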
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/10-javax-sql-DataSource.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/10-javax-sql-DataSource.md
deleted file mode 100644
index 0aeccd03..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/10-javax-sql-DataSource.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-title: javax.sql.DataSource
-summary: javax.sql.DataSource
-author: Guo Huan
-date: 2021-05-17
----
-
-# javax.sql.DataSource
-
-This section describes **javax.sql.DataSource**, the interface for data sources.
-
-**Table 1** Support status for javax.sql.DataSource
-
-| Method Name | Return Type | Support JDBC 4 |
-| :--------------------------------------------- | :---------- | :------------- |
-| getConnection() | Connection | Yes |
-| getConnection(String username,String password) | Connection | Yes |
-| getLoginTimeout() | int | Yes |
-| getLogWriter() | PrintWriter | Yes |
-| setLoginTimeout(int seconds) | void | Yes |
-| setLogWriter(PrintWriter out) | void | Yes |
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/11-javax-sql-PooledConnection.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/11-javax-sql-PooledConnection.md
deleted file mode 100644
index b1f4004d..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/11-javax-sql-PooledConnection.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: javax.sql.PooledConnection
-summary: javax.sql.PooledConnection
-author: Guo Huan
-date: 2021-05-17
----
-
-# javax.sql.PooledConnection
-
-This section describes **javax.sql.PooledConnection**, the connection interface created by a connection pool.
-
-**Table 1** Support status for javax.sql.PooledConnection
-
-| Method Name | Return Type | JDBC 4 Is Supported Or Not |
-| :----------------------------------------------------------- | :---------- | :------------------------- |
-| addConnectionEventListener (ConnectionEventListener listener) | void | Yes |
-| close() | void | Yes |
-| getConnection() | Connection | Yes |
-| removeConnectionEventListener (ConnectionEventListener listener) | void | Yes |
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/12-javax-naming-Context.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/12-javax-naming-Context.md
deleted file mode 100644
index 26da1f5a..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/12-javax-naming-Context.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-title: javax.naming.Context
-summary: javax.naming.Context
-author: Guo Huan
-date: 2021-05-17
----
-
-# javax.naming.Context
-
-This section describes **javax.naming.Context**, the context interface for connection configuration.
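-
-A minimal sketch of binding and looking up a data source by name (assuming a JNDI provider is configured in the runtime environment; the data source class and property values are placeholders based on the org.opengauss driver package layout):
-
-```java
-import javax.naming.Context;
-import javax.naming.InitialContext;
-import javax.naming.NamingException;
-import org.opengauss.ds.PGSimpleDataSource;
-
-public class JndiBindDemo {
-    public static void main(String[] args) throws NamingException {
-        PGSimpleDataSource ds = new PGSimpleDataSource();
-        ds.setServerName("localhost");   // Placeholder host.
-        ds.setPortNumber(8000);          // Placeholder port.
-        ds.setDatabaseName("postgres");
-
-        Context ctx = new InitialContext();       // Uses the configured JNDI provider.
-        ctx.bind("jdbc/mogdb", ds);               // Register the data source under a name.
-        Object bound = ctx.lookup("jdbc/mogdb");  // Retrieve it later by the same name.
-        System.out.println(bound.getClass().getName());
-    }
-}
-```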
- -**Table 1** Support status for javax.naming.Context - -| Method Name | Return Type | Support JDBC 4 | -| :------------------------------------- | :---------- | :------------- | -| bind(Name name, Object obj) | void | Yes | -| bind(String name, Object obj) | void | Yes | -| lookup(Name name) | Object | Yes | -| lookup(String name) | Object | Yes | -| rebind(Name name, Object obj) | void | Yes | -| rebind(String name, Object obj) | void | Yes | -| rename(Name oldName, Name newName) | void | Yes | -| rename(String oldName, String newName) | void | Yes | -| unbind(Name name) | void | Yes | -| unbind(String name) | void | Yes | diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/13-javax-naming-spi-InitialContextFactory.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/13-javax-naming-spi-InitialContextFactory.md deleted file mode 100644 index ef15dfcc..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/13-javax-naming-spi-InitialContextFactory.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: javax.naming.spi.InitialContextFactory -summary: javax.naming.spi.InitialContextFactory -author: Guo Huan -date: 2021-05-17 ---- - -# javax.naming.spi.InitialContextFactory - -This section describes **javax.naming.spi.InitialContextFactory**, the initial context factory interface. - -**Table 1** Support status for javax.naming.spi.InitialContextFactory - -| Method Name | Return Type | Support JDBC 4 | -| :-------------------------------------------------- | :---------- | :------------- | -| getInitialContext(Hashtable<?,?> environment) | Context | Yes | diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/14-CopyManager.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/14-CopyManager.md deleted file mode 100644 index 50a32149..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/14-CopyManager.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: CopyManager -summary: CopyManager -author: Guo Huan -date: 2021-05-17 ---- - -# CopyManager - -CopyManager is an API class provided by the JDBC driver in MogDB. It is used to import data to MogDB in batches. - -## Inheritance Relationship of CopyManager - -The CopyManager class is in the **org.opengauss.copy** package and inherits the java.lang.Object class. The declaration of the class is as follows: - -```java -public class CopyManager -extends Object -``` - -## Construction Method - -public CopyManager(BaseConnection connection) - -throws SQLException - -## Common Methods - -**Table 1** Common methods of CopyManager - -| Return Value | Method | Description | throws | -| :----------------------- | :------------------- | :------------------- | :------------------- | -| CopyIn | copyIn(String sql) | - | SQLException | -| long | copyIn(String sql, InputStream from) | Uses **COPY FROM STDIN** to quickly load data to tables in the database from InputStream. | SQLException,IOException | -| long | copyIn(String sql, InputStream from, int bufferSize) | Uses **COPY FROM STDIN** to quickly load data to tables in the database from InputStream. | SQLException,IOException | -| long | copyIn(String sql, Reader from) | Uses **COPY FROM STDIN** to quickly load data to tables in the database from Reader. 
| SQLException,IOException |
-| long | copyIn(String sql, Reader from, int bufferSize) | Uses **COPY FROM STDIN** to quickly load data to tables in the database from Reader. | SQLException,IOException |
-| CopyOut | copyOut(String sql) | - | SQLException |
-| long | copyOut(String sql, OutputStream to) | Sends the result set of **COPY TO STDOUT** from the database to the OutputStream class. | SQLException,IOException |
-| long | copyOut(String sql, Writer to) | Sends the result set of **COPY TO STDOUT** from the database to the Writer class. | SQLException,IOException |
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/2-java-sql-CallableStatement.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/2-java-sql-CallableStatement.md
deleted file mode 100644
index dc493d97..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/2-java-sql-CallableStatement.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-title: java.sql.CallableStatement
-summary: java.sql.CallableStatement
-author: Guo Huan
-date: 2021-05-17
----
-
-# java.sql.CallableStatement
-
-This section describes **java.sql.CallableStatement**, the interface for executing stored procedures.
-
-**Table 1** Support status for java.sql.CallableStatement
-
-| Method Name | Return Type | JDBC 4 Is Supported Or Not |
-| :------------------------------------------------- | :---------- | :------------------------- |
-| getArray(int parameterIndex) | Array | Yes |
-| getBigDecimal(int parameterIndex) | BigDecimal | Yes |
-| getBlob(int parameterIndex) | Blob | Yes |
-| getBoolean(int parameterIndex) | boolean | Yes |
-| getByte(int parameterIndex) | byte | Yes |
-| getBytes(int parameterIndex) | byte[] | Yes |
-| getClob(int parameterIndex) | Clob | Yes |
-| getDate(int parameterIndex) | Date | Yes |
-| getDate(int parameterIndex, Calendar cal) | Date | Yes |
-| getDouble(int parameterIndex) | double | Yes |
-| getFloat(int parameterIndex) | float | Yes |
-| getInt(int parameterIndex) | int | Yes |
-| getLong(int parameterIndex) | long | Yes |
-| getObject(int parameterIndex) | Object | Yes |
-| getObject(int parameterIndex, Class<T> type) | Object | Yes |
-| getShort(int parameterIndex) | short | Yes |
-| getSQLXML(int parameterIndex) | SQLXML | Yes |
-| getString(int parameterIndex) | String | Yes |
-| getNString(int parameterIndex) | String | Yes |
-| getTime(int parameterIndex) | Time | Yes |
-| getTime(int parameterIndex, Calendar cal) | Time | Yes |
-| getTimestamp(int parameterIndex) | Timestamp | Yes |
-| getTimestamp(int parameterIndex, Calendar cal) | Timestamp | Yes |
-| registerOutParameter(int parameterIndex, int type) | void | Yes |
-| wasNull() | Boolean | Yes |
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - Batch operations are not allowed for statements containing OUT parameters.
-> - The following methods are inherited from java.sql.Statement: close, execute, executeQuery, executeUpdate, getConnection, getResultSet, getUpdateCount, isClosed, setMaxRows, and setFetchSize.
-> - The following methods are inherited from java.sql.PreparedStatement: addBatch, clearParameters, execute, executeQuery, executeUpdate, getMetaData, setBigDecimal, setBoolean, setByte, setBytes, setDate, setDouble, setFloat, setInt, setLong, setNull, setObject, setString, setTime, and setTimestamp.
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/3-java-sql-DatabaseMetaData.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/3-java-sql-DatabaseMetaData.md deleted file mode 100644 index 6539f365..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/3-java-sql-DatabaseMetaData.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -title: java.sql.DatabaseMetaData -summary: java.sql.DatabaseMetaData -author: Guo Huan -date: 2021-05-17 ---- - -# java.sql.DatabaseMetaData - -This section describes **java.sql.DatabaseMetaData**, the interface for defining database objects. - -**Table 1** Support status for java.sql.DatabaseMetaData - -| Method Name | Return Type | JDBC 4 Is Supported Or Not | -| :----------------------------------------------------------- | :----------- | :------------------------- | -| allProceduresAreCallable() | boolean | Yes | -| allTablesAreSelectable() | boolean | Yes | -| autoCommitFailureClosesAllResultSets() | boolean | Yes | -| dataDefinitionCausesTransactionCommit() | boolean | Yes | -| dataDefinitionIgnoredInTransactions() | boolean | Yes | -| deletesAreDetected(int type) | boolean | Yes | -| doesMaxRowSizeIncludeBlobs() | boolean | Yes | -| generatedKeyAlwaysReturned() | boolean | Yes | -| getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) | ResultSet | Yes | -| getCatalogs() | ResultSet | Yes | -| getCatalogSeparator() | String | Yes | -| getCatalogTerm() | String | Yes | -| getClientInfoProperties() | ResultSet | Yes | -| getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) | ResultSet | Yes | -| getConnection() | Connection | Yes | -| getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, String foreignSchema, String foreignTable) | ResultSet | Yes | -| getDefaultTransactionIsolation() | int | Yes | -| getExportedKeys(String catalog, String schema, String table) | ResultSet | Yes | -| getExtraNameCharacters() | String | Yes | -| getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) | ResultSet | Yes | -| getFunctions(String catalog, String schemaPattern, String functionNamePattern) | ResultSet | Yes | -| getIdentifierQuoteString() | String | Yes | -| getImportedKeys(String catalog, String schema, String table) | ResultSet | Yes | -| getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) | ResultSet | Yes | -| getMaxBinaryLiteralLength() | int | Yes | -| getMaxCatalogNameLength() | int | Yes | -| getMaxCharLiteralLength() | int | Yes | -| getMaxColumnNameLength() | int | Yes | -| getMaxColumnsInGroupBy() | int | Yes | -| getMaxColumnsInIndex() | int | Yes | -| getMaxColumnsInOrderBy() | int | Yes | -| getMaxColumnsInSelect() | int | Yes | -| getMaxColumnsInTable() | int | Yes | -| getMaxConnections() | int | Yes | -| getMaxCursorNameLength() | int | Yes | -| getMaxIndexLength() | int | Yes | -| getMaxLogicalLobSize() | default long | Yes | -| getMaxProcedureNameLength() | int | Yes | -| getMaxRowSize() | int | Yes | -| getMaxSchemaNameLength() | int | Yes | -| getMaxStatementLength() | int | Yes | -| getMaxStatements() | int | Yes | -| getMaxTableNameLength() | int | Yes | -| getMaxTablesInSelect() | int | Yes | -| getMaxUserNameLength() | int | Yes | -| getNumericFunctions() | String | Yes | -| getPrimaryKeys(String 
catalog, String schema, String table) | ResultSet | Yes | -| getPartitionTablePrimaryKeys(String catalog, String schema, String table) | ResultSet | Yes | -| getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) | ResultSet | Yes | -| getProcedures(String catalog, String schemaPattern, String procedureNamePattern) | ResultSet | Yes | -| getProcedureTerm() | String | Yes | -| getSchemas() | ResultSet | Yes | -| getSchemas(String catalog, String schemaPattern) | ResultSet | Yes | -| getSchemaTerm() | String | Yes | -| getSearchStringEscape() | String | Yes | -| getSQLKeywords() | String | Yes | -| getSQLStateType() | int | Yes | -| getStringFunctions() | String | Yes | -| getSystemFunctions() | String | Yes | -| getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) | ResultSet | Yes | -| getTimeDateFunctions() | String | Yes | -| getTypeInfo() | ResultSet | Yes | -| getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) | ResultSet | Yes | -| getURL() | String | Yes | -| getVersionColumns(String catalog, String schema, String table) | ResultSet | Yes | -| insertsAreDetected(int type) | boolean | Yes | -| locatorsUpdateCopy() | boolean | Yes | -| othersDeletesAreVisible(int type) | boolean | Yes | -| othersInsertsAreVisible(int type) | boolean | Yes | -| othersUpdatesAreVisible(int type) | boolean | Yes | -| ownDeletesAreVisible(int type) | boolean | Yes | -| ownInsertsAreVisible(int type) | boolean | Yes | -| ownUpdatesAreVisible(int type) | boolean | Yes | -| storesLowerCaseIdentifiers() | boolean | Yes | -| storesMixedCaseIdentifiers() | boolean | Yes | -| storesUpperCaseIdentifiers() | boolean | Yes | -| supportsBatchUpdates() | boolean | Yes | -| supportsCatalogsInDataManipulation() | boolean | Yes | -| supportsCatalogsInIndexDefinitions() | boolean | Yes | -| supportsCatalogsInPrivilegeDefinitions() | boolean | Yes | -| supportsCatalogsInProcedureCalls() | boolean | Yes | -| supportsCatalogsInTableDefinitions() | boolean | Yes | -| supportsCorrelatedSubqueries() | boolean | Yes | -| supportsDataDefinitionAndDataManipulationTransactions() | boolean | Yes | -| supportsDataManipulationTransactionsOnly() | boolean | Yes | -| supportsGetGeneratedKeys() | boolean | Yes | -| supportsMixedCaseIdentifiers() | boolean | Yes | -| supportsMultipleOpenResults() | boolean | Yes | -| supportsNamedParameters() | boolean | Yes | -| supportsOpenCursorsAcrossCommit() | boolean | Yes | -| supportsOpenCursorsAcrossRollback() | boolean | Yes | -| supportsOpenStatementsAcrossCommit() | boolean | Yes | -| supportsOpenStatementsAcrossRollback() | boolean | Yes | -| supportsPositionedDelete() | boolean | Yes | -| supportsPositionedUpdate() | boolean | Yes | -| supportsRefCursors() | boolean | Yes | -| supportsResultSetConcurrency(int type, int concurrency) | boolean | Yes | -| supportsResultSetType(int type) | boolean | Yes | -| supportsSchemasInIndexDefinitions() | boolean | Yes | -| supportsSchemasInPrivilegeDefinitions() | boolean | Yes | -| supportsSchemasInProcedureCalls() | boolean | Yes | -| supportsSchemasInTableDefinitions() | boolean | Yes | -| supportsSelectForUpdate() | boolean | Yes | -| supportsStatementPooling() | boolean | Yes | -| supportsStoredFunctionsUsingCallSyntax() | boolean | Yes | -| supportsStoredProcedures() | boolean | Yes | -| supportsSubqueriesInComparisons() | boolean | Yes | -| supportsSubqueriesInExists() | boolean | Yes | -| supportsSubqueriesInIns() | boolean | Yes 
| -| supportsSubqueriesInQuantifieds() | boolean | Yes | -| supportsTransactionIsolationLevel(int level) | boolean | Yes | -| supportsTransactions() | boolean | Yes | -| supportsUnion() | boolean | Yes | -| supportsUnionAll() | boolean | Yes | -| updatesAreDetected(int type) | boolean | Yes | -| getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) | ResultSet | Yes | -| getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) | ResultSet | Yes | -| getTableTypes() | ResultSet | Yes | -| getUserName() | String | Yes | -| isReadOnly() | boolean | Yes | -| nullsAreSortedHigh() | boolean | Yes | -| nullsAreSortedLow() | boolean | Yes | -| nullsAreSortedAtStart() | boolean | Yes | -| nullsAreSortedAtEnd() | boolean | Yes | -| getDatabaseProductName() | String | Yes | -| getDatabaseProductVersion() | String | Yes | -| getDriverName() | String | Yes | -| getDriverVersion() | String | Yes | -| getDriverMajorVersion() | int | Yes | -| getDriverMinorVersion() | int | Yes | -| usesLocalFiles() | boolean | Yes | -| usesLocalFilePerTable() | boolean | Yes | -| supportsMixedCaseIdentifiers() | boolean | Yes | -| storesUpperCaseIdentifiers() | boolean | Yes | -| storesLowerCaseIdentifiers() | boolean | Yes | -| supportsMixedCaseQuotedIdentifiers() | boolean | Yes | -| storesUpperCaseQuotedIdentifiers() | boolean | Yes | -| storesLowerCaseQuotedIdentifiers() | boolean | Yes | -| storesMixedCaseQuotedIdentifiers() | boolean | Yes | -| supportsAlterTableWithAddColumn() | boolean | Yes | -| supportsAlterTableWithDropColumn() | boolean | Yes | -| supportsColumnAliasing() | boolean | Yes | -| nullPlusNonNullIsNull() | boolean | Yes | -| supportsConvert() | boolean | Yes | -| supportsConvert(int fromType, int toType) | boolean | Yes | -| supportsTableCorrelationNames() | boolean | Yes | -| supportsDifferentTableCorrelationNames() | boolean | Yes | -| supportsExpressionsInOrderBy() | boolean | Yes | -| supportsOrderByUnrelated() | boolean | Yes | -| supportsGroupBy() | boolean | Yes | -| supportsGroupByUnrelated() | boolean | Yes | -| supportsGroupByBeyondSelect() | boolean | Yes | -| supportsLikeEscapeClause() | boolean | Yes | -| supportsMultipleResultSets() | boolean | Yes | -| supportsMultipleTransactions() | boolean | Yes | -| supportsNonNullableColumns() | boolean | Yes | -| supportsMinimumSQLGrammar() | boolean | Yes | -| supportsCoreSQLGrammar() | boolean | Yes | -| supportsExtendedSQLGrammar() | boolean | Yes | -| supportsANSI92EntryLevelSQL() | boolean | Yes | -| supportsANSI92IntermediateSQL() | boolean | Yes | -| supportsANSI92FullSQL() | boolean | Yes | -| supportsIntegrityEnhancementFacility() | boolean | Yes | -| supportsOuterJoins() | boolean | Yes | -| supportsFullOuterJoins() | boolean | Yes | -| supportsLimitedOuterJoins() | boolean | Yes | -| isCatalogAtStart() | boolean | Yes | -| supportsSchemasInDataManipulation() | boolean | Yes | -| supportsSavepoints() | boolean | Yes | -| supportsResultSetHoldability(int holdability) | boolean | Yes | -| getResultSetHoldability() | int | Yes | -| getDatabaseMajorVersion() | int | Yes | -| getDatabaseMinorVersion() | int | Yes | -| getJDBCMajorVersion() | int | Yes | -| getJDBCMinorVersion() | int | Yes | - -> **CAUTION:** The **getPartitionTablePrimaryKeys(String catalog, String schema, String table)** API is used to obtain the primary key column of a partitioned table that contains global indexes. 
The following is an example: -> -> ``` -> PgDatabaseMetaData dbmd = (PgDatabaseMetaData)conn.getMetaData(); -> dbmd.getPartitionTablePrimaryKeys("catalogName", "schemaName", "tableName"); -> ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/4-java-sql-Driver.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/4-java-sql-Driver.md deleted file mode 100644 index cb998154..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/4-java-sql-Driver.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: java.sql.Driver -summary: java.sql.Driver -author: Guo Huan -date: 2021-05-17 ---- - -# java.sql.Driver - -This section describes **java.sql.Driver**, the database driver interface. - -**Table 1** Support status for java.sql.Driver - -| Method Name | Return Type | JDBC 4 Is Supported Or Not | -| :------------------------------------------- | :------------------- | :------------------------- | -| acceptsURL(String url) | Boolean | Yes | -| connect(String url, Properties info) | Connection | Yes | -| jdbcCompliant() | Boolean | Yes | -| getMajorVersion() | int | Yes | -| getMinorVersion() | int | Yes | -| getParentLogger() | Logger | Yes | -| getPropertyInfo(String url, Properties info) | DriverPropertyInfo[] | Yes | diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/5-java-sql-PreparedStatement.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/5-java-sql-PreparedStatement.md deleted file mode 100644 index 50091489..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/5-java-sql-PreparedStatement.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: java.sql.PreparedStatement -summary: java.sql.PreparedStatement -author: Guo Huan -date: 2021-05-17 ---- - -# java.sql.PreparedStatement - -This section describes **java.sql.PreparedStatement**, the interface for preparing statements. 
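-
-A brief usage sketch before the full method list (assuming `conn` is an open connection and the table from the earlier examples exists; both are placeholders). It also illustrates the batch notes below: bound values can be batched again without new set*() calls, and the batch must be cleared explicitly.
-
-```java
-PreparedStatement ps = conn.prepareStatement("INSERT INTO customer_t1 VALUES (?, ?)");
-ps.setInt(1, 200);
-ps.setString(2, "batch demo");
-ps.addBatch();       // First batch entry uses the values just bound.
-ps.addBatch();       // The bound values stay in place, so no new set*() calls are needed.
-ps.executeBatch();
-ps.clearBatch();     // executeBatch() does not clear the batch; clear it explicitly.
-ps.close();
-```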
-
-**Table 1** Support status for java.sql.PreparedStatement
-
-| Method Name | Return Type | JDBC 4 Is Supported Or Not |
-| :----------------------------------------------------------- | :---------------- | :------------------------- |
-| clearParameters() | void | Yes |
-| execute() | Boolean | Yes |
-| executeQuery() | ResultSet | Yes |
-| executeUpdate() | int | Yes |
-| executeLargeUpdate() | long | No |
-| getMetaData() | ResultSetMetaData | Yes |
-| getParameterMetaData() | ParameterMetaData | Yes |
-| setArray(int parameterIndex, Array x) | void | Yes |
-| setAsciiStream(int parameterIndex, InputStream x, int length) | void | Yes |
-| setBinaryStream(int parameterIndex, InputStream x) | void | Yes |
-| setBinaryStream(int parameterIndex, InputStream x, int length) | void | Yes |
-| setBinaryStream(int parameterIndex, InputStream x, long length) | void | Yes |
-| setBlob(int parameterIndex, InputStream inputStream) | void | Yes |
-| setBlob(int parameterIndex, InputStream inputStream, long length) | void | Yes |
-| setBlob(int parameterIndex, Blob x) | void | Yes |
-| setCharacterStream(int parameterIndex, Reader reader) | void | Yes |
-| setCharacterStream(int parameterIndex, Reader reader, int length) | void | Yes |
-| setClob(int parameterIndex, Reader reader) | void | Yes |
-| setClob(int parameterIndex, Reader reader, long length) | void | Yes |
-| setClob(int parameterIndex, Clob x) | void | Yes |
-| setDate(int parameterIndex, Date x, Calendar cal) | void | Yes |
-| setNull(int parameterIndex, int sqlType) | void | Yes |
-| setNull(int parameterIndex, int sqlType, String typeName) | void | Yes |
-| setObject(int parameterIndex, Object x) | void | Yes |
-| setObject(int parameterIndex, Object x, int targetSqlType) | void | Yes |
-| setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) | void | Yes |
-| setSQLXML(int parameterIndex, SQLXML xmlObject) | void | Yes |
-| setTime(int parameterIndex, Time x) | void | Yes |
-| setTime(int parameterIndex, Time x, Calendar cal) | void | Yes |
-| setTimestamp(int parameterIndex, Timestamp x) | void | Yes |
-| setTimestamp(int parameterIndex, Timestamp x, Calendar cal) | void | Yes |
-| setUnicodeStream(int parameterIndex, InputStream x, int length) | void | Yes |
-| setURL(int parameterIndex, URL x) | void | Yes |
-| setBoolean(int parameterIndex, boolean x) | void | Yes |
-| setBigDecimal(int parameterIndex, BigDecimal x) | void | Yes |
-| setByte(int parameterIndex, byte x) | void | Yes |
-| setBytes(int parameterIndex, byte[] x) | void | Yes |
-| setDate(int parameterIndex, Date x) | void | Yes |
-| setDouble(int parameterIndex, double x) | void | Yes |
-| setFloat(int parameterIndex, float x) | void | Yes |
-| setInt(int parameterIndex, int x) | void | Yes |
-| setLong(int parameterIndex, long x) | void | Yes |
-| setShort(int parameterIndex, short x) | void | Yes |
-| setString(int parameterIndex, String x) | void | Yes |
-| setNString(int parameterIndex, String x) | void | Yes |
-| addBatch() | void | Yes |
-| executeBatch() | int[] | Yes |
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - Call addBatch() and execute() only after running clearBatch().
-> - executeBatch() does not clear the batch. Clear it by explicitly calling clearBatch().
-> - After the bound variables of a batch are added, you do not need to call set*() again to reuse those values (that is, to add the batch again).
-> - The following methods are inherited from java.sql.Statement: close, execute, executeQuery, executeUpdate, getConnection, getResultSet, getUpdateCount, isClosed, setMaxRows, and setFetchSize. -> - The **executeLargeUpdate()** method can only be used in JDBC 4.2 or later. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/6-java-sql-ResultSet.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/6-java-sql-ResultSet.md deleted file mode 100644 index 390cb97b..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/6-java-sql-ResultSet.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -title: java.sql.ResultSet -summary: java.sql.ResultSet -author: Guo Huan -date: 2021-05-17 ---- - -# java.sql.ResultSet - -This section describes **java.sql.ResultSet**, the interface for execution result sets. - -**Table 1** Support status for java.sql.ResultSet - -| Method Name | Return Type | JDBC 4 Is Supported Or Not | -| :----------------------------------------------------------- | :---------------- | :------------------------- | -| absolute(int row) | Boolean | Yes | -| afterLast() | void | Yes | -| beforeFirst() | void | Yes | -| cancelRowUpdates() | void | Yes | -| clearWarnings() | void | Yes | -| close() | void | Yes | -| deleteRow() | void | Yes | -| findColumn(String columnLabel) | int | Yes | -| first() | Boolean | Yes | -| getArray(int columnIndex) | Array | Yes | -| getArray(String columnLabel) | Array | Yes | -| getAsciiStream(int columnIndex) | InputStream | Yes | -| getAsciiStream(String columnLabel) | InputStream | Yes | -| getBigDecimal(int columnIndex) | BigDecimal | Yes | -| getBigDecimal(String columnLabel) | BigDecimal | Yes | -| getBinaryStream(int columnIndex) | InputStream | Yes | -| getBinaryStream(String columnLabel) | InputStream | Yes | -| getBlob(int columnIndex) | Blob | Yes | -| getBlob(String columnLabel) | Blob | Yes | -| getBoolean(int columnIndex) | Boolean | Yes | -| getBoolean(String columnLabel) | Boolean | Yes | -| getByte(int columnIndex) | byte | Yes | -| getBytes(int columnIndex) | byte[] | Yes | -| getByte(String columnLabel) | byte | Yes | -| getBytes(String columnLabel) | byte[] | Yes | -| getCharacterStream(int columnIndex) | Reader | Yes | -| getCharacterStream(String columnLabel) | Reader | Yes | -| getClob(int columnIndex) | Clob | Yes | -| getClob(String columnLabel) | Clob | Yes | -| getConcurrency() | int | Yes | -| getCursorName() | String | Yes | -| getDate(int columnIndex) | Date | Yes | -| getDate(int columnIndex, Calendar cal) | Date | Yes | -| getDate(String columnLabel) | Date | Yes | -| getDate(String columnLabel, Calendar cal) | Date | Yes | -| getDouble(int columnIndex) | double | Yes | -| getDouble(String columnLabel) | double | Yes | -| getFetchDirection() | int | Yes | -| getFetchSize() | int | Yes | -| getFloat(int columnIndex) | float | Yes | -| getFloat(String columnLabel) | float | Yes | -| getInt(int columnIndex) | int | Yes | -| getInt(String columnLabel) | int | Yes | -| getLong(int columnIndex) | long | Yes | -| getLong(String columnLabel) | long | Yes | -| getMetaData() | ResultSetMetaData | Yes | -| getObject(int columnIndex) | Object | Yes | -| getObject(int columnIndex, Class<T> type) | <T> T | Yes | -| getObject(int columnIndex, Map<String,Class<?>> map) | Object | Yes | -| getObject(String columnLabel) | Object | Yes | -| getObject(String columnLabel, Class<T> type) | <T> T | Yes | -| getObject(String columnLabel, 
Map<String,Class<?>> map) | Object | Yes | -| getRow() | int | Yes | -| getShort(int columnIndex) | short | Yes | -| getShort(String columnLabel) | short | Yes | -| getSQLXML(int columnIndex) | SQLXML | Yes | -| getSQLXML(String columnLabel) | SQLXML | Yes | -| getStatement() | Statement | Yes | -| getString(int columnIndex) | String | Yes | -| getString(String columnLabel) | String | Yes | -| getNString(int columnIndex) | String | Yes | -| getNString(String columnLabel) | String | Yes | -| getTime(int columnIndex) | Time | Yes | -| getTime(int columnIndex, Calendar cal) | Time | Yes | -| getTime(String columnLabel) | Time | Yes | -| getTime(String columnLabel, Calendar cal) | Time | Yes | -| getTimestamp(int columnIndex) | Timestamp | Yes | -| getTimestamp(int columnIndex, Calendar cal) | Timestamp | Yes | -| getTimestamp(String columnLabel) | Timestamp | Yes | -| getTimestamp(String columnLabel, Calendar cal) | Timestamp | Yes | -| getType() | int | Yes | -| getWarnings() | SQLWarning | Yes | -| insertRow() | void | Yes | -| isAfterLast() | Boolean | Yes | -| isBeforeFirst() | Boolean | Yes | -| isClosed() | Boolean | Yes | -| isFirst() | Boolean | Yes | -| isLast() | Boolean | Yes | -| last() | Boolean | Yes | -| moveToCurrentRow() | void | Yes | -| moveToInsertRow() | void | Yes | -| next() | Boolean | Yes | -| previous() | Boolean | Yes | -| refreshRow() | void | Yes | -| relative(int rows) | Boolean | Yes | -| rowDeleted() | Boolean | Yes | -| rowInserted() | Boolean | Yes | -| rowUpdated() | Boolean | Yes | -| setFetchDirection(int direction) | void | Yes | -| setFetchSize(int rows) | void | Yes | -| updateArray(int columnIndex, Array x) | void | Yes | -| updateArray(String columnLabel, Array x) | void | Yes | -| updateAsciiStream(int columnIndex, InputStream x, int length) | void | Yes | -| updateAsciiStream(String columnLabel, InputStream x, int length) | void | Yes | -| updateBigDecimal(int columnIndex, BigDecimal x) | void | Yes | -| updateBigDecimal(String columnLabel, BigDecimal x) | void | Yes | -| updateBinaryStream(int columnIndex, InputStream x, int length) | void | Yes | -| updateBinaryStream(String columnLabel, InputStream x, int length) | void | Yes | -| updateBoolean(int columnIndex, boolean x) | void | Yes | -| updateBoolean(String columnLabel, boolean x) | void | Yes | -| updateByte(int columnIndex, byte x) | void | Yes | -| updateByte(String columnLabel, byte x) | void | Yes | -| updateBytes(int columnIndex, byte[] x) | void | Yes | -| updateBytes(String columnLabel, byte[] x) | void | Yes | -| updateCharacterStream(int columnIndex, Reader x, int length) | void | Yes | -| updateCharacterStream(String columnLabel, Reader reader, int length) | void | Yes | -| updateDate(int columnIndex, Date x) | void | Yes | -| updateDate(String columnLabel, Date x) | void | Yes | -| updateDouble(int columnIndex, double x) | void | Yes | -| updateDouble(String columnLabel, double x) | void | Yes | -| updateFloat(int columnIndex, float x) | void | Yes | -| updateFloat(String columnLabel, float x) | void | Yes | -| updateInt(int columnIndex, int x) | void | Yes | -| updateInt(String columnLabel, int x) | void | Yes | -| updateLong(int columnIndex, long x) | void | Yes | -| updateLong(String columnLabel, long x) | void | Yes | -| updateNull(int columnIndex) | void | Yes | -| updateNull(String columnLabel) | void | Yes | -| updateObject(int columnIndex, Object x) | void | Yes | -| updateObject(int columnIndex, Object x, int scaleOrLength) | void | Yes | -| updateObject(String 
columnLabel, Object x) | void | Yes |
-| updateObject(String columnLabel, Object x, int scaleOrLength) | void | Yes |
-| updateRow() | void | Yes |
-| updateShort(int columnIndex, short x) | void | Yes |
-| updateShort(String columnLabel, short x) | void | Yes |
-| updateSQLXML(int columnIndex, SQLXML xmlObject) | void | Yes |
-| updateSQLXML(String columnLabel, SQLXML xmlObject) | void | Yes |
-| updateString(int columnIndex, String x) | void | Yes |
-| updateString(String columnLabel, String x) | void | Yes |
-| updateTime(int columnIndex, Time x) | void | Yes |
-| updateTime(String columnLabel, Time x) | void | Yes |
-| updateTimestamp(int columnIndex, Timestamp x) | void | Yes |
-| updateTimestamp(String columnLabel, Timestamp x) | void | Yes |
-| wasNull() | Boolean | Yes |
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - One Statement cannot have multiple open ResultSets.
-> - The cursor used to traverse the ResultSet does not remain open after its transaction is committed.
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/7-java-sql-ResultSetMetaData.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/7-java-sql-ResultSetMetaData.md
deleted file mode 100644
index 94fd5575..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/7-java-sql-ResultSetMetaData.md
+++ /dev/null
@@ -1,36 +0,0 @@
----
-title: java.sql.ResultSetMetaData
-summary: java.sql.ResultSetMetaData
-author: Guo Huan
-date: 2021-05-17
----
-
-# java.sql.ResultSetMetaData
-
-This section describes **java.sql.ResultSetMetaData**, which provides information about the columns of a ResultSet object.
-
-**Table 1** Support status for java.sql.ResultSetMetaData
-
-| Method Name | Return Type | JDBC 4 Is Supported Or Not |
-| :------------------------------- | :---------- | :------------------------- |
-| getCatalogName(int column) | String | Yes |
-| getColumnClassName(int column) | String | Yes |
-| getColumnCount() | int | Yes |
-| getColumnDisplaySize(int column) | int | Yes |
-| getColumnLabel(int column) | String | Yes |
-| getColumnName(int column) | String | Yes |
-| getColumnType(int column) | int | Yes |
-| getColumnTypeName(int column) | String | Yes |
-| getPrecision(int column) | int | Yes |
-| getScale(int column) | int | Yes |
-| getSchemaName(int column) | String | Yes |
-| getTableName(int column) | String | Yes |
-| isAutoIncrement(int column) | boolean | Yes |
-| isCaseSensitive(int column) | boolean | Yes |
-| isCurrency(int column) | boolean | Yes |
-| isDefinitelyWritable(int column) | boolean | Yes |
-| isNullable(int column) | int | Yes |
-| isReadOnly(int column) | boolean | Yes |
-| isSearchable(int column) | boolean | Yes |
-| isSigned(int column) | boolean | Yes |
-| isWritable(int column) | boolean | Yes |
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/8-java-sql-Statement.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/8-java-sql-Statement.md
deleted file mode 100644
index d3333ee2..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/8-java-sql-Statement.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-title: java.sql.Statement
-summary: java.sql.Statement
-author: Guo Huan
-date: 2021-05-17
----
-
-# java.sql.Statement
-
-This section describes **java.sql.Statement**, the interface for executing SQL statements.
-
-**Table 1** Support status for java.sql.Statement
-
-| Method Name | Return Type | JDBC 4 Is Supported Or Not |
-| :---------------------------------------------------- | :---------- | :------------------------- |
-| addBatch(String sql) | void | Yes |
-| clearBatch() | void | Yes |
-| clearWarnings() | void | Yes |
-| close() | void | Yes |
-| closeOnCompletion() | void | Yes |
-| execute(String sql) | Boolean | Yes |
-| execute(String sql, int autoGeneratedKeys) | Boolean | Yes |
-| execute(String sql, int[] columnIndexes) | Boolean | Yes |
-| execute(String sql, String[] columnNames) | Boolean | Yes |
-| executeBatch() | int[] | Yes |
-| executeQuery(String sql) | ResultSet | Yes |
-| executeUpdate(String sql) | int | Yes |
-| executeUpdate(String sql, int autoGeneratedKeys) | int | Yes |
-| executeUpdate(String sql, int[] columnIndexes) | int | Yes |
-| executeUpdate(String sql, String[] columnNames) | int | Yes |
-| getConnection() | Connection | Yes |
-| getFetchDirection() | int | Yes |
-| getFetchSize() | int | Yes |
-| getGeneratedKeys() | ResultSet | Yes |
-| getMaxFieldSize() | int | Yes |
-| getMaxRows() | int | Yes |
-| getMoreResults() | boolean | Yes |
-| getMoreResults(int current) | boolean | Yes |
-| getResultSet() | ResultSet | Yes |
-| getResultSetConcurrency() | int | Yes |
-| getResultSetHoldability() | int | Yes |
-| getResultSetType() | int | Yes |
-| getQueryTimeout() | int | Yes |
-| getUpdateCount() | int | Yes |
-| getWarnings() | SQLWarning | Yes |
-| isClosed() | Boolean | Yes |
-| isCloseOnCompletion() | Boolean | Yes |
-| isPoolable() | Boolean | Yes |
-| setCursorName(String name) | void | Yes |
-| setEscapeProcessing(boolean enable) | void | Yes |
-| setFetchDirection(int direction) | void | Yes |
-| setMaxFieldSize(int max) | void | Yes |
-| setMaxRows(int max) | void | Yes |
-| setPoolable(boolean poolable) | void | Yes |
-| setQueryTimeout(int seconds) | void | Yes |
-| setFetchSize(int rows) | void | Yes |
-| cancel() | void | Yes |
-| executeLargeUpdate(String sql) | long | No |
-| getLargeUpdateCount() | long | No |
-| executeLargeBatch() | long | No |
-| executeLargeUpdate(String sql, int autoGeneratedKeys) | long | No |
-| executeLargeUpdate(String sql, int[] columnIndexes) | long | No |
-| executeLargeUpdate(String sql, String[] columnNames) | long | No |
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - Using setFetchSize can reduce the memory occupied by result sets on the client. The result set is wrapped in a cursor and processed in segments, which increases the communication traffic between the database and the client and can affect performance.
-> - Database cursors are valid only within their transactions. If **setFetchSize** is set, call **setAutoCommit(false)** and commit the transaction on the connection to flush service data to the database.
-> - **LargeUpdate** methods can only be used in JDBC 4.2 or later.
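-
-The note above implies a specific pattern for cursor-based fetching. The following is a minimal sketch of that pattern, assuming an open Connection named conn; the query, table, and fetch size are illustrative.
-
-```java
-// Illustrative sketch: fetch rows in batches of 50 instead of all at once.
-conn.setAutoCommit(false); // cursors are valid only inside a transaction
-try (Statement stmt = conn.createStatement()) {
-    stmt.setFetchSize(50);
-    try (ResultSet rs = stmt.executeQuery("SELECT c_customer_name FROM customer_t1")) {
-        while (rs.next()) {
-            System.out.println(rs.getString(1));
-        }
-    }
-}
-conn.commit();
-```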
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/9-javax-sql-ConnectionPoolDataSource.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/9-javax-sql-ConnectionPoolDataSource.md
deleted file mode 100644
index 73d1518f..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/9-javax-sql-ConnectionPoolDataSource.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: javax.sql.ConnectionPoolDataSource
-summary: javax.sql.ConnectionPoolDataSource
-author: Guo Huan
-date: 2021-05-17
----
-
-# javax.sql.ConnectionPoolDataSource
-
-This section describes **javax.sql.ConnectionPoolDataSource**, the interface for data source connection pools.
-
-**Table 1** Support status for javax.sql.ConnectionPoolDataSource
-
-| Method Name | Return Type | JDBC 4 Is Supported Or Not |
-| :----------------------------------------------- | :--------------- | :------------------------- |
-| getPooledConnection() | PooledConnection | Yes |
-| getPooledConnection(String user, String password) | PooledConnection | Yes |
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/jdbc-interface-reference.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/jdbc-interface-reference.md
deleted file mode 100644
index bc22d556..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/15-JDBC/jdbc-interface-reference.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-title: JDBC Interface Reference
-summary: JDBC Interface Reference
-author: Guo Huan
-date: 2023-05-18
----
-
-# JDBC Interface Reference
-
-This section describes common JDBC interfaces. For more interfaces, see the JDK 1.8 documentation and the JDBC 4.0 specification.
-
-- **[java.sql.Connection](1-java-sql-Connection.md)**
-- **[java.sql.CallableStatement](2-java-sql-CallableStatement.md)**
-- **[java.sql.DatabaseMetaData](3-java-sql-DatabaseMetaData.md)**
-- **[java.sql.Driver](4-java-sql-Driver.md)**
-- **[java.sql.PreparedStatement](5-java-sql-PreparedStatement.md)**
-- **[java.sql.ResultSet](6-java-sql-ResultSet.md)**
-- **[java.sql.ResultSetMetaData](7-java-sql-ResultSetMetaData.md)**
-- **[java.sql.Statement](8-java-sql-Statement.md)**
-- **[javax.sql.ConnectionPoolDataSource](9-javax-sql-ConnectionPoolDataSource.md)**
-- **[javax.sql.DataSource](10-javax-sql-DataSource.md)**
-- **[javax.sql.PooledConnection](11-javax-sql-PooledConnection.md)**
-- **[javax.naming.Context](12-javax-naming-Context.md)**
-- **[javax.naming.spi.InitialContextFactory](13-javax-naming-spi-InitialContextFactory.md)**
-- **[CopyManager](14-CopyManager.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/2-jdbc-package-driver-class-and-environment-class.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/2-jdbc-package-driver-class-and-environment-class.md
deleted file mode 100644
index 2ed78dcf..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/2-jdbc-package-driver-class-and-environment-class.md
+++ /dev/null
@@ -1,51 +0,0 @@
----
-title: JDBC Package, Driver Class, and Environment Class
-summary: JDBC Package, Driver Class, and Environment Class
-author: Guo Huan
-date: 2021-04-26
----
-
-# JDBC Package, Driver Class, and Environment Class
-
-**JDBC Package**
-
-Run **build.sh** in the source code directory on a Linux OS to generate the driver JAR package **postgresql.jar** in that directory, or obtain it from the release package [**openGauss-x.x.x-JDBC.tar.gz**](https://opengauss.org/en/download/).
-
-The driver package is compatible with PostgreSQL: the class names and structure in the driver are the same as those in the PostgreSQL driver, so applications running on PostgreSQL can be migrated to the current system smoothly.
-
-**Driver Class**
-
-Before establishing a database connection, load the **org.opengauss.Driver** database driver class.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - MogDB is compatible with PostgreSQL in the use of JDBC. Therefore, the org.postgresql.Driver class will conflict when the JDBC driver packages for PostgreSQL and openGauss are loaded in the same JVM at the same time.
->
-> - Compared with the PostgreSQL driver, the openGauss JDBC driver has the following enhanced features:
-> - The SHA256 encryption mode is supported for login.
-> - Third-party log frameworks that implement the SLF4J API can be connected.
-> - DR failover is supported.
-
-**Environment Class**
-
-JDK 1.8 must be configured on the client. The configuration method is as follows:
-
-1. In the command prompt window, run **java -version** to check the JDK version. Ensure that the version is JDK 1.8. If the JDK is not installed, download the installation package from the official website and install it. If the system JDK version is earlier than 1.8, refer to [Use WebSphere to Configure MogDB Data Sources](../../../quick-start/mogdb-access/use-middleware-to-access-mogdb/websphere-configures-mogdb-data-source-reference.md).
-
-2. Configure system environment variables.
-
-   1. Right-click **My computer** and choose **Properties**.
- - 2. In the navigation pane, choose **Advanced system settings**. - - 3. In the **System Properties** dialog box, click **Environment Variables** on the **Advanced** tab page. - - 4. In the **System variables** area of the **Environment Variables** dialog box, click **New** or **Edit** to configure system variables. For details, see [Table 1](#Description). - - **Table 1** Description - - | Variable | Operation | Variable Value | - | :-------- | :----------------------------------------------------------- | :----------------------------------------------------------- | - | JAVA_HOME | - If the variable exists, click **Edit**.
- If the variable does not exist, click **New**. | Specifies the Java installation directory.
Example: C:\Program Files\Java\jdk1.8.0_131 | - | Path | Edit | - If JAVA_HOME is configured, add **%JAVA_HOME%\bin** before the variable value.
- If JAVA_HOME is not configured, add the full Java installation path before the variable value:
C:\Program Files\Java\jdk1.8.0_131\bin; | - | CLASSPATH | New | .;%JAVA_HOME%\lib;%JAVA_H | diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/3-development-process.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/3-development-process.md deleted file mode 100644 index e73100d5..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/3-development-process.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Development Process -summary: Development Process -author: Guo Huan -date: 2021-04-26 ---- - -# Development Process - -**Figure 1** Application development process based on JDBC - -![application-development-process-based-on-jdbc](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/development-process-2.png) diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/4-loading-the-driver.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/4-loading-the-driver.md deleted file mode 100644 index de6094ed..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/4-loading-the-driver.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Loading the Driver -summary: Loading the Driver -author: Guo Huan -date: 2021-04-26 ---- - -# Loading the Driver - -Load the database driver before creating a database connection. - -You can load the driver in the following ways: - -- Before creating a connection, implicitly load the driver in the code:**Class.forName("org.opengauss.Driver")** - -- During the JVM startup, transfer the driver as a parameter to the JVM:**java -Djdbc.drivers=org.opengauss.Driver jdbctest** - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > **jdbctest** is the name of a test application. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/5-connecting-to-a-database.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/5-connecting-to-a-database.md deleted file mode 100644 index 6d645edd..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/5-connecting-to-a-database.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Connecting to a Database -summary: Connecting to a Database -author: Guo Huan -date: 2021-04-26 ---- - -# Connecting to a Database - -After a database is connected, you can use JDBC to run SQL statements to operate data. - -**Function Prototype** - -JDBC provides the following three database connection methods: - -- DriverManager.getConnection(String url); -- DriverManager.getConnection(String url, Properties info); -- DriverManager.getConnection(String url, String user, String password); - -**Parameters** - -**Table 1** Database connection parameters - -| Parameter | Description | -| --------- | ------------------------------------------------------------ | -| url | **postgresql.jar** database connection descriptor. The format is as follows:
- `jdbc:opengauss:database`
- `jdbc:opengauss://host/database`
- `jdbc:opengauss://host:port/database`
- `jdbc:opengauss://host:port/database?param1=value1&param2=value2`<br>
- `jdbc:opengauss://host1:port1,host2:port2/database?param1=value1&param2=value2`<br>
NOTE:
- **database** indicates the name of the database to connect to.<br>
- **host** indicates the name or IP address of the database server.
If a machine connected to MogDB is not in the same network segment as MogDB, the IP address specified by **host** should be the value of **coo.cooListenIp2** (application access IP address) set in Manager.
For security purposes, the primary database node forbids access from other nodes in MogDB without authentication. To access the primary database node from inside MogDB, deploy the JDBC program on the host where the primary database node is located and set **host** to **127.0.0.1**. Otherwise, the error message "FATAL: Forbid remote connection with trust method!" may be displayed.
It is recommended that the service system be deployed outside MogDB. If it is deployed inside, database performance may be affected.
By default, the local host is used to connect to the server.
- **port** indicates the port number of the database server.
By default, the database on port 5432 of the local host is connected.
If the thread pool is enabled, the connection port must be the HA port number. The default value is the value of port plus 1.
- **param** indicates a database connection attribute.
Parameters can be configured in the URL: the parameter list starts with a question mark (?), an equal sign (=) assigns a value to each parameter, and an ampersand (&) separates parameters. You can also use the attributes of the **info** object for configuration. For details, see the example below.<br>
- **value** indicates the database connection attribute values.
The **connectTimeout** and **socketTimeout** parameters must be set for a connection. If they are not set, the default value **0** is used, indicating that the connection never times out. When the network between the DN and the client is faulty, the client does not receive ACK packets from the DN, so it starts the timeout retransmission mechanism and continuously retransmits packets. A timeout error is reported only when the retransmission timeout reaches the default value of **600s**. As a result, the RTO is high. |
-| info | Database connection attributes (all attributes are case sensitive). Common attributes are described as follows:<br>
- **PGDBNAME**: string type. This parameter specifies the database name. (This parameter does not need to be set in the URL. The system automatically parses the URL to obtain its value.)
- **PGHOST**: string type. This parameter specifies the host IP address. For details, see the example below.
- **PGPORT**: integer type. This parameter specifies the host port number. For details, see the example below.
- **user**: string type. This parameter specifies the database user who creates the connection.
- **password**: string type. This parameter specifies the password of the database user.
- **enable_ce**: string type. If **enable_ce** is set to **1**, JDBC supports encrypted equality query.
- **loggerLevel**: string type. The following log levels are supported: **OFF**, **DEBUG**, and **TRACE**. The value **OFF** indicates that the log function is disabled. **DEBUG** and **TRACE** logs record information of different levels.<br>
- **loggerFile**: string type. This parameter specifies the name of a log file. You can specify a directory for storing logs. If no directory is specified, logs are stored in the directory where the client program is running.
- **allowEncodingChanges**: Boolean type. If this parameter is set to **true**, the character set type can be changed. This parameter is used together with **characterEncoding=CHARSET** to set the character set. The two parameters are separated by ampersands (&). The value of **characterEncoding** can be **UTF8**, **GBK**, or **LATIN1**.
- **currentSchema**: string type. This parameter specifies the schema to be set in **search-path**.
- **hostRecheckSeconds**: integer type. After JDBC attempts to connect to a host, the host status is saved: connection success or connection failure. This status is trusted within the duration specified by **hostRecheckSeconds**. After the duration expires, the status becomes invalid. The default value is 10 seconds.
- **ssl**: Boolean type. This parameter specifies a connection in SSL mode. When **ssl** is set to **true**, the NonValidatingFactory channel and certificate mode are supported.
1. For the NonValidatingFactory channel, configure the username and password and set **SSL** to **true**.
2. In certificate mode, configure the client certificate, key, and root certificate, and set **SSL** to **true**.<br>
- **sslmode**: string type. This parameter specifies the SSL authentication mode. The value can be **require**, **verify-ca**, or **verify-full**.
- **require**: The system attempts to set up an SSL connection. If there is a CA file, the system performs verification as if the parameter was set to **verify-ca**.
- **verify-ca**: The system attempts to set up an SSL connection and checks whether the server certificate is issued by a trusted CA.
- **verify-full**: The system attempts to set up an SSL connection, checks whether the server certificate is issued by a trusted CA, and checks whether the host name of the server is the same as that in the certificate.
- **sslcert**: string type. This parameter specifies the complete path of the certificate file. The type of the client and server certificates is **End Entity**.
- **sslkey**: string type. This parameter specifies the complete path of the key file. You must run the following command to convert the client key to the DER format:<br>
`openssl pkcs8 -topk8 -outform DER -in client.key -out client.key.pk8 -nocrypt`
- **sslrootcert**: string type. This parameter specifies the name of the SSL root certificate. The root certificate type is CA.
- **sslpassword**: string type. This parameter is provided for ConsoleCallbackHandler.
- **sslpasswordcallback**: string type. This parameter specifies the class name of the SSL password provider. The default value is **org.opengauss.ssl.jdbc4.LibPQFactory.ConsoleCallbackHandler**.
- **sslfactory**: string type. This parameter specifies the class name used by SSLSocketFactory to establish an SSL connection.
- **sslfactoryarg**: string type. The value is an optional parameter of the constructor function of the **sslfactory** class and is not recommended.
- **sslhostnameverifier**: string type. This parameter specifies the class name of the host name verifier. The interface must implement javax.net.ssl.HostnameVerifier. The default value is **org.opengauss.ssl.PGjdbcHostnameVerifier**.
- **loginTimeout**: integer type. This parameter specifies the waiting time for establishing the database connection, in seconds.<br>- **connectTimeout**: integer type. This parameter specifies the timeout duration for connecting to a server, in seconds. If the time taken to connect to a server exceeds the value specified, the connection is interrupted. If the value is **0**, the timeout mechanism is disabled.<br>
- **socketTimeout**: integer type. This parameter specifies the timeout duration for a socket read operation, in seconds. If the time taken to read data from a server exceeds the value specified, the connection is closed. If the value is **0**, the timeout mechanism is disabled.
- **cancelSignalTimeout**: integer type. Cancel messages may cause a block. This parameter controls **connectTimeout** and **socketTimeout** in a cancel message, in seconds. The default value is 10 seconds.
- **tcpKeepAlive**: Boolean type. This parameter is used to enable or disable TCP keepalive detection. The default value is **false**.
- **logUnclosedConnections**: Boolean type. The client may leak a connection object because it does not call the connection object's close() method. These objects will be collected as garbage and finalized using the finalize() method. If the caller ignores this operation, this method closes the connection.
- **assumeMinServerVersion**: string type. The client sends a request to set a floating point. This parameter specifies the version of the server to connect, for example, **assumeMinServerVersion=9.0**. This parameter can reduce the number of packets to send during connection setup.
- **ApplicationName**: string type. This parameter specifies the name of the JDBC driver that is being connected. You can query the **pg_stat_activity** table on the primary database node to view information about the client that is being connected. The JDBC driver name is displayed in the **application_name** column. The default value is **PostgreSQL JDBC Driver**.
- **connectionExtraInfo**: Boolean type. This parameter specifies whether the JDBC driver reports the driver deployment path and process owner to the database. The value can be **true** or **false**. The default value is **false**. If **connectionExtraInfo** is set to **true**, the JDBC driver reports the driver deployment path, process owner, and URL connection configuration information to the database and displays the information in the **connection_info** parameter. In this case, you can query the information from **PG_STAT_ACTIVITY**.<br>
- **autosave**: string type. The value can be **always**, **never**, or **conservative**. The default value is **never**. This parameter specifies the action that the driver should perform upon a query failure. If **autosave** is set to **always**, the JDBC driver sets a savepoint before each query and rolls back to the savepoint if the query fails. If **autosave** is set to **never**, there is no savepoint. If **autosave** is set to **conservative**, a savepoint is set for each query. However, the system rolls back and retries only when there is an invalid statement.
- **protocolVersion**: integer type. This parameter specifies the connection protocol version. Only version 3 is supported. Note: MD5 encryption is used when this parameter is set. You must use the following command to change the database encryption mode: **gs_guc set -N all -I all -c "password_encryption_type=1"**. After MogDB is restarted, create a user that uses MD5 encryption to encrypt passwords. You must also change the client connection mode to **md5** in the **pg_hba.conf** file. Log in as the new user (not recommended).<br>
NOTE:
The MD5 encryption algorithm has lower security and poses security risks. Therefore, you are advised to use a more secure encryption algorithm.
- **prepareThreshold**: integer type. This parameter specifies the time when the parse statement is sent. The default value is **5**. It takes a long time to parse an SQL statement for the first time, but a short time to parse SQL statements later because of cache. If a session runs an SQL statement multiple consecutive times and the number of execution times exceeds the value of **prepareThreshold**, JDBC does not send the parse command to the SQL statement.
- **preparedStatementCacheQueries**: integer type. This parameter specifies the number of queries cached in each connection. The default value is **256**. If more than 256 different queries are used in the prepareStatement() call, the least recently used query cache will be discarded. The value **0** indicates that the cache function is disabled.
- **preparedStatementCacheSizeMiB**: integer type. This parameter specifies the maximum cache size of each connection, in MB. The default value is **5**. If the size of the cached queries exceeds 5 MB, the least recently used query cache will be discarded. The value **0** indicates that the cache function is disabled.
- **databaseMetadataCacheFields**: integer type. The default value is **65536**. This parameter specifies the maximum cache size of each connection. The value **0** indicates that the cache function is disabled.
- **databaseMetadataCacheFieldsMiB**: integer type. The default value is **5**. This parameter specifies the maximum cache size of each connection, in MB. The value **0** indicates that the cache function is disabled.
- **stringtype**: string type. The value can be **false**, **unspecified**, or **varchar**. The default value is **varchar**. This parameter specifies the type of the **PreparedStatement** parameter used by the setString() method. If **stringtype** is set to **varchar**, these parameters are sent to the server as varchar parameters. If **stringtype** is set to **unspecified**, these parameters are sent to the server as an untyped value, and the server attempts to infer their appropriate type.
- **batchMode**: Boolean type. This parameter specifies whether to connect the database in batch mode. The default value is **on**, indicating that the batch mode is enabled.
- **fetchsize**: integer type. This parameter specifies the default fetchsize for statements in the created connection. The default value is **0**, indicating that all results are obtained at a time.
- **reWriteBatchedInserts**: Boolean type. During batch import, set this parameter to **true** to combine **N** insertion statements into one: insert into TABLE_NAME values(values1, …, valuesN), …, (values1, …, valuesN). To use this parameter, set **batchMode** to **off**.
- **unknownLength**: integer type. The default value is **Integer.MAX\_VALUE**. This parameter specifies the length of the unknown length type when the data of some postgresql types (such as TEXT) is returned by functions such as ResultSetMetaData.getColumnDisplaySize and ResultSetMetaData.getPrecision.
- **defaultRowFetchSize**: integer type. This parameter specifies the number of rows read by fetch in ResultSet at a time. Limiting the number of rows read each time in a database access request can avoid unnecessary memory consumption, thereby avoiding an out-of-memory exception. The default value is **0**, indicating that all rows are obtained at a time in ResultSet. Negative values are not allowed.<br>
- **binaryTransfer**: Boolean type. This parameter specifies whether data is sent and received in binary format. The default value is **false**.
- **binaryTransferEnable**: string type. This parameter specifies the type for which binary transmission is enabled. Every two types are separated by commas (,). You can select either the OID or name, for example, binaryTransferEnable=Integer4_ARRAY,Integer8_ARRAY.
For example, if the OID name is **BLOB** and the OID number is 88, you can configure the OID as follows:
**binaryTransferEnable=BLOB or binaryTransferEnable=88**
- **binaryTransferDisEnable**: string type. This parameter specifies the type for which binary transmission is disabled. Every two types are separated by commas (,). You can select either the OID or name. The value of this parameter overwrites the value of **binaryTransferEnable**.
- **blobMode**: string type. This parameter sets the setBinaryStream method to assign values to different types of data. The value **on** indicates that values are assigned to blob data. The value **off** indicates that values are assigned to bytea data. The default value is **on**.
- **socketFactory**: string type. This parameter specifies the name of the class used to create a socket connection with the server. This class must implement the **javax.net.SocketFactory** interface and define a constructor with no parameter or a single string parameter.
- **socketFactoryArg**: string type. The value is an optional parameter of the constructor function of the socketFactory class and is not recommended.
- **receiveBufferSize**: integer type. This parameter is used to set **SO\_RCVBUF** on the connection stream.
- **sendBufferSize**: integer type. This parameter is used to set **SO\_SNDBUF** on the connection stream.
- **preferQueryMode**: string type. The value can be **extended**, **extendedForPrepared**, **extendedCacheEverything**, or **simple**. This parameter specifies the query mode. In **simple** mode, the query is executed without parsing or binding. In **extended** mode, the query is executed and bound. The **extendedForPrepared** mode is used for prepared statement extension. In **extendedCacheEverything** mode, each statement is cached.
- **targetServerType**: string type. This parameter specifies the type of DN to connect to among the hosts in the URL connection string; a primary DN is distinguished from a standby DN by whether it allows write operations. The default value is **any**. The value can be **any**, **master**, **slave**, or **preferSlave**.<br>
- **master**: attempts to connect to a primary DN in the URL connection string. If the primary DN cannot be found, an exception is thrown.
- **slave**: attempts to connect to a standby DN in the URL connection string. If no standby DN can be found, an exception is thrown.<br>
- **preferSlave**: attempts to connect to a standby DN (if available) in the URL connection string. Otherwise, it connects to the primary DN.<br>
- **any**: attempts to connect to any DN in the URL connection string.<br>
- **priorityServers**: integer type. This value is used to specify the first **n** nodes configured in the URL as the primary database instance to be connected preferentially. The default value is **null**. The value is a number greater than 0 and less than the number of DNs configured in the URL.
For example, `jdbc:opengauss://host1:port1,host2:port2,host3:port3,host4:port4/database?priorityServers=2`. That is, **host1** and **host2** are primary database instance nodes, and **host3** and **host4** are DR database instance nodes.<br>
- **forceTargetServerSlave**: Boolean type. This parameter specifies whether to enable the function of forcibly connecting to the standby node and forbid the existing connections to be used on the standby node that is promoted to primary during the primary/standby switchover of the database instance. The default value is **false**, indicating that the function of forcibly connecting to the standby node is disabled. **true**: The function of forcibly connecting to the standby node is enabled. | -| user | Database user. | -| password | Password of the database user. | - -**Examples** - -```java -// The following code encapsulates database connection operations into an interface. The database can then be connected using an authorized username and a password. -public static Connection getConnect(String username, String passwd) - { - // Driver class. - String driver = "org.opengauss.Driver"; - // Database connection descriptor. - String sourceURL = "jdbc:opengauss://10.10.0.13:8000/postgres"; - Connection conn = null; - - try - { - // Load the driver. - Class.forName(driver); - } - catch( Exception e ) - { - e.printStackTrace(); - return null; - } - - try - { - // Create a connection. - conn = DriverManager.getConnection(sourceURL, username, passwd); - System.out.println("Connection succeed!"); - } - catch(Exception e) - { - e.printStackTrace(); - return null; - } - - return conn; - }; -// The following code uses the Properties object as a parameter to establish a connection. -public static Connection getConnectUseProp(String username, String passwd) - { - // Driver class. - String driver = "org.opengauss.Driver"; - // Database connection descriptor. - String sourceURL = "jdbc:opengauss://10.10.0.13:8000/postgres?"; - Connection conn = null; - Properties info = new Properties(); - - try - { - // Load the driver. - Class.forName(driver); - } - catch( Exception e ) - { - e.printStackTrace(); - return null; - } - - try - { - info.setProperty("user", username); - info.setProperty("password", passwd); - // Create a connection. - conn = DriverManager.getConnection(sourceURL, info); - System.out.println("Connection succeed!"); - } - catch(Exception e) - { - e.printStackTrace(); - return null; - } - - return conn; - }; -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/6-connecting-to-a-database-using-ssl.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/6-connecting-to-a-database-using-ssl.md deleted file mode 100644 index d9f02a54..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/6-connecting-to-a-database-using-ssl.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: Connecting to the Database (Using SSL) -summary: Connecting to the Database (Using SSL) -author: Guo Huan -date: 2021-04-26 ---- - -# Connecting to the Database (Using SSL) - -When establishing connections to the MogDB server using JDBC, you can enable SSL connections to encrypt client and server communications for security of sensitive data transmission on the Internet. This section describes how applications establish an SSL connection to MogDB using JDBC. To start the SSL mode, you must have the server certificate, client certificate, and private key files. For details on how to obtain these files, see related documents and commands of OpenSSL. - -**Configuring the Server** - -The SSL mode requires a root certificate, a server certificate, and a private key. 
-
-Perform the following operations (assuming that the certificate files are saved in the data directory **/mogdb/data/datanode** and the default file names are used):
-
-1. Log in as the OS user **omm** to the primary node of the database.
-
-2. Generate and import a certificate.
-
-   Generate an SSL certificate. For details, see [Generating Certificates](../../../security-guide/security/1-client-access-authentication.md#Generating Certificates). Copy the generated **server.crt**, **server.key**, and **cacert.pem** files to the data directory on the server.
-
-   Run the following command to query the data directory of the database node. The instance column indicates the data directory.
-
-   ```bash
-   gs_om -t status --detail
-   ```
-
-   In the Unix OS, **server.crt** and **server.key** must deny access from any other user or group. Run the following command to set this permission:
-
-   ```bash
-   chmod 0600 server.key
-   ```
-
-3. Enable the SSL authentication mode.
-
-   ```bash
-   gs_guc set -D /mogdb/data/datanode -c "ssl=on"
-   ```
-
-4. Set client access authentication parameters. The IP address is the IP address of the host to be connected.
-
-   ```bash
-   gs_guc reload -D /mogdb/data/datanode -h "hostssl all all 127.0.0.1/32 cert"
-   gs_guc reload -D /mogdb/data/datanode -h "hostssl all all IP/32 cert"
-   ```
-
-   Clients on the **127.0.0.1/32** network segment can connect to MogDB servers in SSL mode.
-
-   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
-   >
-   > - If **METHOD** is set to **cert** in the **pg_hba.conf** file of the server, the client must use the username (common name) configured in the certificate file (**client.crt**) for the database connection. If **METHOD** is set to **md5**, **sm3** or **sha256**, there is no such restriction.
-   > - The MD5 encryption algorithm has lower security and poses security risks. Therefore, you are advised to use a more secure encryption algorithm.
-
-5. Configure the digital certificate parameters related to SSL authentication.
-
-   The information following each command indicates operation success.
-
-   ```bash
-   gs_guc set -D /mogdb/data/datanode -c "ssl_cert_file='server.crt'"
-   gs_guc set: ssl_cert_file='server.crt'
-   ```
-
-   ```bash
-   gs_guc set -D /mogdb/data/datanode -c "ssl_key_file='server.key'"
-   gs_guc set: ssl_key_file='server.key'
-   ```
-
-   ```bash
-   gs_guc set -D /mogdb/data/datanode -c "ssl_ca_file='cacert.pem'"
-   gs_guc set: ssl_ca_file='cacert.pem'
-   ```
-
-6. Restart the database.
-
-   ```bash
-   gs_om -t stop && gs_om -t start
-   ```
-
-**Configuring the Client**
-
-To configure the client, perform the following steps:
-
-Upload the certificate files **client.key.pk8**, **client.crt**, and **cacert.pem** generated in **Configuring the Server** to the client.
-
-**Example**
-
-Note: Choose either Example 1 or Example 2.
-
-```java
-public class SSL{
-    public static void main(String[] args) {
-        Properties urlProps = new Properties();
-        String urls = "jdbc:opengauss://10.29.37.136:8000/postgres";
-
-        /**
-         * ================== Example 1: Use the NonValidatingFactory channel.
-         */
-        urlProps.setProperty("sslfactory","org.opengauss.ssl.NonValidatingFactory");
-        urlProps.setProperty("user", "world");
-        urlProps.setProperty("password", "test@123");
-        urlProps.setProperty("ssl", "true");
-        /**
-         * ================== Example 2: Use a certificate.
- */ - urlProps.setProperty("sslcert", "client.crt"); - urlProps.setProperty("sslkey", "client.key.pk8"); - urlProps.setProperty("sslrootcert", "cacert.pem"); - urlProps.setProperty("user", "world"); - urlProps.setProperty("ssl", "true"); - /* sslmode can be set to require, verify-ca, or verify-full. Select one from the following three examples.*/ - /* ================== Example 2.1: Set sslmode to require to use the certificate for authentication. */ - urlProps.setProperty("sslmode", "require"); - /* ================== Example 2.2: Set sslmode to verify-ca to use the certificate for authentication. */ - urlProps.setProperty("sslmode", "verify-ca"); - /* ================== Example 2.3: Set sslmode to verify-full to use the certificate (in the Linux OS) for authentication. */ - urls = "jdbc:opengauss://world:8000/postgres"; - urlProps.setProperty("sslmode", "verify-full"); - try { - Class.forName("org.opengauss.Driver").newInstance(); - } catch (Exception e) { - e.printStackTrace(); - } - try { - Connection conn; - conn = DriverManager.getConnection(urls,urlProps); - conn.close(); - } catch (Exception e) { - e.printStackTrace(); - } - } -} -/** - * Note: Convert the client key to the DER format. - * openssl pkcs8 -topk8 -outform DER -in client.key -out client.key.pk8 -nocrypt - * openssl pkcs8 -topk8 -inform PEM -in client.key -outform DER -out client.key.der -v1 PBE-MD5-DES - * openssl pkcs8 -topk8 -inform PEM -in client.key -outform DER -out client.key.der -v1 PBE-SHA1-3DES - * The preceding algorithms are not recommended due to their low security. - * If the customer needs to use a higher-level private key encryption algorithm, the following private key encryption algorithms can be used after the BouncyCastle or a third-party private key is used to decrypt the password package: - * openssl pkcs8 -in client.key -topk8 -outform DER -out client.key.der -v2 AES128 - * openssl pkcs8 -in client.key -topk8 -outform DER -out client.key.der -v2 aes-256-cbc -iter 1000000 - * openssl pkcs8 -in client.key -topk8 -out client.key.der -outform Der -v2 aes-256-cbc -v2prf hmacWithSHA512 - * Enable BouncyCastle: Introduce the bcpkix-jdk15on.jar package for projects that use JDBC. The recommended version is 1.65 or later. - */ -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/7-running-sql-statements.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/7-running-sql-statements.md deleted file mode 100644 index 93d3d686..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/7-running-sql-statements.md +++ /dev/null @@ -1,242 +0,0 @@ ---- -title: Running SQL Statements -summary: Running SQL Statements -author: Guo Huan -date: 2021-04-26 ---- - -# Running SQL Statements - -**Running a Common SQL Statement** - -To enable an application to operate data in the database by running SQL statements (statements that do not need to transfer parameters), perform the following operations: - -1. Create a statement object by calling the **createStatement** method in **Connection**. - - ```bash - Connection conn = DriverManager.getConnection("url","user","password"); - Statement stmt = conn.createStatement(); - ``` - -2. Run the SQL statement by calling the **executeUpdate** method in **Statement**. 
-
-   ```java
-   int rc = stmt.executeUpdate("CREATE TABLE customer_t1(c_customer_sk INTEGER, c_customer_name VARCHAR(32));");
-   ```
-
-   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-   >
-   > - If an execution request (not in a transaction block) received in the database contains multiple statements, the request is packed into a transaction. **VACUUM** is not supported in a transaction block. If one of the statements fails, the entire request will be rolled back.
-   > - Use semicolons (;) to separate statements. Stored procedures, functions, and anonymous blocks do not support multi-statement execution.
-   > - The slash (/) can be used as the terminator for creating a single stored procedure, function, or anonymous block.
-   > - When `prepareThreshold` is set to `1`, because the default `preferQueryMode` does not evict entries from the statement cache, each SQL statement is cached and memory ballooning can occur. Set `preferQueryMode` to `extendedCacheEverything` to evict entries from the statement cache.
-
-3. Close the statement object.
-
-   ```
-   stmt.close();
-   ```
-
-**Running a Prepared SQL Statement**
-
-Prepared statements are compiled and optimized once but can be reused in different scenarios by assigning different values. Using prepared statements improves execution efficiency. If you need to run a statement several times, use a precompiled statement. Perform the following operations:
-
-1. Create a prepared statement object by calling the prepareStatement method in Connection.
-
-   ```java
-   PreparedStatement pstmt = con.prepareStatement("UPDATE customer_t1 SET c_customer_name = ? WHERE c_customer_sk = 1");
-   ```
-
-2. Set parameters by calling the setShort method in PreparedStatement.
-
-   ```java
-   pstmt.setShort(1, (short)2);
-   ```
-
-3. Run the prepared statement by calling the executeUpdate method in PreparedStatement.
-
-   ```java
-   int rowcount = pstmt.executeUpdate();
-   ```
-
-4. Close the prepared statement object by calling the close method in PreparedStatement.
-
-   ```java
-   pstmt.close();
-   ```
-
-**Calling a Stored Procedure**
-
-To call an existing stored procedure through JDBC in MogDB, perform the following operations:
-
-1. Create a call statement object by calling the **prepareCall** method in **Connection**.
-
-   ```java
-   Connection myConn = DriverManager.getConnection("url","user","password");
-   CallableStatement cstmt = myConn.prepareCall("{? = CALL TESTPROC(?,?,?)}");
-   ```
-
-2. Set parameters by calling the **setInt** method in **CallableStatement**.
-
-   ```
-   cstmt.setInt(2, 50);
-   cstmt.setInt(1, 20);
-   cstmt.setInt(3, 90);
-   ```
-
-3. Register an output parameter by calling the **registerOutParameter** method in **CallableStatement**.
-
-   ```
-   cstmt.registerOutParameter(4, Types.INTEGER); // Register an OUT parameter of the integer type.
-   ```
-
-4. Call the stored procedure by calling the **execute** method in **CallableStatement**.
-
-   ```
-   cstmt.execute();
-   ```
-
-5. Obtain the output parameter by calling the **getInt** method in **CallableStatement**.
-
-   ```
-   int out = cstmt.getInt(4); // Obtain the OUT parameter.
-   ```
-
-   Example:
-
-   ```
-   // The following stored procedure (containing the OUT parameter) has been created:
-   create or replace procedure testproc
-   (
-       psv_in1 in integer,
-       psv_in2 in integer,
-       psv_inout in out integer
-   )
-   as
-   begin
-       psv_inout := psv_in1 + psv_in2 + psv_inout;
-   end;
-   /
-   ```
-
-6. Close the call statement by calling the **close** method in **CallableStatement**.
- - ``` - cstmt.close(); - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - Many database classes such as Connection, Statement, and ResultSet have a close() method. Close these classes after using their objects. Closing Connection will close all the related Statements, and closing a Statement will close its ResultSet. - > - Some JDBC drivers support named parameters, which can be used to set parameters by name rather than sequence. If a parameter has the default value, you do not need to specify any parameter value but can use the default value directly. Even though the parameter sequence changes during a stored procedure, the application does not need to be modified. Currently, the MogDB JDBC driver does not support this method. - > - MogDB does not support functions containing OUT parameters, or stored procedures and function parameters containing default values. - > - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE**: - > - > - If JDBC is used to call a stored procedure whose returned value is a cursor, the returned cursor cannot be used. - > - A stored procedure and an SQL statement must be run separately. - -**Calling a Stored Procedure When Overloading Is Enabled in Oracle Compatibility Mode** - -After the **behavior_compat_options='proc_outparam_override'** parameter is enabled, perform the following steps to call the stored procedure: - -1. Create a call statement object by calling the **prepareCall** method in **Connection**. - - ```bash - Connection conn = DriverManager.getConnection("url","user","password"); - CallableStatement cs = conn.prepareCall("{ CALL TEST_PROC(?,?,?) }"); - ``` - -2. Set parameters by calling the **setInt** method in **CallableStatement**. - - ```bash - PGobject pGobject = new PGobject(); - pGobject.setType("public.compfoo"); // Set the composite type name. The format is "schema.typename". - pGobject.setValue("(1,demo)"); //: Bind the value of the composite type. The format is "(value1,value2)". - cs.setObject(1, pGobject); - ``` - -3. Register an output parameter by calling the **registerOutParameter** method in **CallableStatement**. - - ```bash - //Register an out parameter of the composite type. The format is "schema.typename". - cs.registerOutParameter(2, Types.STRUCT, "public.compfoo"); - ``` - -4. Call the stored procedure by calling the **execute** method in **CallableStatement**. - - ```bash - cs.execute(); - ``` - -5. Obtain the output parameter by calling the **getObject** method in **CallableStatement**. - - ```bash - PGobject result = (PGobject)cs.getObject(2); // Obtain the out parameter. - result.getValue(); // Obtain the string value of the composite type. - result.getArrayValue(); // Obtain the array values of the composite type and sort the values according to the sequence of columns of the composite type. - result.getStruct(); // Obtain the subtype names of the composite type and sort them according to the creation sequence. - ``` - -6. Close the call statement by calling the **close** method in **CallableStatement**. - - ```bash - cs.close(); - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - After the Oracle compatibility mode is enabled, you must use the **{call proc_name(?,?,?)}** format to call a stored procedure and use the **{? = call func_name(?,?)}** format to call a function. The question mark (?) 
on the left of the equals sign is the placeholder for the function's return value and is used to register that return value.
-> - After **behavior_compat_options** is set to **'proc_outparam_override'**, the service needs to re-establish a connection. Otherwise, the stored procedures and functions cannot be correctly called.
-> - If a function or stored procedure contains a composite type, bind and register parameters in the schema.typename format.
-
-Example:
-
-```
-// Create a composite data type in the database.
-CREATE TYPE compfoo AS (f1 int, f3 text);
-// The following stored procedure (containing the OUT parameter) has been created:
-create or replace procedure test_proc
-(
-    psv_in in compfoo,
-    psv_out out compfoo
-)
-as
-begin
-    psv_out := psv_in;
-end;
-/
-```
-
-**Batch Processing**
-
-When a prepared statement processes multiple pieces of similar data, the database creates only one execution plan. This improves compilation and optimization efficiency. Perform the following operations:
-
-1. Create a prepared statement object by calling the prepareStatement method in Connection.
-
-   ```
-   Connection conn = DriverManager.getConnection("url","user","password");
-   PreparedStatement pstmt = conn.prepareStatement("INSERT INTO customer_t1 VALUES (?)");
-   ```
-
-2. Call setShort for each piece of data, and call addBatch to confirm that the setting is complete.
-
-   ```
-   pstmt.setShort(1, (short)2);
-   pstmt.addBatch();
-   ```
-
-3. Perform batch processing by calling the executeBatch method in PreparedStatement.
-
-   ```
-   int[] rowcount = pstmt.executeBatch();
-   ```
-
-4. Close the prepared statement object by calling the close method in PreparedStatement.
-
-   ```
-   pstmt.close();
-   ```
-
-   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-   > Do not interrupt an ongoing batch processing action; otherwise, database performance will deteriorate. Disable automatic commit during batch processing and manually commit every few rows. The statement for disabling automatic commit is **conn.setAutoCommit(false);**.
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/8-processing-data-in-a-result-set.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/8-processing-data-in-a-result-set.md
deleted file mode 100644
index d45f9df5..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/8-processing-data-in-a-result-set.md
+++ /dev/null
@@ -1,76 +0,0 @@
----
-title: Processing Data in a Result Set
-summary: Processing Data in a Result Set
-author: Guo Huan
-date: 2021-04-26
----
-
-# Processing Data in a Result Set
-
-**Setting a Result Set Type**
-
-Different types of result sets apply to different application scenarios, so applications select the proper type of result set based on their requirements. Before running an SQL statement, you must create a statement object, and some of the methods that create statement objects can set the type of the result set. [Table 1](#Result set types) lists the result set parameters. The related Connection methods are as follows:
-
-```
-// Create a Statement object. This object will generate a ResultSet object with a specified type and concurrency.
-createStatement(int resultSetType, int resultSetConcurrency);
-
-// Create a PreparedStatement object. This object will generate a ResultSet object with a specified type and concurrency.
-prepareStatement(String sql, int resultSetType, int resultSetConcurrency); - -// Create a CallableStatement object. This object will generate a ResultSet object with a specified type and concurrency. -prepareCall(String sql, int resultSetType, int resultSetConcurrency); -``` - -**Table 1** Result set types - -| Parameter | Description | -| :------------------- | :----------------------------------------------------------- | -| resultSetType | Type of a result set. There are three types of result sets:
- **ResultSet.TYPE_FORWARD_ONLY**: The ResultSet object can only be navigated forward. It is the default value.
- **ResultSet.TYPE_SCROLL_SENSITIVE**: The ResultSet object is sensitive to changes in the underlying data: scrolling back to a modified row shows the modified result.
- **ResultSet.TYPE_SCROLL_INSENSITIVE**: The ResultSet object is insensitive to changes in the underlying data source.
NOTE:
After a result set has obtained data from the database, the result set is insensitive to data changes made by other transactions, even if the result set type is **ResultSet.TYPE_SCROLL_SENSITIVE**. To obtain up-to-date data of the record pointed to by the cursor, call the refreshRow() method of the ResultSet object. | -| resultSetConcurrency | Concurrency type of a result set. There are two types of concurrency.
- **ResultSet.CONCUR_READ_ONLY**: read-only result set. Data in the result set cannot be updated through the result set itself; an update requires a separately created update statement.
- **ResultSet.CONCUR_UPDATABLE**: updatable result set. Data in the result set can be updated if the result set is scrollable. |
-
-**Positioning a Cursor in a Result Set**
-
-ResultSet objects include a cursor pointing to the current data row. The cursor is initially positioned before the first row. The next method moves the cursor to the next row from its current position. When a ResultSet object does not have a next row, a call to the next method returns **false**. Therefore, this method is used in the while loop for result set iteration. However, the JDBC driver provides more cursor positioning methods for scrollable result sets, which allow positioning the cursor at a specified row. [Table 2](#Methods for positioning) describes these methods.
-
-**Table 2** Methods for positioning a cursor in a result set
-
-| Method | Description |
-| :------------ | :----------------------------------------------------------- |
-| next() | Moves the cursor to the next row from its current position. |
-| previous() | Moves the cursor to the previous row from its current position. |
-| beforeFirst() | Places the cursor before the first row. |
-| afterLast() | Places the cursor after the last row. |
-| first() | Places the cursor on the first row. |
-| last() | Places the cursor on the last row. |
-| absolute(int) | Places the cursor on the specified row. |
-| relative(int) | Moves the cursor forward or backward the specified number of rows from its current position (relative(1) is equivalent to next(); relative(-1) is equivalent to previous()). |
-
-**Obtaining the Cursor Position from a Result Set**
-
-For a scrollable result set, the positioning methods above change the cursor position. The JDBC driver also provides methods to obtain the current cursor position in a result set. [Table 3](#Methods for obtaining) describes these methods.
-
-**Table 3** Methods for obtaining a cursor position in a result set
-
-| Method | Description |
-| :-------------- | :------------------------------------------------- |
-| isFirst() | Checks whether the cursor is on the first row. |
-| isLast() | Checks whether the cursor is on the last row. |
-| isBeforeFirst() | Checks whether the cursor is before the first row. |
-| isAfterLast() | Checks whether the cursor is after the last row. |
-| getRow() | Gets the current row number of the cursor. |
-
-**Obtaining Data from a Result Set**
-
-ResultSet objects provide a variety of methods to obtain data from a result set. [Table 4](#Common methods for obtaining) describes the common methods for obtaining data. If you want to know more about other methods, see the official JDK documents.
-
-**Table 4** Common methods for obtaining data from a result set
-
-| Method | Description |
-| :----------------------------------- | :----------------------------------------------------------- |
-| int getInt(int columnIndex) | Retrieves the value of the column designated by a column index in the current row as an integer. |
-| int getInt(String columnLabel) | Retrieves the value of the column designated by a column label in the current row as an integer. |
-| String getString(int columnIndex) | Retrieves the value of the column designated by a column index in the current row as a string. |
-| String getString(String columnLabel) | Retrieves the value of the column designated by a column label in the current row as a string. |
-| Date getDate(int columnIndex) | Retrieves the value of the column designated by a column index in the current row as a date. |
-| Date getDate(String columnLabel) | Retrieves the value of the column designated by a column label in the current row as a date. |
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/8.1-log-management.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/8.1-log-management.md
deleted file mode 100644
index af93fb3d..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/8.1-log-management.md
+++ /dev/null
@@ -1,118 +0,0 @@
----
-title: Log Management
-summary: Log Management
-author: Zhang Cuiping
-date: 2021-10-11
----
-
-# Log Management
-
-The MogDB JDBC driver uses log records to help diagnose problems when the driver is used in applications. MogDB JDBC supports the following log management methods:
-
-1. Use the SLF4J log framework for interconnecting with applications.
-2. Use the JdkLogger log framework for interconnecting with applications.
-
-SLF4J and JdkLogger are mainstream frameworks for Java application log management in the industry. For details about how to use these frameworks, see the official documents.
-
-- SLF4J:
-- JdkLogger:
-
-Method 1: Use the SLF4J log framework for interconnecting with applications.
-
-When a connection is set up, **logger=Slf4JLogger** is configured in the URL.
-
-SLF4J may be implemented by using Log4j or Log4j2. When Log4j is used to implement SLF4J, the following JAR packages need to be added: **log4j-\*.jar**, **slf4j-api-\*.jar**, and **slf4j-log4\*-\*.jar** (\* varies according to versions), and the configuration file **log4j.properties**. If Log4j2 is used to implement SLF4J, you need to add the following JAR packages: **log4j-api-\*.jar**, **log4j-core-\*.jar**, **log4j-slf4j18-impl-\*.jar**, and **slf4j-api-\*-alpha1.jar** (\* varies according to versions), and the configuration file **log4j2.xml**.
-
-This method supports log management and control: SLF4J provides powerful log management and control functions through its configuration files. This method is recommended.
-
-Example:
-
-```
-public static Connection GetConnection(String username, String passwd) {
-
-    String sourceURL = "jdbc:opengauss://10.10.0.13:8000/postgres?logger=Slf4JLogger";
-    Connection conn = null;
-
-    try {
-        // Create a connection.
-        conn = DriverManager.getConnection(sourceURL, username, passwd);
-        System.out.println("Connection succeed!");
-    } catch (Exception e) {
-        e.printStackTrace();
-        return null;
-    }
-    return conn;
-}
-```
-
-The following is an example of the **log4j.properties** file:
-
-```
-log4j.logger.org.opengauss=ALL, log_gsjdbc
-
-# Default file output configuration
-log4j.appender.log_gsjdbc=org.apache.log4j.RollingFileAppender
-log4j.appender.log_gsjdbc.Append=true
-log4j.appender.log_gsjdbc.File=gsjdbc.log
-log4j.appender.log_gsjdbc.Threshold=TRACE
-log4j.appender.log_gsjdbc.MaxFileSize=10MB
-log4j.appender.log_gsjdbc.MaxBackupIndex=5
-log4j.appender.log_gsjdbc.layout=org.apache.log4j.PatternLayout
-log4j.appender.log_gsjdbc.layout.ConversionPattern=%d %p %t %c - %m%n
-log4j.appender.log_gsjdbc.File.Encoding = UTF-8
-```
-
-The following is an example of the **log4j2.xml** file (a minimal configuration equivalent to the **log4j.properties** example above):
-
-```
-<?xml version="1.0" encoding="UTF-8"?>
-<Configuration status="OFF">
-    <Appenders>
-        <RollingFile name="log_gsjdbc" fileName="gsjdbc.log" filePattern="gsjdbc-%i.log">
-            <PatternLayout charset="UTF-8" pattern="%d %p %t %c - %m%n"/>
-            <Policies>
-                <SizeBasedTriggeringPolicy size="10MB"/>
-            </Policies>
-            <DefaultRolloverStrategy max="5"/>
-        </RollingFile>
-    </Appenders>
-    <Loggers>
-        <Logger name="org.opengauss" level="ALL" additivity="false">
-            <AppenderRef ref="log_gsjdbc"/>
-        </Logger>
-        <Root level="OFF"/>
-    </Loggers>
-</Configuration>
-```
-
-Method 2: Use the JdkLogger log framework for interconnecting with applications.
-
-The default Java logging framework stores its configurations in a file named **logging.properties**.
Java installs the global configuration file in the **lib** folder of the Java installation directory. A **logging.properties** file can also be created and stored with an individual project.
-
-Configuration example of **logging.properties**:
-
-```
-# Specify the handler as a file handler.
-handlers= java.util.logging.FileHandler
-
-# Specify the default global log level.
-.level= ALL
-
-# Specify the log output settings.
-java.util.logging.FileHandler.level=ALL
-java.util.logging.FileHandler.pattern = gsjdbc.log
-java.util.logging.FileHandler.limit = 500000
-java.util.logging.FileHandler.count = 30
-java.util.logging.FileHandler.formatter = java.util.logging.SimpleFormatter
-java.util.logging.FileHandler.append=false
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/9-closing-a-connection.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/9-closing-a-connection.md
deleted file mode 100644
index 2d6064b5..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/9-closing-a-connection.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: Closing a Connection
-summary: Closing a Connection
-author: Guo Huan
-date: 2021-04-26
----
-
-# Closing a Connection
-
-After you complete the required data operations in the database, close the database connection.
-
-Call the close method to close the connection, for example:
-
-```text
-Connection conn = DriverManager.getConnection("url","user","password");
-conn.close();
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/connecting-to-a-database-using-uds.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/connecting-to-a-database-using-uds.md
deleted file mode 100644
index aba6a8e4..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/connecting-to-a-database-using-uds.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: Connecting to a Database (Using UDS)
-summary: Connecting to a Database (Using UDS)
-author: Guo Huan
-date: 2021-04-26
----
-
-# Connecting to a Database (Using UDS)
-
-The Unix domain socket is used for data exchange between different processes on the same host. You can add **junixsocket** to obtain the socket factory.
-
-The **junixsocket-core-*XXX*.jar**, **junixsocket-common-*XXX*.jar**, and **junixsocket-native-common-*XXX*.jar** JAR packages need to be referenced. In addition, you need to add **socketFactory=org.newsclub.net.unix.AFUNIXSocketFactory$FactoryArg&socketFactoryArg=***[path-to-the-unix-socket]* to the URL connection string.
- -Example: - -``` -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.Statement; -import java.util.Properties; - -public class Test { - public static void main(String[] args) { - String driver = "org.opengauss.Driver"; - Connection conn; - try { - Class.forName(driver).newInstance(); - Properties properties = new Properties(); - properties.setProperty("user", "username"); - properties.setProperty("password", "password"); - conn = DriverManager.getConnection("jdbc:opengauss://localhost:8000/postgres?socketFactory=org.newsclub" + - ".net.unix" + - ".AFUNIXSocketFactory$FactoryArg&socketFactoryArg=/data/tmp/.s.PGSQL.8000", - properties); - System.out.println("Connection Successful!"); - Statement statement = conn.createStatement(); - statement.executeQuery("select 1"); - } catch (Exception e) { - e.printStackTrace(); - } - } -} -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - Set the **socketFactoryArg** parameter based on the actual path. The value must be the same as that of the GUC parameter **unix_socket_directory**. -> - The connection host name must be set to **localhost**. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/development-based-on-jdbc.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/development-based-on-jdbc.md deleted file mode 100644 index ce61112a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/development-based-on-jdbc.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Development Based on JDBC -summary: Development Based on JDBC -author: Guo Huan -date: 2023-05-18 ---- - -# Development Based on JDBC - -Java Database Connectivity (JDBC) is a Java API for running SQL statements. It provides unified access interfaces for different relational databases, based on which applications process data. MogDB supports JDBC 4.0 and requires JDK 1.8 for code compiling. It does not support JDBC-ODBC bridge. 
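As a quick orientation before the chapters listed below, the typical end-to-end flow looks as follows. This is a minimal sketch: the server address, database name, and credentials are placeholders to be replaced with actual values.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class QuickStart {
    public static void main(String[] args) throws Exception {
        // Load the driver class shipped with the MogDB (openGauss) JDBC package.
        Class.forName("org.opengauss.Driver");
        // Connect, run a trivial query, and release all resources in reverse order.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:opengauss://localhost:8000/postgres", "username", "password");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT 1")) {
            while (rs.next()) {
                System.out.println(rs.getInt(1));
            }
        }
    }
}
```

Each step of this flow (loading the driver, connecting, running statements, processing result sets, and closing the connection) is covered in detail in the chapters below.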
- -- **[JDBC Package, Driver Class, and Environment Class](2-jdbc-package-driver-class-and-environment-class.md)** -- **[Development Process](3-development-process.md)** -- **[Loading the Driver](4-loading-the-driver.md)** -- **[Connecting to a Database](5-connecting-to-a-database.md)** -- **[Connecting to the Database (Using SSL)](6-connecting-to-a-database-using-ssl.md)** -- **[Connecting to a Database (Using UDS)](connecting-to-a-database-using-uds.md)** -- **[Running SQL Statements](7-running-sql-statements.md)** -- **[Processing Data in a Result Set](8-processing-data-in-a-result-set.md)** -- **[Closing a Connection](9-closing-a-connection.md)** -- **[Log Management](8.1-log-management.md)** -- **[Example: Common Operations](10-example-common-operations.md)** -- **[Example Retrying SQL Queries for Applications](11-example-retrying-sql-queries-for-applications.md)** -- **[Example Importing and Exporting Data Through Local Files](12-example-importing-and-exporting-data-through-local-files.md)** -- **[Example 2 Migrating Data from a MY Database to MogDB](13-example-2-migrating-data-from-a-my-database-to-mogdb.md)** -- **[Example Logic Replication Code](14-example-logic-replication-code.md)** -- **[Example: Parameters for Connecting to the Database in Different Scenarios](14.1-example-parameters-for-connecting-to-the-database-in-different-scenarios.md)** -- **[Example: JDBC Primary/Standby Cluster Load Balancing](example-jdbc-primary-and-backup-cluster-load-balancing.md)** -- **[JDBC Interface Reference](15-JDBC/jdbc-interface-reference.md)** -- **[Common JDBC Parameters](jdbc-based-common-parameter-reference.md)** -- **[JDBC Release Note](jdbc-release-notes.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/example-jdbc-primary-and-backup-cluster-load-balancing.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/example-jdbc-primary-and-backup-cluster-load-balancing.md deleted file mode 100644 index dd49d3fb..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/example-jdbc-primary-and-backup-cluster-load-balancing.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: Example JDBC Primary/Standby Cluster Load Balancing -summary: Example JDBC Primary/Standby Cluster Load Balancing -author: Guo Huan -date: 2023-04-18 ---- - -# Example: JDBC Primary/Standby Cluster Load Balancing - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** In the following example, **host:port** represents a node, where **host** indicates the name or IP address of the server where the database resides, and **port** indicates the port number of the server where the database resides. - -## JDBC Load Balancing Functions - -JDBC can set multiple database nodes in the URL to access the primary/standby cluster. After the load balancing parameter **autoBalance** is set in the URL, JDBC can establish the connection between the client and the primary/standby cluster on each node in the URL based on specific load balancing rules to implement load balancing. The default value is **false**. In this case, the JDBC always connects to the same node that meets the connection setup conditions configured in the URL. When connecting to the primary/standby cluster, ensure that no write operation is performed in the service or use this parameter together with **targetServerType=slave** to restrict the client to connect only to the standby node. 
Currently, JDBC provides four load balancing modes: roundrobin, priority roundrobin, leastconn, and shuffle. The following provides some examples.
-
-- **roundrobin**: Round-robin mode, that is, JDBC connects to the candidate nodes in the URL string in turn. The value can be **roundrobin**, **true**, or **balance**.
-
-  - If a client wants to connect to the one-primary-two-standby cluster in round-robin mode and select nodes in the cluster in turn when creating connections multiple times, you can use the following configuration:
-
-    ```
-    jdbc:opengauss://node1,node2,node3/database?autoBalance=roundrobin
-    ```
-
-  - If a client wants to use the round-robin mode to connect only to the standby nodes of the one-primary-two-standby cluster and perform read-only operations on them, you can use the following configuration:
-
-    ```
-    jdbc:opengauss://node1,node2,node3/database?autoBalance=roundrobin&targetServerType=slave
-    ```
-
-  - If a client wants to use the round-robin mode to connect only to the primary node of the one-primary-two-standby cluster to prevent write operations from being routed to a standby node, you can use the following configuration:
-
-    ```
-    jdbc:opengauss://node1,node2,node3/database?autoBalance=roundrobin&targetServerType=master
-    ```
-
-- **shuffle**: Shuffle mode, where a node in the URL string is randomly selected to establish a connection. The value is **shuffle**. The reference configuration for connecting to a one-primary-two-standby cluster in shuffle mode is as follows:
-
-```
-jdbc:opengauss://node1,node2,node3/database?autoBalance=shuffle
-```
-
-- **leastconn**: Minimum connection mode, where candidate nodes are sorted based on the number of valid connections on each node, and connections are preferentially established with the node that has fewer connections. This mode collects statistics on the connections established in leastconn mode in the current cluster through the current driver and periodically checks the validity of these connections. The value is **leastconn**. The reference configuration for connecting to a one-primary-two-standby cluster in leastconn mode is as follows:
-
-```
-jdbc:opengauss://node1,node2,node3/database?autoBalance=leastconn
-```
-
-- **priority roundrobin**: Round-robin mode with priorities, where connections to the first *n* candidate nodes are preferentially established. The value is **priority[n]** (for example, **priority2**), where **n** is a non-negative integer. Take the one-primary-two-standby cluster as an example. You can set the parameter as follows if a client wants to preferentially execute services on the primary node and standby node 1, with standby node 2 serving only as a backup when the other nodes are abnormal.
-
-```
-jdbc:opengauss://node1,node2,node3/database?autoBalance=priority2
-```
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **CAUTION:**
->
-> - JDBC load balancing identifies a cluster based on the node set specified in the URL string. If multiple URLs with the same node set and load balancing mode use the same driver to establish connections, JDBC considers the connections as connections in the same cluster and performs load balancing as a whole.
-> - JDBC supports driver-level load balancing. It balances the load of connections created in the same cluster based on the driver, not based on the actual number of connections on each node in the cluster or on other drivers.
-> - In leastconn mode, the heartbeat thread is enabled, and scheduled tasks such as connection validity monitoring are executed every 20 seconds. If the heartbeat thread detects that the number of cached connections in leastconn mode is 0 for two consecutive times, the heartbeat thread is disabled and the cached leastconn information is cleared.
-
-## JDBC Quick Load Balancing Functions when Cluster Status Changes
-
-This function applies to the scenario where a connection pool is used to connect to the primary/standby cluster and the leastconn mode is set. The connection pool usually maintains a certain number of long-lived connections. When a node in the cluster fails, the connection pool re-creates connections on the remaining nodes. When the failed node is recovered, the connection pool has already created sufficient long-lived connections on the other nodes, so the newly recovered node stays idle unless some of the original connections are closed. When this function is enabled, JDBC periodically checks the status of the nodes configured in the URL. When detecting that a node is recovered, JDBC filters out idle connections on the other nodes and closes them. After detecting that the number of cached connections decreases, the connection pool creates connections on the newly recovered node based on the leastconn mode to rebalance the cluster load. This function is used together with **enableQuickAutoBalance**, **maxIdleTimeBeforeTerminal**, **minReservedConPerCluster**, and **minReservedConPerDatanode**. The details are as follows:
-
-- **enableQuickAutoBalance**: indicates whether to enable the JDBC quick load balancing function when the cluster changes. This function must be used together with the leastconn mode. When this parameter is set to **true** and **autoBalance** is set to **leastconn**, this function is enabled. This parameter takes effect per connection.
-
-  - Value range: **true** or **false**
-  - Default value: **false**
-
-- **maxIdleTimeBeforeTerminal**: maximum idle duration of a connection when JDBC triggers quick load balancing. When filtering idle connections, the JDBC quick load balancing function considers connections that have been in the idle state for a period longer than or equal to the value of **maxIdleTimeBeforeTerminal** as idle connections; these connections may be closed by JDBC. This parameter is valid only when **autoBalance** is set to **leastconn** and **enableQuickAutoBalance** is set to **true**. This parameter takes effect per connection.
-
-  - Unit: seconds.
-  - Value range: an integer in [0, 9223372036854775).
-  - Default value: **0**
-
-- **minReservedConPerCluster**: minimum percentage of idle connections reserved in a cluster when JDBC triggers quick load balancing. By default, when JDBC triggers quick load balancing, all filtered idle connections are closed. If this parameter is set, JDBC reserves at least *minReservedConPerCluster*% of the idle connections in the cluster when quick load balancing is triggered. This parameter takes effect per cluster. If this parameter is set for multiple URLs that configure the same node set and enable the quick load balancing function, the minimum value is used.
-
-  - Value range: an integer in [0, 100].
-  - Default value: **0**
-
-- **minReservedConPerDatanode**: minimum percentage of idle connections reserved on a node when JDBC triggers quick load balancing. If this parameter is set, JDBC reserves at least *minReservedConPerDatanode*% of the idle connections on each node when quick load balancing is triggered.
If both **minReservedConPerDatanode** and **minReservedConPerCluster** are set for the URL string, JDBC ensures that the filtered connections to be closed meet both parameters. This parameter takes effect for clusters. If this parameter is set for multiple URLs for which the same node is configured and the quick load balancing function is enabled, the minimum value is used. - - Value range: an integer in [0, 100]. - - Default value: **0** - -The following is a simple case for enabling JDBC quick load balancing. You can perform the following configuration to enable the leastconn mode when the JDBC connection is established and the quick load balancing when the cluster status changes. After the functions are enabled, JDBC caches valid connections created using the URL and periodically queries the status of node 1, node 2, and node 3. When detecting that the node is recovered, JDBC filters out idle connections (idle duration > 30 seconds) from the cached connections and closes the connections. The connection pool establishes connections in leastconn mode to rebalance the number of connections in the cluster. - -``` -jdbc:opengauss://node1,node2,node3/database?autoBalance=leastconn&enableQuickAutoBalance=true -``` - -In addition, you can set additional parameters to control the criteria for filtering idle connections and the percentage of idle connections to be closed in the cluster and on each node. - -``` -jdbc:opengauss://node1,node2,node3/database?autoBalance=leastconn&enableQuickAutoBalance=true&maxIdleTimeBeforeTerminal=20&minReservedConPerCluster=20&minReservedConPerDatanode=20 -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **CAUTION:** -> -> - JDBC implements load balancing only based on the connections of the driver in the same cluster. When quick load balancing is triggered, only the connections created by the driver in a cluster and configured with corresponding parameters are disabled. -> - To use this function, you need to adjust parameters to meet the service requirements of the client. The JDBC cannot detect whether a connection is required by the actual service. Therefore, the JDBC filters out connections that can be closed by determining idle connections. If the parameters do not match the actual service requirements, connections held by a user may be closed. -> - When performing quick load balancing, JDBC closes some connections that meet the conditions based on the configured parameters. If most existing connections do not meet the conditions, for example, all connections are active, the quick load balancing result may be poor. -> - The JDBC quick load balancing function enables the heartbeat thread to close idle connections by phase. Quick load balancing depends on the leastconn mode. Therefore, the mechanism for disabling this function is the same as that in the leastconn mode. 
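As a rough end-to-end sketch, an application (or a connection pool's connection factory) might combine the URL parameters described above as follows. The node names, port, database name, and credentials are placeholders:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class QuickBalanceExample {
    public static void main(String[] args) throws Exception {
        // leastconn routing plus quick load balancing; connections idle for
        // 30 seconds or longer become candidates for closing when a node recovers.
        String url = "jdbc:opengauss://node1:8000,node2:8000,node3:8000/database"
                + "?autoBalance=leastconn"
                + "&enableQuickAutoBalance=true"
                + "&maxIdleTimeBeforeTerminal=30";
        Properties props = new Properties();
        props.setProperty("user", "username");
        props.setProperty("password", "password");
        // Each new connection is routed to the node that currently has the
        // fewest connections created by this driver in leastconn mode.
        try (Connection conn = DriverManager.getConnection(url, props)) {
            System.out.println("Connected: " + !conn.isClosed());
        }
    }
}
```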
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/jdbc-based-common-parameter-reference.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/jdbc-based-common-parameter-reference.md
deleted file mode 100644
index 491e3a33..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/jdbc-based-common-parameter-reference.md
+++ /dev/null
@@ -1,108 +0,0 @@
----
-title: Common JDBC Parameters
-summary: Common JDBC Parameters
-author: Guo Huan
-date: 2021-04-26
----
-
-# Common JDBC Parameters
-
-## targetServerType
-
-**Principle**: If the value is **master**, JDBC attempts to connect to the IP addresses configured in the string in sequence until the primary node in the cluster is connected.
-
-If the value is **slave**, JDBC attempts to connect to the IP addresses configured in the string in sequence until a standby node in the cluster is connected. The query statement is **select local_role, db_state from pg_stat_get_stream_replications();**.
-
-**Suggestion**: You are advised to set this parameter to **master** for services with write operations to ensure that the primary node can be properly connected after a primary/standby switchover. However, if the standby node has not been completely promoted to primary during the switchover, the connection cannot be established and service statements cannot be executed.
-
-## hostRecheckSeconds
-
-**Principle**: Specifies the period during which the DN list stored in JDBC remains trusted. Within this period, the DN list is directly read from the host addresses stored in JDBC. After that (or if the primary node fails to be connected within the specified period), the node status in the DN list is updated and other IP addresses are connected.
-
-**Suggestion**: The default value is **10s**. You are advised to adjust the value based on service requirements. This parameter is used together with the **targetServerType** parameter.
-
-## allowReadOnly
-
-**Principle**: Checks whether the transaction access mode can be modified through **setReadOnly**. If the value is **true**, the transaction access mode can be modified. If the value is **false**, the transaction access mode cannot be modified through this interface; to modify it, execute **SET SESSION CHARACTERISTICS AS TRANSACTION + READ ONLY / READ WRITE**.
-
-**Suggestion**: The default value **true** is recommended.
-
-## fetchsize
-
-**Principle**: After **fetchsize** is set to *n* and the database server executes a query, JDBC communicates with the server when the invoker executes **resultset.next()**, fetches *n* pieces of data to the JDBC client, and returns the first piece of data to the invoker. When the invoker fetches the (*n*+1)th data record, the invoker fetches data from the database server again.
-
-**Function**: This prevents the database from transmitting all results to the client at a time, which could exhaust the client's memory resources.
-
-**Suggestion**: You are advised to set this parameter based on the amount of data queried by services and the memory of the client. When setting **fetchsize**, disable automatic commit (**autocommit**=**false**). Otherwise, the setting of **fetchsize** does not take effect.
-
-## defaultRowFetchSize
-
-**Function**: The default value of **fetchsize** is **0**. Setting **defaultRowFetchSize** will change the default value of **fetchsize**.
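A minimal sketch of applying **fetchsize** from application code follows. The connection details and the **customer_t1** table are placeholders, and, as noted above, automatic commit must be disabled for the setting to take effect:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class FetchSizeExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:opengauss://localhost:8000/postgres", "username", "password")) {
            // fetchsize only takes effect when automatic commit is disabled.
            conn.setAutoCommit(false);
            try (PreparedStatement pstmt =
                         conn.prepareStatement("SELECT * FROM customer_t1")) {
                // Fetch 1000 rows per server round trip instead of the whole result set.
                pstmt.setFetchSize(1000);
                try (ResultSet rs = pstmt.executeQuery()) {
                    while (rs.next()) {
                        // Process one row at a time here.
                    }
                }
            }
            conn.commit();
        }
    }
}
```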
-
-## batchMode
-
-**Function**: This parameter specifies whether to connect to the database in batch mode. The default value is **on**. After the function is enabled, batch update performance is improved and the return values are also reported in batch form. For example, if three data records are inserted in batches, the return value is **[3,0,0]** when the function is enabled and **[1,1,1]** when it is disabled.
-
-**Suggestion**: If the service framework (such as Hibernate) checks the return value during batch updates, you can set this parameter to solve the problem.
-
-## loginTimeout
-
-**Function**: Controls the time for establishing a connection with the database. The time includes the connection timeout and the socket timeout. If the time elapsed exceeds the threshold, the connection attempt exits. The calculation formula is as follows: **loginTimeout** = **connectTimeout** x Number of nodes + Connection authentication time + Initialization statement execution time.
-
-**Suggestion**: After this parameter is set, an asynchronous thread is started each time a connection is established. If there are a large number of connections, the pressure on the client may increase. If this parameter needs to be set, you are advised to set it to 3 x **connectTimeout** in centralized deployment to prevent connection failures when the network is abnormal and the third IP address is the IP address of the primary node.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** After this parameter is set, for multiple IP addresses, the value of this parameter is the total time for attempting to connect to all the IP addresses. If this parameter is set to a small value, the later IP addresses may fail to be connected. For example, if three IP addresses are set, **loginTimeout** is set to **5s**, and it takes 5s to connect to the first two IP addresses, the third IP address cannot be connected. In the centralized deployment environment, the last IP address is the IP address of the primary node. As a result, the automatic search for the primary node may fail.
-
-## cancelSignalTimeout
-
-**Function**: Cancel messages may cause a block. This parameter controls **connectTimeout** and **socketTimeout** in a cancel message, in seconds. It prevents the cancel operation itself from hanging when a connection is being canceled due to a timeout.
-
-**Suggestion**: The default value is **10s**. You are advised to adjust the value based on service requirements.
-
-## connectTimeout
-
-**Function**: Controls the socket timeout threshold during connection setup. This timeout threshold is the time JDBC takes to connect to the database through the socket, not the time until the connection object is returned. If the time elapsed exceeds the threshold, JDBC tries the next IP address.
-
-**Suggestion**: This parameter determines the maximum timeout interval for establishing a TCP connection to each node. If a network fault occurs on a node, JDBC attempts to connect to the node until the time specified by **connectTimeout** elapses, and then attempts to connect to the next node. Considering network jitter and delay, you are advised to set this parameter to **3s**.
-
-## socketTimeout
-
-**Function**: Controls the timeout threshold of socket operations.
If the time of executing service statements or reading data streams from the network exceeds the threshold (that is, the statement execution time exceeds the specified threshold and no data is returned), the connection is interrupted.
-
-**Suggestion**: This parameter specifies the maximum execution time of a single SQL statement. If the execution time of a single SQL statement exceeds the value of this parameter, an error is reported and the statement exits. You are advised to set this parameter based on service characteristics.
-
-## autosave
-
-**Function**: If the value is **always**, a savepoint is automatically set before each statement in a transaction. If an error is reported during statement execution in the transaction, the system rolls back to the latest savepoint, so that subsequent statements in the transaction can be properly executed and committed.
-
-**Suggestion**: You are not advised to set this parameter because it severely degrades performance.
-
-## currentSchema
-
-**Function**: Specifies the schema of the current connection. If this parameter is not set, the default schema is the username used for the connection.
-
-**Suggestion**: You are advised to set this parameter to the schema where the service data is located.
-
-## prepareThreshold
-
-**Function**: The default value is **5**. If an SQL statement is executed multiple consecutive times in a session and the number of executions specified by **prepareThreshold** is reached, JDBC no longer sends the PARSE command for the SQL statement but uses the cached statement to improve execution speed.
-
-**Suggestion**: The default value is **5**. Adjust the value based on service requirements.
-
-## preparedStatementCacheQueries
-
-**Function**: Specifies the number of queries cached in each connection. The default value is **256**. If more than 256 different queries are used in **prepareStatement()** calls, the least recently used query cache entries will be discarded.
-
-**Suggestion**: The default value is **256**. Adjust the value based on service requirements. This parameter is used together with **prepareThreshold**.
-
-## blobMode
-
-**Function**: Determines the data type to which the **setBinaryStream** method assigns values, for example, for parameters in **PreparedStatement** and **CallableStatement** objects. The value **on** indicates that values are assigned to BLOB data, and the value **off** indicates that values are assigned to bytea data. The default value is **on**.
-
-**Suggestion**: The default value **on** is recommended.
-
-## setAutocommit
-
-**Function**: If the value is **true**, a transaction is automatically started when each statement is executed, and the transaction is automatically committed after the execution is complete; that is, each statement is a transaction. If the value is **false**, a transaction is automatically started, but you need to commit it manually.
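For example, with automatic commit disabled, the application controls the transaction boundary explicitly. This is a minimal sketch reusing the **customer_t1** table from the earlier examples:

```java
Connection conn = DriverManager.getConnection("url", "user", "password");
conn.setAutoCommit(false);
try {
    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO customer_t1 VALUES (?)");
    pstmt.setShort(1, (short) 2);
    pstmt.executeUpdate();
    pstmt.close();
    // With autocommit disabled, the insert becomes visible to other
    // sessions only after this explicit commit.
    conn.commit();
} catch (Exception e) {
    // Roll back the open transaction on failure.
    conn.rollback();
} finally {
    conn.close();
}
```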
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/jdbc-release-notes.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/jdbc-release-notes.md deleted file mode 100644 index a35e97da..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/2-development-based-on-jdbc/jdbc-release-notes.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: JDBC Release Note -summary: JDBC Release Note -author: Bin.Liu -date: 2023-12-13 ---- - -# MogDB JDBC Changelog - -MogDB JDBC Changelog mainly contains new features, improvements, BUG fixes and other changes. Please read the details below carefully to understand any changes. - -## 5.0.0.9 - -2024-09-26 - -### Add - -- Add RAW type adaptation. Support getBytes/getBlob. SQLType is VARBINARY - -### Fixed - -- RegisterOut procedure/function out parameter CHAR as VARCHAR -- Modify the method of obtaining the data returned by the stored procedure/function cursor under `enable_plsql_return_hold_cursor=on`. The error in other scenarios remains unchanged - - Change the original `FETCH ALL in {cursorName}` to `FETCH FORWARD {fetchSize} in {cursorName}` under `autocommit=true` - - `fetchSize` is configured by prepareCall.setFetchSize and connection string `defaultRowFetchSize`. If neither is configured, the default value is 10 - - `close {cursorName}` statement will be executed under `rs.close()` -- Error when the procedure/function out parameter is BLOB/CLOB/RAW and is NULL -- Modify the BLOB type to return getColumnClassName to `org.postgresql.core.PGBlob` -- Modify the CLOB type return getColumnClassName to `org.postgresql.core.PGClob` -- The procedure/function parameter type is CHAR/VARCHAR/NVARCHAR, and the registered type is a mixed scenario of CHAR/VARCHAR/NVARCHAR -- registerOutParameter registers `TIME_WITH_TIMEZONE` as `TIMETZ` type -- registerOutParameter registers `TIMESTAMP_WITH_TIMEZONE` as `TIMESTAMPTZ` type -- setNull(1,types.Array,"table of") issue - -## 5.0.0.8 - -2024-07-01 - -### Add - -- connection.getMetaData.getDriverName() The unified name is no longer displayed PostgreSQL/openGauss - - mogdb-jdbc-{version}.og `MogDB JDBC Driver For og` - - mogdb-jdbc-{version}.pg `MogDB JDBC Driver For pg` - - mogdb-jdbc-{version}.mg `MogDB JDBC Driver For mg` - -### Fixed - -- `PgCallableStatement` supports `executeQuery` method without error -- Adapted to MogDB 5.0.6 to return the number of error rows in batch mode (batchMode=on) -- `ParseSQL` supports UTF8 encoding full-width space parsing -- The driver automatically sets `behavior_compat_options` to include `compat_oracle_txn_control` according to the autocommit value - - The `compat_oracle_txn_control` option cannot be set through `set behavior_compat_options` after connection - - autocommit=true. Automatically remove the option `compat_oracle_txn_control` in `behavior_compat_options` - - autocommit=false. 
Check whether `compat_oracle_txn_control` exists according to the initial connection to set `behavior_compat_options` - - If it exists at the initialization, `behavior_compat_options` contains `compat_oracle_txn_control` - - If initialization does not exist, `behavior_compat_options` does not contain `compat_oracle_txn_control` -- PrepareCall setBlob and registerOut blob send incorrect flags to the kernel -- PgStruct.getBastTypename returns without schema information - -## Version 5.0.0.7 (2024-04-15) - -### Fixed - -- Fix DB Parameter `compat_oracle_txn_control` and url `autosave`/ `setSavepoint` issue - - - `autosave` decides whether to send `savepoint` based on kernel transaction status - - Using the `setSavepoint` method does not consider the `compat_oracle_txn_control` parameter and the same logic as before - -## 5.0.0.6 - -2024-03-28 - -### Fixed - -- Fix db kernel parameter `compat_oracle_txn_control` and jdbc `setAutoCommit(true)` not commit issue - -## 5.0.0.5 - -2024-03-20 - -### Add - -- Adapt kernel parameter `enable_plsql_return_hold_cursor` scenario to fetchSize -- Adapt kernel parameter `compat_oracle_txn_control` does not send `start trans` under setAutoCommit(false) - -### Fixed - -- Fixed int2vector/oidvector/oidvector_extend/int2vector_extend query data problem -- Fixed the scenario where hibernate sets the Clob length to 0 - -## 5.0.0.4 - -2024-01-10 - -### Add - -- [feature](https://gitee.com/opengauss/openGauss-connector-jdbc/pulls/162) JDBC supports read/write splitting routing based on SQL statements at the statement level -- [I8G7TQ](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I8G7TQ) JDBC support cleanupSavepoints -- [I7WQOW](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7WQOW) JDBC support uint1/uint2/uint4/uint8 - -### Fixed - -- [I8FJTX](https://gitee.com/opengauss/Plugin/issues/I8FJTX) The blob type returns an exception -- [I8GEZY](https://gitee.com/opengauss/Plugin/issues/I8GEZY) JDBC to connect compatible B database , an error is reported in the execution trigger syntax and the statement is truncated -- [I8I0AW](https://gitee.com/opengauss/openGauss-server/issues/I8I0AW) you cannot insert double-quoted column names into uppercase databases -- [I8AJBK](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I8AJBK) When JDBC inserts data in batches, the preparedStatementCacheQueries parameter does not take effect, and the cachedplan data on the database side rises rapidly - -### Changed - -- [I89ZZU](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I89ZZU) ParseSQL supports symmetric block comments (C style) -- [] Control whether to actively close the stored procedure return cursor according to the kernel parameter `enable_plsql_return_hold_cursor` -- [] Add parameter `enableUsrpwdCaseInsensitive` to control whether the connection user name and password are case-insensitive according to the kernel parameter `enable_usrpwd_case_insensitive` -- []Adapt Oracle `begin end` anonymous block SQL statement execution - -## 5.0.0.3 - -2023-11-16 - -### Add - -- [I8G9XM](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I8G9XM) jdbc return Type value for openGauss cursor type is inconsistent with Oracle and needs to be compatible -- [I8G9SY](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I8G9SY) jdbc calls procedure to register the parameter as double, but the database return type is NUMERIC, and an error occurs -- [I8G9N7](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I8G9N7) jdbc calls 
procedure to register the parameter as char, but the database return type is varchar, and an error occurs - -### Fixed - -- [I8D6H1](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I8D6H1) When a field in the struct is null, there will be exceptions in parsing and generating -- [I8B89Q](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I8B89Q) UTF8Encoding has defects -- [I8C2X3](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I8C2X3) errMessageForQuery does not consider the length issue -- [I84URC](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I84URC) In the jdbc driver, there is a problem with the "/" division operator in the SQL statement. - -### Changed - -- [I81MKF](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I81MKF) PgStruct cannot handle data containing special characters or special formats in input or output -- [I82Z5W](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I82Z5W) The JDBC driver supports processing returning statements during batch insertion - -## 5.0.0.2 - -2023-10-12 - -### Add - -- [I7IGAY](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7IGAY) When CallableStatement registers the parameter type as Types.STRUCT, CallableStatement.execute() reports an error -- [I7WYWS](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7WYWS) supports JDBC standard createStruct interface -- [I7VHXI](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7VHXI) The information obtained by PgObject through PgArray through PgObject.getArrayValue() does not meet expectations. -- [I7QQ05](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7QQ05) It is recommended to support the option parameter in the PG native JDBC driver connection string. -- [I7JDF9](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7JDF9) jdbc calls a custom function. The out parameter and return value are of numeric type 0, and the return results are inconsistent. - -### Fixed - -- [I7W41A](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7W41A) benchmarksql and database cannot be tested normally using unix domain sockets when they are on the same machine -- [I7PJFQ](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7PJFQ) The driver under Druid reported an IO exception -- [I7PAP9](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7PAP9) When using mybatis to convert java.util.Date to Timestamp, the data cannot be filtered correctly. 
- -### Changed - -- [I7WXRI](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7WXRI) The problem of the upper limit of the number of bind variables needs to be fixed (incorporated into pgjdbc’s fix) -- [I7OP8Y](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7OP8Y) In JDBC batch mode, the bound variable type takes the first batch OID optimization -- [I7I9DQ](https://gitee.com/opengauss/openGauss-connector-jdbc/issues/I7I9DQ) There is ambiguity in _getNameStatement in typecache diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/1-development-based-on-odbc.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/1-development-based-on-odbc.md deleted file mode 100644 index 90236f78..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/1-development-based-on-odbc.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Development Based on ODBC -summary: Development Based on ODBC -author: Guo Huan -date: 2021-04-26 ---- - -# Development Based on ODBC - -- **[ODBC Packages, Dependent Libraries, and Header Files](2-odbc-packages-dependent-libraries-and-header-files.md)** -- **[Configuring a Data Source in the Linux OS](3-configuring-a-data-source-in-the-linux-os.md)** -- **[Development Process](4-development-process.md)** -- **[Example: Common Functions and Batch Binding](5-example-common-functions-and-batch-binding.md)** -- **[Typical Application Scenarios and Configurations](5.1-typical-application-scenarios-and-configurations.md)** -- **[ODBC Interface Reference](6-ODBC/odbc-interface-reference.md)** - -Open Database Connectivity (ODBC) is a Microsoft API for accessing databases based on the X/OPEN CLI. Applications interact with the database through the APIs provided by ODBC, which enhances their portability, scalability, and maintainability. - -[Figure 1](#ODBC) shows the system structure of ODBC. - -**Figure 1** ODBC system structure - -![odbc-system-structure](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/development-based-on-odbc-overview-2.png) - -MogDB supports ODBC 3.5 in the following environments. - -**Table 1** OSs Supported by ODBC - -| OS | Platform | -| :------------------------------------------------- | :------- | -| CentOS 6.4/6.5/6.6/6.7/6.8/6.9/7.0/7.1/7.2/7.3/7.4 | x86_64 | -| CentOS 7.6 | ARM64 | -| EulerOS 2.0 SP2/SP3 | x86_64 | -| EulerOS 2.0 SP8 | ARM64 | - -The ODBC Driver Manager running on UNIX or Linux can be unixODBC or iODBC. unixODBC-2.3.0 is used as the component for connecting the database. - -Windows has a native ODBC Driver Manager. You can locate **Data Sources (ODBC)** by choosing **Control Panel** > **Administrative Tools**. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> The current database ODBC driver is based on an open-source version and may be incompatible with data types tinyint, smalldatetime, and nvarchar2. 
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/2-odbc-packages-dependent-libraries-and-header-files.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/2-odbc-packages-dependent-libraries-and-header-files.md
deleted file mode 100644
index b5314fe1..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/2-odbc-packages-dependent-libraries-and-header-files.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: ODBC Packages, Dependent Libraries, and Header Files
-summary: ODBC Packages, Dependent Libraries, and Header Files
-author: Guo Huan
-date: 2021-04-26
----
-
-# ODBC Packages, Dependent Libraries, and Header Files
-
-**ODBC Packages for the Linux OS**
-
-Obtain the [**openGauss-x.x.x-ODBC.tar.gz**](https://opengauss.org/en/download/) package from the release package. In the Linux OS, header files (including **sql.h** and **sqlext.h**) and a library (**libodbc.so**) are required for application development. These header files and the library can be obtained from the **unixODBC-2.3.0** installation package.
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/3-configuring-a-data-source-in-the-linux-os.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/3-configuring-a-data-source-in-the-linux-os.md
deleted file mode 100644
index 6038f7ae..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/3-configuring-a-data-source-in-the-linux-os.md
+++ /dev/null
@@ -1,328 +0,0 @@
----
-title: Configuring a Data Source in the Linux OS
-summary: Configuring a Data Source in the Linux OS
-author: Guo Huan
-date: 2021-04-26
----
-
-# Configuring a Data Source in the Linux OS
-
-The ODBC driver (psqlodbcw.so) provided by MogDB can be used after it has been configured in a data source. To configure a data source, you must configure the **odbc.ini** and **odbcinst.ini** files on the server. The two files are generated during the unixODBC compilation and installation, and are saved in the **/usr/local/etc** directory by default.
-
-**Procedure**
-
-1. Obtain the source code package of unixODBC at the following link:
-
-   [https://sourceforge.net/projects/unixodbc/files/unixODBC](https://sourceforge.net/projects/unixodbc/files/unixODBC)
-
-   After the download, verify the package's integrity based on the integrity verification algorithm provided by the community.
-
-2. Install unixODBC. It does not matter if unixODBC of another version has been installed.
-
-   Currently, unixODBC-2.2.1 is not supported. For example, to install unixODBC-2.3.0, run the commands below. unixODBC is installed in the **/usr/local** directory by default. The data source files are generated in the **/usr/local/etc** directory, and the library files are generated in the **/usr/local/lib** directory.
-
-   ```bash
-   tar zxvf unixODBC-2.3.0.tar.gz
-   cd unixODBC-2.3.0
-   # Modify the configure file. (If it does not exist, modify the configure.ac file.) Find LIB_VERSION.
-   # Change the value of LIB_VERSION to 1:0:0 to compile a *.so.1 dynamic library with the same dependency on psqlodbcw.so.
-   vim configure
-
-   ./configure --enable-gui=no # To perform compilation on an ARM server, add the configure parameter --build=aarch64-unknown-linux-gnu.
-   make
-   # The installation may require root permissions.
-   make install
-   ```
-
-3. Replace the openGauss driver on the client.
-
-   a. Decompress the **openGauss-x.x.x-ODBC.tar.gz** package.
After the decompression, the **lib** and **odbc** folders are generated. The **odbc** folder contains another **lib** folder. Copy the **psqlodbca.la**, **psqlodbca.so**, **psqlodbcw.la**, and **psqlodbcw.so** files from **/odbc/lib** to **/usr/local/lib**. - - b. Copy the library in the **lib** directory obtained after decompressing **openGauss-x.x.x-ODBC.tar.gz** to the **/usr/local/lib** directory. - -4. Configure a data source. - - a. Configure the ODBC driver file. - - Add the following content to the **/usr/local/etc/odbcinst.ini** file: - - ``` - [GaussMPP] - Driver64=/usr/local/lib/psqlodbcw.so - setup=/usr/local/lib/psqlodbcw.so - ``` - - For descriptions of the parameters in the **odbcinst.ini** file, see [Table 1](#odbcinst.ini). - - **Table 1** odbcinst.ini configuration parameters - - | **Parameter** | **Description** | **Example** | - | ------------- | ------------------------------------------------------------ | ------------------------------------ | - | [DriverName] | Driver name, corresponding to the driver in DSN. | [DRIVER_N] | - | Driver64 | Path of the dynamic driver library. | Driver64=/usr/local/lib/psqlodbcw.so | - | setup | Driver installation path, which is the same as the dynamic library path in Driver64. | setup=/usr/local/lib/psqlodbcw.so | - - b. Configure the data source file. - - Add the following content to the **/usr/local/etc/odbc.ini** file: - - ```bash - [MPPODBC] - Driver=GaussMPP - Servername=10.145.130.26 (IP address of the server where the database resides) - Database=postgres (database name) - Username=omm (database username) - Password= (user password of the database) - Port=8000 (listening port of the database) - Sslmode=allow - ``` - - For descriptions of the parameters in the **odbc.ini** file, see [Table 2](#odbc.ini). - - **Table 2** odbc.ini configuration parameters - - | **Parameter** | **Description** | **Example** | - | ----------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | - | [DSN] | Data source name | [MPPODBC] | - | Driver | Driver name, corresponding to DriverName in **odbcinst.ini** | Driver=DRIVER_N | - | Servername | Server IP address. Multiple IP addresses can be configured. | Servername=10.145.130.26 | - | Database | Name of the database to connect to | Database=postgres | - | Username | Database username | Username=omm | - | Password | Database user password | Password=
NOTE:
After a user establishes a connection, the ODBC driver automatically clears the password stored in memory.
However, if this parameter is configured, unixODBC caches the data source file, which may keep the password in memory for a long time.
When connecting from an application, you are advised to pass the password through the connection API instead of writing it in the data source configuration file. After the connection has been established, immediately clear the memory segment in which the password is stored. |
- | Port | Port number of the server | Port=8000 |
- | Sslmode | Whether to enable SSL | Sslmode=allow |
- | Debug | If this parameter is set to **1**, the **mylog** file of the PostgreSQL ODBC driver will be printed. The directory generated for storing logs is **/tmp/**. If this parameter is set to **0**, no directory is generated. | Debug=1 |
- | UseServerSidePrepare | Whether to enable the extended query protocol for the database.
The value can be **0** or **1**. The default value is **1**, indicating that the extended query protocol is enabled. | UseServerSidePrepare=1 | - | UseBatchProtocol | Whether to enable the batch query protocol. If it is enabled, DML performance can be improved. The value can be **0** or **1**. The default value is **1**.
If this parameter is set to **0**, the batch query protocol is disabled (mainly for communication with earlier database versions).
If this parameter is set to **1** and **support_batch_bind** is set to **on**, the batch query protocol is enabled. | UseBatchProtocol=1 |
- | ForExtensionConnector | Specifies whether savepoint statements are sent. | ForExtensionConnector=1 |
- | UnamedPrepStmtThreshold | Each time **SQLFreeHandle** is invoked to release statements, ODBC sends a **Deallocate plan_name** statement to the server. Such statements can be numerous in a service. To reduce the number of statements to be sent, **stmt->plan_name** is left empty so that the database identifies them as unnamed statements. This parameter controls the threshold for unnamed statements. | UnamedPrepStmtThreshold=100 |
- | ConnectionExtraInfo | Whether to display the driver deployment path and process owner in the **connection_info** parameter mentioned in [connection_info](../../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#connection_info). | ConnectionExtraInfo=1. NOTE: The default value is **0**. If this parameter is set to **1**, the ODBC driver reports the driver deployment path and process owner to the database and displays the information in the **connection_info** parameter (see [connection_info](../../../reference-guide/guc-parameters/connection-and-authentication/connection-settings.md#connection_info)). In this case, you can query the information from [PG_STAT_ACTIVITY](../../../reference-guide/system-catalogs-and-system-views/system-views/PG_STAT_ACTIVITY.md). |
- | BoolsAsChar | If this parameter is set to **Yes**, Boolean values are mapped to the SQL_CHAR type. If this parameter is not set, they are mapped to the SQL_BIT type. | BoolsAsChar = Yes |
- | RowVersioning | When an attempt is made to update a row of data, setting this parameter to **Yes** allows the application to detect whether the data has been modified by other users. | RowVersioning = Yes |
- | ShowSystemTables | By default, the driver regards the system table as a common SQL table. | ShowSystemTables = Yes |
-
- The valid values of **Sslmode** are as follows:
-
- **Table 3** Sslmode options
-
- | sslmode | Whether SSL Encryption Is Enabled | Description |
- | ----------- | --------------------------------- | ------------------------------------------------------------ |
- | disable | No | SSL connection is not enabled. |
- | allow | Possible | If the database server requires an SSL connection, an SSL connection can be established. However, the authenticity of the database server will not be verified. |
- | prefer | Possible | If the database supports SSL connections, an SSL connection is preferred. However, the authenticity of the database server will not be verified. |
- | require | Yes | An SSL connection is required and data is encrypted. However, the authenticity of the database server will not be verified. |
- | verify-ca | Yes | An SSL connection is required, and whether the database has a trusted certificate will be verified. |
- | verify-full | Yes | An SSL connection is required. In addition to the checks performed by **verify-ca**, the system checks whether the name of the host where the database resides matches that in the certificate. MogDB does not support this mode. |
-
-5. (Optional) Generate an SSL certificate. For details, see [Generating Certificates](../../../security-guide/security/1-client-access-authentication.md#Generating Certificates). This step and step 6 are required only when the server and the client connect over SSL; they can be skipped for non-SSL connections.
-
-6. (Optional) Replace an SSL certificate. For details, see [Replacing Certificates](../../../security-guide/security/1-client-access-authentication.md#Replacing Certificates).
-
-7. Enable SSL mode.
-
-   Place the certificate files as follows, and ensure that the permission for the **client.key\*** series files is set to **600**.
-
-   ```
-   Go back to the root directory, create the .postgresql directory, and save root.crt, client.crt, client.key, client.key.cipher, client.key.rand, client.req, server.crt, server.key, server.key.cipher, server.key.rand, and server.req to the .postgresql directory.
-   In the Unix OS, server.crt and server.key must not be accessible to the group or other users. Run the following command to set this permission:
-   chmod 0600 server.key
-   Copy the certificate files whose names start with root.crt and server to the install/data directory of the database (the directory is the same as that of the postgresql.conf file).
-   Modify the postgresql.conf file.
-   ssl = on
-   ssl_cert_file = 'server.crt'
-   ssl_key_file = 'server.key'
-   ssl_ca_file = 'root.crt'
-   After modifying the parameters, restart the database.
-   Set the sslmode parameter to require or verify-ca in the odbc.ini file.
-   ```
-
-8. Configure the database server.
-
-   a. Log in as the OS user **omm** to the primary node of the database.
-
-   b. Run the following command to add NIC IP addresses or host names, with values separated by commas (,). The NICs and hosts are used to provide external services. In the following command, *NodeName* specifies the name of the current node.
-
-   ```
-   gs_guc reload -N NodeName -I all -c "listen_addresses='localhost,192.168.0.100,10.11.12.13'"
-   ```
-
-   If direct routing of LVS is used, add the virtual IP address (10.11.12.13) of LVS to the server listening list.
-
-   You can also set **listen_addresses** to **\*** or **0.0.0.0** to listen on all NICs, but this incurs security risks and is not recommended.
-
-   c. Run the following command to add an authentication rule to the configuration file of the primary database node. In this example, the IP address (10.11.12.13) of the client is the remote host IP address.
-
-   ```
-   gs_guc reload -N all -I all -h "host all jack 10.11.12.13/32 sha256"
-   ```
-
-   > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-   >
-   > - **-N all** indicates all hosts in MogDB.
-   > - **-I all** indicates all instances of the host.
-   > - **-h** specifies statements that need to be added in the **pg_hba.conf** file.
-   > - **all** indicates that a client can connect to any database.
-   > - **jack** indicates the user that accesses the database.
-   > - **10.11.12.13/32** indicates that only hosts with the IP address 10.11.12.13 can connect. Configure the parameter based on your network conditions. **32** indicates that there are 32 bits whose value is 1 in the subnet mask. That is, the subnet mask is 255.255.255.255.
-   > - **sha256** indicates that the password of user **jack** is encrypted using the SHA-256 algorithm.
-
-   If the ODBC client and the primary database node it connects to are deployed on the same machine, you can use the local trust authentication mode. Run the following command:
-
-   ```
-   local all all trust
-   ```
-
-   If the ODBC client and the primary database node it connects to are deployed on different machines, use the SHA-256 authentication mode. Run the following command:
-
-   ```
-   host all all xxx.xxx.xxx.xxx/32 sha256
-   ```
-
-   d. Restart MogDB.
-
-   ```
-   gs_om -t stop
-   gs_om -t start
-   ```
-
-9. 
Configure environment variables on the client.
-
-   ```
-   vim ~/.bashrc
-   ```
-
-   Add the following information to the configuration file:
-
-   ```bash
-   export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH
-   export ODBCSYSINI=/usr/local/etc
-   export ODBCINI=/usr/local/etc/odbc.ini
-   ```
-
-10. Run the following command to apply the settings:
-
-    ```
-    source ~/.bashrc
-    ```
-
-**Verifying the Data Source Configuration**
-
-Run the **./isql -v MPPODBC** command (**MPPODBC** is the data source name).
-
-- If the following information is displayed, the configuration is correct and the connection succeeds.
-
-  ```
-  +---------------------------------------+
-  | Connected!                            |
-  |                                       |
-  | sql-statement                         |
-  | help [tablename]                      |
-  | quit                                  |
-  |                                       |
-  +---------------------------------------+
-  SQL>
-  ```
-
-- If error information is displayed, the configuration is incorrect. Check the configuration.
-
-**FAQs**
-
-- [UnixODBC]Can't open lib 'xxx/xxx/psqlodbcw.so' : file not found.
-
-  Possible causes:
-
-  - The path configured in the **odbcinst.ini** file is incorrect.
-
-    Run **ls** to check the path in the error information, and ensure that the **psqlodbcw.so** file exists and you have execute permissions on it.
-
-  - A dependent library of **psqlodbcw.so** does not exist or is not in the system environment variables.
-
-    Run **ldd** to check the path in the error information. If **libodbc.so.1** or other unixODBC libraries are missing, configure unixODBC again following the procedure in this section, and add the **lib** directory under its installation directory to **LD_LIBRARY_PATH**. If other libraries are missing, add the **lib** directory under the ODBC driver package to **LD_LIBRARY_PATH**.
-
-- [UnixODBC]connect to server failed: no such file or directory
-
-  Possible causes:
-
-  - An incorrect or unreachable database IP address or port number was configured.
-
-    Check the **Servername** and **Port** configuration items in the data source.
-
-  - The server is not listening properly.
-
-    If **Servername** and **Port** are correctly configured, ensure that the correct NIC and port are being listened on by following the database server configuration steps in this section.
-
-  - Firewall and network gatekeeper settings are improper.
-
-    Check firewall settings, and ensure that the database communication port is trusted.
-
-    Check that any network gatekeeper settings are correct.
-
-- [unixODBC]The password-stored method is not supported.
-
-  Possible causes:
-
-  The **sslmode** configuration item is not configured in the data source.
-
-  Solution:
-
-  Set the configuration item to **allow** or a higher level. For details, see [Table 3](#sslmode).
-
-- Server common name "xxxx" does not match host name "xxxxx"
-
-  Possible causes:
-
-  When **verify-full** is used for SSL encryption, the driver checks whether the host name in the certificate is the same as the actual one.
-
-  Solution:
-
-  To solve this problem, use **verify-ca** so that host names are not checked, or generate a set of CA certificates containing the actual host names.
-
-- Driver's SQLAllocHandle on SQL_HANDLE_DBC failed
-
-  Possible causes:
-
-  The executable file (such as the **isql** tool of unixODBC) and the database driver (**psqlodbcw.so**) depend on different library versions of ODBC, such as **libodbc.so.1** and **libodbc.so.2**.
You can verify this problem by using the following method:
-
-  ```
-  ldd `which isql` | grep odbc
-  ldd psqlodbcw.so | grep odbc
-  ```
-
-  If the version suffixes of the **libodbc.so** outputs differ, or if they point to different physical disk files, this problem exists. Both **isql** and **psqlodbcw.so** load **libodbc.so**. If different physical files are loaded, two ODBC libraries with the same function list conflict with each other in the visible domain. As a result, the database driver cannot be loaded.
-
-  Solution:
-
-  Uninstall the unneeded unixODBC library (for example, libodbc.so.2) and create a soft link with the same name and the .so.2 suffix pointing to the remaining libodbc.so.1 library.
-
-- FATAL: Forbid remote connection with trust method!
-
-  For security purposes, the primary database node forbids access from other nodes in MogDB without authentication.
-
-  To access the primary database node from inside MogDB, deploy the ODBC program on the host where the primary database node is located and set the server address to **127.0.0.1**. It is recommended that the service system be deployed outside MogDB. If it is deployed inside, database performance may be affected.
-
-- [unixODBC]Invalid attribute value
-
-  This problem may occur when connecting to MogDB through unixODBC. The possible cause is that the unixODBC version is not the recommended one. You are advised to run the **odbcinst -version** command to check the unixODBC version.
-
-- authentication method 10 not supported.
-
-  If this error occurs on an open-source client, the cause may be:
-
-  The database stores only the SHA-256 hash of the password, but the open-source client supports only MD5 hashes.
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-  >
-  > - The database stores the hashes of user passwords instead of actual passwords.
-  > - If a password is updated or a user is created, both types of hashes will be stored, compatible with open-source authentication protocols.
-  > - An MD5 hash can only be generated from the original password, and the password cannot be recovered by reversing its SHA-256 hash. Passwords stored in the old version only have SHA-256 hashes and therefore do not support MD5 authentication.
-  > - The MD5 algorithm has lower security and poses security risks. Therefore, you are advised to use a more secure encryption algorithm.
-
-  To solve this problem, you can update the user password (see [ALTER USER](../../../reference-guide/sql-syntax/ALTER-USER.md)) or create a user (see [CREATE USER](../../../reference-guide/sql-syntax/CREATE-USER.md)) having the same permissions as the faulty user.
-
-- unsupported frontend protocol 3.51: server supports 1.0 to 3.0
-
-  The database version is too early, or the database is an open-source database. Use a driver version that matches the database.
-
-- FATAL: GSS authentication method is not allowed because XXXX user password is not disabled.
-
-  In **pg_hba.conf** of the target primary database node, the authentication mode for the IP address of the current client is set to **gss**, but this authentication method cannot be used to authenticate such clients. Change the authentication method to **sha256** and try again. For details, see [8](#8).
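-
-As the **Password** note above recommends, credentials can be supplied through the connection API at run time and then scrubbed from memory. A minimal sketch (the DSN **MPPODBC**, the user **omm**, and the helper **read_password** are placeholders or hypothetical, not part of the original procedure):
-
-```c
-#include <string.h>
-#include <sqlext.h>
-
-/* Hypothetical helper that obtains the password from a prompt or secret store. */
-extern void read_password(char *buf, size_t len);
-
-SQLRETURN connect_without_ini_password(SQLHDBC hdbc)
-{
-    char pwd[64];
-    read_password(pwd, sizeof(pwd));
-    SQLRETURN rc = SQLConnect(hdbc, (SQLCHAR*)"MPPODBC", SQL_NTS,
-                              (SQLCHAR*)"omm", SQL_NTS, (SQLCHAR*)pwd, SQL_NTS);
-    memset(pwd, 0, sizeof(pwd));  /* clear the password from memory immediately */
-    return rc;
-}
-```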
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/4-development-process.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/4-development-process.md deleted file mode 100644 index 40fee82e..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/4-development-process.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Development Process -summary: Development Process -author: Guo Huan -date: 2021-04-26 ---- - -# Development Process - -**Figure 1** ODBC-based application development process - -![odbc-based-application-development-process](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/development-process-4.png) - -**APIs Involved in the Development Process** - -**Table 1** API description - -| **Function** | **API** | -| :-------------------------------------------------------- | :----------------------------------------------------------- | -| Allocate a handle | SQLAllocHandle is a generic function for allocating a handle. It can replace the following functions:
- SQLAllocEnv: allocate an environment handle
- SQLAllocConnect: allocate a connection handle
- SQLAllocStmt: allocate a statement handle | -| Set environment attributes | SQLSetEnvAttr | -| Set connection attributes | SQLSetConnectAttr | -| Set statement attributes | SQLSetStmtAttr | -| Connect to a data source | SQLConnect | -| Bind a buffer to a column in the result set | SQLBindCol | -| Bind the parameter marker of an SQL statement to a buffer | SQLBindParameter | -| Return the error message of the last operation | SQLGetDiagRec | -| Prepare an SQL statement for execution | SQLPrepare | -| Run a prepared SQL statement | SQLExecute | -| Run an SQL statement directly | SQLExecDirect | -| Fetch the next row (or rows) from the result set | SQLFetch | -| Return data in a column of the result set | SQLGetData | -| Get the column information from a result set | SQLColAttribute | -| Disconnect from a data source | SQLDisconnect | -| Release a handle | SQLFreeHandle is a generic function for releasing a handle. It can replace the following functions:
- SQLFreeEnv: release an environment handle
- SQLFreeConnect: release a connection handle
- SQLFreeStmt: release a statement handle |
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-> If an execution request (not in a transaction block) received by the database contains multiple statements, the request is packed into a transaction. If one of the statements fails, the entire request will be rolled back.
->
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif)**NOTICE:**
->
-> ODBC is the intermediate layer between the application and the database. It transmits the SQL statements issued by the application to the database and does not parse the SQL syntax itself. Therefore, if an SQL statement written in an application contains confidential information (such as a plaintext password), that information will be exposed in the driver log.
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/5-example-common-functions-and-batch-binding.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/5-example-common-functions-and-batch-binding.md
deleted file mode 100644
index 8709581c..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/5-example-common-functions-and-batch-binding.md
+++ /dev/null
@@ -1,440 +0,0 @@
----
-title: Common Functions and Batch Binding
-summary: Common Functions and Batch Binding
-author: Guo Huan
-date: 2021-04-26
----
-
-# Example: Common Functions and Batch Binding
-
-## Code for Common Functions
-
-```
-//The following example shows how to obtain data from MogDB through the ODBC interface.
-// DBtest.c (compile with: libodbc.so)
-#include <stdlib.h>
-#include <stdio.h>
-#include <sqlext.h>
-#ifdef WIN32
-#include <windows.h>
-#endif
-SQLHENV V_OD_Env;        // Handle ODBC environment
-SQLHSTMT V_OD_hstmt;     // Handle statement
-SQLHDBC V_OD_hdbc;       // Handle connection
-char typename[100];
-SQLINTEGER value = 100;
-SQLINTEGER V_OD_erg,V_OD_buffer,V_OD_err,V_OD_id;
-int main(int argc,char *argv[])
-{
-    // 1. Allocate an environment handle.
-    V_OD_erg = SQLAllocHandle(SQL_HANDLE_ENV,SQL_NULL_HANDLE,&V_OD_Env);
-    if ((V_OD_erg != SQL_SUCCESS) && (V_OD_erg != SQL_SUCCESS_WITH_INFO))
-    {
-        printf("Error AllocHandle\n");
-        exit(0);
-    }
-    // 2. Set environment attributes (version information).
-    SQLSetEnvAttr(V_OD_Env, SQL_ATTR_ODBC_VERSION, (void*)SQL_OV_ODBC3, 0);
-    // 3. Allocate a connection handle.
-    V_OD_erg = SQLAllocHandle(SQL_HANDLE_DBC, V_OD_Env, &V_OD_hdbc);
-    if ((V_OD_erg != SQL_SUCCESS) && (V_OD_erg != SQL_SUCCESS_WITH_INFO))
-    {
-        SQLFreeHandle(SQL_HANDLE_ENV, V_OD_Env);
-        exit(0);
-    }
-    // 4. Set connection attributes.
-    SQLSetConnectAttr(V_OD_hdbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_ON, 0);
-    // 5. Connect to the data source. userName and password indicate the username and password for connecting to the database. Set them as needed.
-    // If the username and password have been set in the odbc.ini file, you do not need to set userName or password here, retaining "" for them. However, you are not advised to do so because the username and password will be disclosed if the permission for odbc.ini is abused.
-    V_OD_erg = SQLConnect(V_OD_hdbc, (SQLCHAR*) "MPPODBC", SQL_NTS,
-                          (SQLCHAR*) "userName", SQL_NTS, (SQLCHAR*) "password", SQL_NTS);
-    if ((V_OD_erg != SQL_SUCCESS) && (V_OD_erg != SQL_SUCCESS_WITH_INFO))
-    {
-        printf("Error SQLConnect %d\n",V_OD_erg);
-        SQLFreeHandle(SQL_HANDLE_ENV, V_OD_Env);
-        exit(0);
-    }
-    printf("Connected !\n");
-    // 6. Allocate a statement handle.
-    SQLAllocHandle(SQL_HANDLE_STMT, V_OD_hdbc, &V_OD_hstmt);
-    // 7. Set statement attributes (the handle must be allocated before its attributes are set).
-    SQLSetStmtAttr(V_OD_hstmt,SQL_ATTR_QUERY_TIMEOUT,(SQLPOINTER *)3,0);
-    // 8. Run SQL statements.
-    SQLExecDirect(V_OD_hstmt,"drop table IF EXISTS customer_t1",SQL_NTS);
-    SQLExecDirect(V_OD_hstmt,"CREATE TABLE customer_t1(c_customer_sk INTEGER, c_customer_name VARCHAR(32));",SQL_NTS);
-    SQLExecDirect(V_OD_hstmt,"insert into customer_t1 values(25,'li')",SQL_NTS);
-    // 9. Prepare for execution.
-    SQLPrepare(V_OD_hstmt,"insert into customer_t1 values(?)",SQL_NTS);
-    // 10. Bind parameters.
-    SQLBindParameter(V_OD_hstmt,1,SQL_PARAM_INPUT,SQL_C_SLONG,SQL_INTEGER,0,0,
-                     &value,0,NULL);
-    // 11. Run prepared statements.
-    SQLExecute(V_OD_hstmt);
-    SQLExecDirect(V_OD_hstmt,"select c_customer_sk from customer_t1",SQL_NTS);
-    // 12. Obtain attributes of a specific column in the result set.
-    SQLColAttribute(V_OD_hstmt,1,SQL_DESC_TYPE,typename,100,NULL,NULL);
-    printf("SQLColAttribute %s\n",typename);
-    // 13. Bind the result set.
-    SQLBindCol(V_OD_hstmt,1,SQL_C_SLONG, (SQLPOINTER)&V_OD_buffer,150,
-               (SQLLEN *)&V_OD_err);
-    // 14. Obtain data in the result set by executing SQLFetch.
-    V_OD_erg=SQLFetch(V_OD_hstmt);
-    // 15. Obtain and return data by executing SQLGetData.
-    while(V_OD_erg != SQL_NO_DATA)
-    {
-        SQLGetData(V_OD_hstmt,1,SQL_C_SLONG,(SQLPOINTER)&V_OD_id,0,NULL);
-        printf("SQLGetData ----ID = %d\n",V_OD_id);
-        V_OD_erg=SQLFetch(V_OD_hstmt);
-    };
-    printf("Done !\n");
-    // 16. Disconnect data source connections and release handles.
-    SQLFreeHandle(SQL_HANDLE_STMT,V_OD_hstmt);
-    SQLDisconnect(V_OD_hdbc);
-    SQLFreeHandle(SQL_HANDLE_DBC,V_OD_hdbc);
-    SQLFreeHandle(SQL_HANDLE_ENV, V_OD_Env);
-    return(0);
-}
-```
-
-## Code for Batch Processing
-
-```
-/**********************************************************************
-* Enable UseBatchProtocol in the data source and set the database parameter support_batch_bind
-* to on.
-* The CHECK_ERROR command is used to check and print error information.
-* This example is used to interactively obtain the DSN, data volume to be processed, and volume of ignored data from users, and insert required data into the test_odbc_batch_insert table.
-***********************************************************************/
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sql.h>
-#include <sqlext.h>
-
-void Exec(SQLHDBC hdbc, SQLCHAR* sql)
-{
-    SQLRETURN retcode;                  // Return status
-    SQLHSTMT hstmt = SQL_NULL_HSTMT;    // Statement handle
-    SQLCHAR loginfo[2048];
-
-    // Allocate Statement Handle
-    retcode = SQLAllocHandle(SQL_HANDLE_STMT, hdbc, &hstmt);
-
-    if (!SQL_SUCCEEDED(retcode)) {
-        printf("SQLAllocHandle(SQL_HANDLE_STMT) failed");
-        return;
-    }
-
-    // Prepare Statement
-    retcode = SQLPrepare(hstmt, (SQLCHAR*) sql, SQL_NTS);
-    sprintf((char*)loginfo, "SQLPrepare log: %s", (char*)sql);
-
-    if (!SQL_SUCCEEDED(retcode)) {
-        printf("SQLPrepare(hstmt, (SQLCHAR*) sql, SQL_NTS) failed");
-        return;
-    }
-
-    // Execute Statement
-    retcode = SQLExecute(hstmt);
-    sprintf((char*)loginfo, "SQLExecute stmt log: %s", (char*)sql);
-
-    if (!SQL_SUCCEEDED(retcode)) {
-        printf("SQLExecute(hstmt) failed");
-        return;
-    }
-    // Free Handle
-    retcode = SQLFreeHandle(SQL_HANDLE_STMT, hstmt);
-    sprintf((char*)loginfo, "SQLFreeHandle stmt log: %s", (char*)sql);
-
-    if (!SQL_SUCCEEDED(retcode)) {
-        printf("SQLFreeHandle(SQL_HANDLE_STMT, hstmt) failed");
-        return;
-    }
-}
-
-int main ()
-{
-    SQLHENV henv = SQL_NULL_HENV;
-    SQLHDBC hdbc = SQL_NULL_HDBC;
-    int batchCount = 1000;      // Amount of data that is bound in batches
-    SQLLEN rowsCount = 0;
-    int ignoreCount = 0;        // Amount of data that is not imported to the database among the data that is bound in batches
-
-    SQLRETURN retcode;
-    SQLCHAR dsn[1024] = {'\0'};
-    SQLCHAR loginfo[2048];
-
-    do
-    {
-        if (ignoreCount > batchCount)
-        {
-            printf("ignoreCount(%d) should be less than batchCount(%d)\n", ignoreCount, batchCount);
-        }
-    }while(ignoreCount > batchCount);
-
-    retcode = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv);
-
-    if (!SQL_SUCCEEDED(retcode)) {
-        printf("SQLAllocHandle failed");
-        goto exit;
-    }
-
-    // Set ODBC Version
-    retcode = SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION,
-        (SQLPOINTER*)SQL_OV_ODBC3, 0);
-
-    if (!SQL_SUCCEEDED(retcode)) {
-        printf("SQLSetEnvAttr failed");
-        goto exit;
-    }
-
-    // Allocate Connection
-    retcode = SQLAllocHandle(SQL_HANDLE_DBC, henv, &hdbc);
-
-    if (!SQL_SUCCEEDED(retcode)) {
-        printf("SQLAllocHandle failed");
-        goto exit;
-    }
-
-    // Set Login Timeout
-    retcode = SQLSetConnectAttr(hdbc, SQL_LOGIN_TIMEOUT, (SQLPOINTER)5, 0);
-
-    if (!SQL_SUCCEEDED(retcode)) {
-        printf("SQLSetConnectAttr failed");
-        goto exit;
-    }
-
-    // Set Auto Commit
-    retcode = SQLSetConnectAttr(hdbc, SQL_ATTR_AUTOCOMMIT,
-        (SQLPOINTER)(1), 0);
-
-    if (!SQL_SUCCEEDED(retcode)) {
-        printf("SQLSetConnectAttr failed");
-        goto exit;
-    }
-
-    // Connect to DSN
-    // gaussdb indicates the name of the data source used by users.
-    sprintf((char*)loginfo, "SQLConnect(DSN:%s)", dsn);
-    retcode = SQLConnect(hdbc, (SQLCHAR*) "gaussdb", SQL_NTS,
-        (SQLCHAR*) NULL, 0, NULL, 0);
-
-    if (!SQL_SUCCEEDED(retcode)) {
-        printf("SQLConnect failed");
-        goto exit;
-    }
-
-    // init table info.
- Exec(hdbc, "drop table if exists test_odbc_batch_insert"); - Exec(hdbc, "create table test_odbc_batch_insert(id int primary key, col varchar2(50))"); - - // The following code constructs the data to be inserted based on the data volume entered by users: - { - SQLRETURN retcode; - SQLHSTMT hstmtinesrt = SQL_NULL_HSTMT; - int i; - SQLCHAR *sql = NULL; - SQLINTEGER *ids = NULL; - SQLCHAR *cols = NULL; - SQLLEN *bufLenIds = NULL; - SQLLEN *bufLenCols = NULL; - SQLUSMALLINT *operptr = NULL; - SQLUSMALLINT *statusptr = NULL; - SQLULEN process = 0; - - // Data is constructed by column. Each column is stored continuously. - ids = (SQLINTEGER*)malloc(sizeof(ids[0]) * batchCount); - cols = (SQLCHAR*)malloc(sizeof(cols[0]) * batchCount * 50); - // Data size in each row for a column - bufLenIds = (SQLLEN*)malloc(sizeof(bufLenIds[0]) * batchCount); - bufLenCols = (SQLLEN*)malloc(sizeof(bufLenCols[0]) * batchCount); - // Whether this row needs to be processed. The value is SQL_PARAM_IGNORE or SQL_PARAM_PROCEED. - operptr = (SQLUSMALLINT*)malloc(sizeof(operptr[0]) * batchCount); - memset(operptr, 0, sizeof(operptr[0]) * batchCount); - // Processing result of the row - // Note: In the database, a statement belongs to one transaction. Therefore, data is processed as a unit. Either all data is inserted successfully or all data fails to be inserted. - statusptr = (SQLUSMALLINT*)malloc(sizeof(statusptr[0]) * batchCount); - memset(statusptr, 88, sizeof(statusptr[0]) * batchCount); - - if (NULL == ids || NULL == cols || NULL == bufLenCols || NULL == bufLenIds) - { - fprintf(stderr, "FAILED:\tmalloc data memory failed\n"); - goto exit; - } - - for (int i = 0; i < batchCount; i++) - { - ids[i] = i; - sprintf(cols + 50 * i, "column test value %d", i); - bufLenIds[i] = sizeof(ids[i]); - bufLenCols[i] = strlen(cols + 50 * i); - operptr[i] = (i < ignoreCount) ? 
SQL_PARAM_IGNORE : SQL_PARAM_PROCEED;
-        }
-
-        // Allocate Statement Handle
-        retcode = SQLAllocHandle(SQL_HANDLE_STMT, hdbc, &hstmtinesrt);
-
-        if (!SQL_SUCCEEDED(retcode)) {
-            printf("SQLAllocHandle failed");
-            goto exit;
-        }
-
-        // Prepare Statement
-        sql = (SQLCHAR*)"insert into test_odbc_batch_insert values(?, ?)";
-        retcode = SQLPrepare(hstmtinesrt, (SQLCHAR*) sql, SQL_NTS);
-        sprintf((char*)loginfo, "SQLPrepare log: %s", (char*)sql);
-
-        if (!SQL_SUCCEEDED(retcode)) {
-            printf("SQLPrepare failed");
-            goto exit;
-        }
-
-        retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)batchCount, sizeof(batchCount));
-
-        if (!SQL_SUCCEEDED(retcode)) {
-            printf("SQLSetStmtAttr failed");
-            goto exit;
-        }
-
-        retcode = SQLBindParameter(hstmtinesrt, 1, SQL_PARAM_INPUT, SQL_C_SLONG, SQL_INTEGER, sizeof(ids[0]), 0,&(ids[0]), 0, bufLenIds);
-
-        if (!SQL_SUCCEEDED(retcode)) {
-            printf("SQLBindParameter failed");
-            goto exit;
-        }
-
-        retcode = SQLBindParameter(hstmtinesrt, 2, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_CHAR, 50, 50, cols, 50, bufLenCols);
-
-        if (!SQL_SUCCEEDED(retcode)) {
-            printf("SQLBindParameter failed");
-            goto exit;
-        }
-
-        retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAMS_PROCESSED_PTR, (SQLPOINTER)&process, sizeof(process));
-
-        if (!SQL_SUCCEEDED(retcode)) {
-            printf("SQLSetStmtAttr failed");
-            goto exit;
-        }
-
-        retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAM_STATUS_PTR, (SQLPOINTER)statusptr, sizeof(statusptr[0]) * batchCount);
-
-        if (!SQL_SUCCEEDED(retcode)) {
-            printf("SQLSetStmtAttr failed");
-            goto exit;
-        }
-
-        retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAM_OPERATION_PTR, (SQLPOINTER)operptr, sizeof(operptr[0]) * batchCount);
-
-        if (!SQL_SUCCEEDED(retcode)) {
-            printf("SQLSetStmtAttr failed");
-            goto exit;
-        }
-
-        retcode = SQLExecute(hstmtinesrt);
-        sprintf((char*)loginfo, "SQLExecute stmt log: %s", (char*)sql);
-
-        if (!SQL_SUCCEEDED(retcode)) {
-            printf("SQLExecute(hstmtinesrt) failed");
-            goto exit;
-        }
-
-        retcode = SQLRowCount(hstmtinesrt, &rowsCount);
-
-        if (!SQL_SUCCEEDED(retcode)) {
-            printf("SQLRowCount failed");
-            goto exit;
-        }
-
-        if (rowsCount != (batchCount - ignoreCount))
-        {
-            sprintf(loginfo, "(batchCount - ignoreCount)(%d) != rowsCount(%d)", (batchCount - ignoreCount), rowsCount);
-
-            if (!SQL_SUCCEEDED(retcode)) {
-                printf("SQLExecute failed");
-                goto exit;
-            }
-        }
-        else
-        {
-            sprintf(loginfo, "(batchCount - ignoreCount)(%d) == rowsCount(%d)", (batchCount - ignoreCount), rowsCount);
-
-            if (!SQL_SUCCEEDED(retcode)) {
-                printf("SQLExecute failed");
-                goto exit;
-            }
-        }
-
-        // check row number returned
-        if (rowsCount != process)
-        {
-            sprintf(loginfo, "process(%d) != rowsCount(%d)", process, rowsCount);
-
-            if (!SQL_SUCCEEDED(retcode)) {
-                printf("SQLExecute failed");
-                goto exit;
-            }
-        }
-        else
-        {
-            sprintf(loginfo, "process(%d) == rowsCount(%d)", process, rowsCount);
-
-            if (!SQL_SUCCEEDED(retcode)) {
-                printf("SQLExecute failed");
-                goto exit;
-            }
-        }
-
-        for (int i = 0; i < batchCount; i++)
-        {
-            if (i < ignoreCount)
-            {
-                if (statusptr[i] != SQL_PARAM_UNUSED)
-                {
-                    sprintf(loginfo, "statusptr[%d](%d) != SQL_PARAM_UNUSED", i, statusptr[i]);
-
-                    if (!SQL_SUCCEEDED(retcode)) {
-                        printf("SQLExecute failed");
-                        goto exit;
-                    }
-                }
-            }
-            else if (statusptr[i] != SQL_PARAM_SUCCESS)
-            {
-                sprintf(loginfo, "statusptr[%d](%d) != SQL_PARAM_SUCCESS", i, statusptr[i]);
-
-                if (!SQL_SUCCEEDED(retcode)) {
-                    printf("SQLExecute failed");
-                    goto exit;
-                }
-            }
-        }
-
-        retcode = SQLFreeHandle(SQL_HANDLE_STMT, hstmtinesrt);
-
sprintf((char*)loginfo, "SQLFreeHandle hstmtinesrt");
-
-        if (!SQL_SUCCEEDED(retcode)) {
-            printf("SQLFreeHandle failed");
-            goto exit;
-        }
-    }
-
-
-exit:
-    (void) printf ("\nComplete.\n");
-
-    // Connection
-    if (hdbc != SQL_NULL_HDBC) {
-        SQLDisconnect(hdbc);
-        SQLFreeHandle(SQL_HANDLE_DBC, hdbc);
-    }
-
-    // Environment
-    if (henv != SQL_NULL_HENV)
-        SQLFreeHandle(SQL_HANDLE_ENV, henv);
-
-    return 0;
-}
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/5.1-typical-application-scenarios-and-configurations.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/5.1-typical-application-scenarios-and-configurations.md
deleted file mode 100644
index b1ed0a77..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/5.1-typical-application-scenarios-and-configurations.md
+++ /dev/null
@@ -1,496 +0,0 @@
----
-title: Typical Application Scenarios and Configurations
-summary: Typical Application Scenarios and Configurations
-author: Zhang Cuiping
-date: 2021-10-11
----
-
-# Typical Application Scenarios and Configurations
-
-## Log Diagnosis Scenario
-
-ODBC logs are classified into unixODBC driver manager logs and psqlODBC driver logs. The former trace whether application API calls succeed; the latter help locate problems using the DFX logs generated by the driver's underlying implementation.
-
-The unixODBC log needs to be configured in the **odbcinst.ini** file:
-
-```bash
-[ODBC]
-Trace=Yes
-TraceFile=/path/to/odbctrace.log
-
-[GaussMPP]
-Driver64=/usr/local/lib/psqlodbcw.so
-setup=/usr/local/lib/psqlodbcw.so
-```
-
-You only need to add the following information to the **odbc.ini** file:
-
-```bash
-[mogdb]
-Driver=GaussMPP
-Servername=10.10.0.13 (database server IP address)
-...
-Debug=1 (Enable the debug log function of the driver.)
-```
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The unixODBC logs are generated in the path configured by **TraceFile**. The psqlODBC driver generates the **mylog_***xxx***.log** file in the **/tmp/** directory.
-
-## High Performance
-
-If a large amount of data needs to be inserted, you are advised to perform the following operations:
-
-- Set **UseBatchProtocol** to **1** in the **odbc.ini** file and **support_batch_bind** to **on** in the database.
-- Ensure that the binding type used by the ODBC program is the same as the column type in the database.
-- Ensure that the character set of the client is the same as that of the database.
-- Commit transactions manually.
-
-**odbc.ini** configuration file:
-
-```bash
-[mogdb]
-Driver=GaussMPP
-Servername=10.10.0.13 (database server IP address)
-...
-UseBatchProtocol=1 (enabled by default)
-ConnSettings=set client_encoding=UTF8 (Set the character code on the client to be the same as that on the server.)
-```
-
-Binding type case (the exact header set was garbled in the source; the includes below are a reasonable reconstruction covering everything the example uses):
-
-```c
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sql.h>
-#include <sqltypes.h>
-#include <sqlext.h>
-
-#define MESSAGE_BUFFER_LEN 128
-SQLHANDLE h_env = NULL;
-SQLHANDLE h_conn = NULL;
-SQLHANDLE h_stmt = NULL;
-void print_error()
-{
-    SQLCHAR Sqlstate[SQL_SQLSTATE_SIZE+1];
-    SQLINTEGER NativeError;
-    SQLCHAR MessageText[MESSAGE_BUFFER_LEN];
-    SQLSMALLINT TextLength;
-    SQLRETURN ret = SQL_ERROR;
-
-    ret = SQLGetDiagRec(SQL_HANDLE_STMT, h_stmt, 1, Sqlstate, &NativeError, MessageText, MESSAGE_BUFFER_LEN, &TextLength);
-    if ( SQL_SUCCESS == ret)
-    {
-        printf("\n STMT ERROR-%05d %s", NativeError, MessageText);
-        return;
-    }
-
-    ret = SQLGetDiagRec(SQL_HANDLE_DBC, h_conn, 1, Sqlstate, &NativeError, MessageText, MESSAGE_BUFFER_LEN, &TextLength);
-    if ( SQL_SUCCESS == ret)
-    {
-        printf("\n CONN ERROR-%05d %s", NativeError, MessageText);
-        return;
-    }
-
-    ret = SQLGetDiagRec(SQL_HANDLE_ENV, h_env, 1, Sqlstate, &NativeError, MessageText, MESSAGE_BUFFER_LEN, &TextLength);
-    if ( SQL_SUCCESS == ret)
-    {
-        printf("\n ENV ERROR-%05d %s", NativeError, MessageText);
-        return;
-    }
-
-    return;
-}
-
-/* Expect the function to return SQL_SUCCESS. */
-#define RETURN_IF_NOT_SUCCESS(func) \
-{\
-    SQLRETURN ret_value = (func);\
-    if (SQL_SUCCESS != ret_value)\
-    {\
-        print_error();\
-        printf("\n failed line = %u: expect SQL_SUCCESS, but ret = %d", __LINE__, ret_value);\
-        return SQL_ERROR; \
-    }\
-}
-
-/* Expect the function to return SQL_SUCCESS. */
-#define RETURN_IF_NOT_SUCCESS_I(i, func) \
-{\
-    SQLRETURN ret_value = (func);\
-    if (SQL_SUCCESS != ret_value)\
-    {\
-        print_error();\
-        printf("\n failed line = %u (i=%d): expect SQL_SUCCESS, but ret = %d", __LINE__, (i), ret_value);\
-        return SQL_ERROR; \
-    }\
-}
-
-/* Expect the function to return SQL_SUCCESS_WITH_INFO. */
-#define RETURN_IF_NOT_SUCCESS_INFO(func) \
-{\
-    SQLRETURN ret_value = (func);\
-    if (SQL_SUCCESS_WITH_INFO != ret_value)\
-    {\
-        print_error();\
-        printf("\n failed line = %u: expect SQL_SUCCESS_WITH_INFO, but ret = %d", __LINE__, ret_value);\
-        return SQL_ERROR; \
-    }\
-}
-
-/* Expect the values are the same. */
-#define RETURN_IF_NOT(expect, value) \
-if ((expect) != (value))\
-{\
-    printf("\n failed line = %u: expect = %u, but value = %u", __LINE__, (expect), (value)); \
-    return SQL_ERROR;\
-}
-
-/* Expect the character strings are the same. */
-#define RETURN_IF_NOT_STRCMP_I(i, expect, value) \
-if (( NULL == (expect) ) || (NULL == (value)))\
-{\
-    printf("\n failed line = %u (i=%u): input NULL pointer !", __LINE__, (i)); \
-    return SQL_ERROR; \
-}\
-else if (0 != strcmp((expect), (value)))\
-{\
-    printf("\n failed line = %u (i=%u): expect = %s, but value = %s", __LINE__, (i), (expect), (value)); \
-    return SQL_ERROR;\
-}
-
-
-// prepare + execute SQL statement
-int execute_cmd(SQLCHAR *sql)
-{
-    if ( NULL == sql )
-    {
-        return SQL_ERROR;
-    }
-
-    if ( SQL_SUCCESS != SQLPrepare(h_stmt, sql, SQL_NTS))
-    {
-        return SQL_ERROR;
-    }
-
-    if ( SQL_SUCCESS != SQLExecute(h_stmt))
-    {
-        return SQL_ERROR;
-    }
-
-    return SQL_SUCCESS;
-}
-// execute + commit handle
-int commit_exec()
-{
-    if ( SQL_SUCCESS != SQLExecute(h_stmt))
-    {
-        return SQL_ERROR;
-    }
-
-    // Manual committing
-    if ( SQL_SUCCESS != SQLEndTran(SQL_HANDLE_DBC, h_conn, SQL_COMMIT))
-    {
-        return SQL_ERROR;
-    }
-
-    return SQL_SUCCESS;
-}
-
-int begin_unit_test()
-{
-    SQLINTEGER ret;
-
-    /* Allocate an environment handle.
*/ - ret = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &h_env); - if ((SQL_SUCCESS != ret) && (SQL_SUCCESS_WITH_INFO != ret)) - { - printf("\n begin_unit_test::SQLAllocHandle SQL_HANDLE_ENV failed ! ret = %d", ret); - return SQL_ERROR; - } - - /* Set the version number before connection. */ - if (SQL_SUCCESS != SQLSetEnvAttr(h_env, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, 0)) - { - print_error(); - printf("\n begin_unit_test::SQLSetEnvAttr SQL_ATTR_ODBC_VERSION failed ! ret = %d", ret); - SQLFreeHandle(SQL_HANDLE_ENV, h_env); - return SQL_ERROR; - } - - /* Allocate a connection handle. */ - ret = SQLAllocHandle(SQL_HANDLE_DBC, h_env, &h_conn); - if (SQL_SUCCESS != ret) - { - print_error(); - printf("\n begin_unit_test::SQLAllocHandle SQL_HANDLE_DBC failed ! ret = %d", ret); - SQLFreeHandle(SQL_HANDLE_ENV, h_env); - return SQL_ERROR; - } - - /* Establish a connection. */ - ret = SQLConnect(h_conn, (SQLCHAR*) "mogdb", SQL_NTS, - (SQLCHAR*) NULL, 0, NULL, 0); - if (SQL_SUCCESS != ret) - { - print_error(); - printf("\n begin_unit_test::SQLConnect failed ! ret = %d", ret); - SQLFreeHandle(SQL_HANDLE_DBC, h_conn); - SQLFreeHandle(SQL_HANDLE_ENV, h_env); - return SQL_ERROR; - } - - /* Allocate a statement handle. */ - ret = SQLAllocHandle(SQL_HANDLE_STMT, h_conn, &h_stmt); - if (SQL_SUCCESS != ret) - { - print_error(); - printf("\n begin_unit_test::SQLAllocHandle SQL_HANDLE_STMT failed ! ret = %d", ret); - SQLFreeHandle(SQL_HANDLE_DBC, h_conn); - SQLFreeHandle(SQL_HANDLE_ENV, h_env); - return SQL_ERROR; - } - - return SQL_SUCCESS; -} - -void end_unit_test() -{ - /* Release a statement handle. */ - if (NULL != h_stmt) - { - SQLFreeHandle(SQL_HANDLE_STMT, h_stmt); - } - - /* Release a connection handle. */ - if (NULL != h_conn) - { - SQLDisconnect(h_conn); - SQLFreeHandle(SQL_HANDLE_DBC, h_conn); - } - - /* Release an environment handle. */ - if (NULL != h_env) - { - SQLFreeHandle(SQL_HANDLE_ENV, h_env); - } - - return; -} - -int main() -{ - // begin test - if (begin_unit_test() != SQL_SUCCESS) - { - printf("\n begin_test_unit failed."); - return SQL_ERROR; - } - // The handle configuration is the same as that in the preceding case - int i = 0; - SQLCHAR* sql_drop = "drop table if exists test_bindnumber_001"; - SQLCHAR* sql_create = "create table test_bindnumber_001(" - "f4 number, f5 number(10, 2)" - ")"; - SQLCHAR* sql_insert = "insert into test_bindnumber_001 values(?, ?)"; - SQLCHAR* sql_select = "select * from test_bindnumber_001"; - SQLLEN RowCount; - SQL_NUMERIC_STRUCT st_number; - SQLCHAR getValue[2][MESSAGE_BUFFER_LEN]; - - /* Step 1. Create a table. */ - RETURN_IF_NOT_SUCCESS(execute_cmd(sql_drop)); - RETURN_IF_NOT_SUCCESS(execute_cmd(sql_create)); - - /* Step 2.1 Bind parameters using the SQL_NUMERIC_STRUCT structure. */ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - - // First line: 1234.5678 - memset(st_number.val, 0, SQL_MAX_NUMERIC_LEN); - st_number.precision = 8; - st_number.scale = 4; - st_number.sign = 1; - st_number.val[0] = 0x4E; - st_number.val[1] = 0x61; - st_number.val[2] = 0xBC; - - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_NUMERIC, SQL_NUMERIC, sizeof(SQL_NUMERIC_STRUCT), 4, &st_number, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_NUMERIC, SQL_NUMERIC, sizeof(SQL_NUMERIC_STRUCT), 4, &st_number, 0, NULL)); - - // Disable the automatic commit function. 
- SQLSetConnectAttr(h_conn, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)SQL_AUTOCOMMIT_OFF, 0); - - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - // Second line: 12345678 - memset(st_number.val, 0, SQL_MAX_NUMERIC_LEN); - st_number.precision = 8; - st_number.scale = 0; - st_number.sign = 1; - st_number.val[0] = 0x4E; - st_number.val[1] = 0x61; - st_number.val[2] = 0xBC; - - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_NUMERIC, SQL_NUMERIC, sizeof(SQL_NUMERIC_STRUCT), 0, &st_number, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_NUMERIC, SQL_NUMERIC, sizeof(SQL_NUMERIC_STRUCT), 0, &st_number, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - // Third line: 12345678 - memset(st_number.val, 0, SQL_MAX_NUMERIC_LEN); - st_number.precision = 0; - st_number.scale = 4; - st_number.sign = 1; - st_number.val[0] = 0x4E; - st_number.val[1] = 0x61; - st_number.val[2] = 0xBC; - - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_NUMERIC, SQL_NUMERIC, sizeof(SQL_NUMERIC_STRUCT), 4, &st_number, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_NUMERIC, SQL_NUMERIC, sizeof(SQL_NUMERIC_STRUCT), 4, &st_number, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - - /* Step 2.2 Bind parameters by using the SQL_C_CHAR character string in the fourth line. */ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - SQLCHAR* szNumber = "1234.5678"; - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_NUMERIC, strlen(szNumber), 0, szNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_NUMERIC, strlen(szNumber), 0, szNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - /* Step 2.3 Bind parameters by using SQL_C_FLOAT in the fifth line. */ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - SQLREAL fNumber = 1234.5678; - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_FLOAT, SQL_NUMERIC, sizeof(fNumber), 4, &fNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_FLOAT, SQL_NUMERIC, sizeof(fNumber), 4, &fNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - /* Step 2.4 Bind parameters by using SQL_C_DOUBLE in the sixth line. */ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - SQLDOUBLE dNumber = 1234.5678; - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_DOUBLE, SQL_NUMERIC, sizeof(dNumber), 4, &dNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_DOUBLE, SQL_NUMERIC, sizeof(dNumber), 4, &dNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - SQLBIGINT bNumber1 = 0xFFFFFFFFFFFFFFFF; - SQLBIGINT bNumber2 = 12345; - - /* Step 2.5 Bind parameters by using SQL_C_SBIGINT in the seventh line. 
*/ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_SBIGINT, SQL_NUMERIC, sizeof(bNumber1), 4, &bNumber1, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_SBIGINT, SQL_NUMERIC, sizeof(bNumber2), 4, &bNumber2, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - /* Step 2.6 Bind parameters by using SQL_C_UBIGINT in the eighth line. */ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_UBIGINT, SQL_NUMERIC, sizeof(bNumber1), 4, &bNumber1, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_UBIGINT, SQL_NUMERIC, sizeof(bNumber2), 4, &bNumber2, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - SQLLEN lNumber1 = 0xFFFFFFFFFFFFFFFF; - SQLLEN lNumber2 = 12345; - - /* Step 2.7 Bind parameters by using SQL_C_LONG in the ninth line. */ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_LONG, SQL_NUMERIC, sizeof(lNumber1), 0, &lNumber1, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_LONG, SQL_NUMERIC, sizeof(lNumber2), 0, &lNumber2, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - /* Step 2.8 Bind parameters by using SQL_C_ULONG in the tenth line. */ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_ULONG, SQL_NUMERIC, sizeof(lNumber1), 0, &lNumber1, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_ULONG, SQL_NUMERIC, sizeof(lNumber2), 0, &lNumber2, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - SQLSMALLINT sNumber = 0xFFFF; - - /* Step 2.9 Bind parameters by using SQL_C_SHORT in the eleventh line. */ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_SHORT, SQL_NUMERIC, sizeof(sNumber), 0, &sNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_SHORT, SQL_NUMERIC, sizeof(sNumber), 0, &sNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - /* Step 2.10 Bind parameters by using SQL_C_USHORT in the twelfth line. */ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_USHORT, SQL_NUMERIC, sizeof(sNumber), 0, &sNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_USHORT, SQL_NUMERIC, sizeof(sNumber), 0, &sNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - SQLCHAR cNumber = 0xFF; - - /* Step 2.11 Bind parameters by using SQL_C_TINYINT in the thirteenth line. 
*/ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_TINYINT, SQL_NUMERIC, sizeof(cNumber), 0, &cNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_TINYINT, SQL_NUMERIC, sizeof(cNumber), 0, &cNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - /* Step 2.12 Bind parameters by using SQL_C_UTINYINT in the fourteenth line.*/ - RETURN_IF_NOT_SUCCESS(SQLPrepare(h_stmt, sql_insert, SQL_NTS)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 1, SQL_PARAM_INPUT, SQL_C_UTINYINT, SQL_NUMERIC, sizeof(cNumber), 0, &cNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(SQLBindParameter(h_stmt, 2, SQL_PARAM_INPUT, SQL_C_UTINYINT, SQL_NUMERIC, sizeof(cNumber), 0, &cNumber, 0, NULL)); - RETURN_IF_NOT_SUCCESS(commit_exec()); - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(1, RowCount); - - /* Use the character string type to unify the expectation. */ - SQLCHAR* expectValue[14][2] = {{"1234.5678", "1234.57"}, - {"12345678", "12345678"}, - {"0", "0"}, - {"1234.5678", "1234.57"}, - {"1234.5677", "1234.57"}, - {"1234.5678", "1234.57"}, - {"-1", "12345"}, - {"18446744073709551615", "12345"}, - {"-1", "12345"}, - {"4294967295", "12345"}, - {"-1", "-1"}, - {"65535", "65535"}, - {"-1", "-1"}, - {"255", "255"}, - }; - - RETURN_IF_NOT_SUCCESS(execute_cmd(sql_select)); - while ( SQL_NO_DATA != SQLFetch(h_stmt)) - { - RETURN_IF_NOT_SUCCESS_I(i, SQLGetData(h_stmt, 1, SQL_C_CHAR, &getValue[0], MESSAGE_BUFFER_LEN, NULL)); - RETURN_IF_NOT_SUCCESS_I(i, SQLGetData(h_stmt, 2, SQL_C_CHAR, &getValue[1], MESSAGE_BUFFER_LEN, NULL)); - - //RETURN_IF_NOT_STRCMP_I(i, expectValue[i][0], getValue[0]); - //RETURN_IF_NOT_STRCMP_I(i, expectValue[i][1], getValue[1]); - i++; - } - - RETURN_IF_NOT_SUCCESS(SQLRowCount(h_stmt, &RowCount)); - RETURN_IF_NOT(i, RowCount); - SQLCloseCursor(h_stmt); - /* Final step. Delete the table and restore the environment. */ - RETURN_IF_NOT_SUCCESS(execute_cmd(sql_drop)); - - end_unit_test(); -} -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** In the preceding example, the number column is defined. When the **SQLBindParameter** API is called, the performance of binding SQL_NUMERIC is higher than that of SQL_LONG. If char is used, the data type needs to be converted when data is inserted to the database server, causing a performance bottleneck. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-0-odbc-overview.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-0-odbc-overview.md deleted file mode 100644 index 2afef879..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-0-odbc-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Description -summary: Description -author: Guo Huan -date: 2021-05-17 ---- - -# Description - -The ODBC interface is a set of API functions provided to users. This chapter describes its common interfaces. For details on other interfaces, see "ODBC Programmer's Reference" at MSDN (). 
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-1-SQLAllocEnv.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-1-SQLAllocEnv.md
deleted file mode 100644
index 69cde828..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-1-SQLAllocEnv.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: SQLAllocEnv
-summary: SQLAllocEnv
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLAllocEnv
-
-In ODBC 3.x, SQLAllocEnv (an ODBC 2.x function) was deprecated and replaced by SQLAllocHandle. For details, see [SQLAllocHandle](2-3-SQLAllocHandle.md).
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-10-SQLExecDirect.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-10-SQLExecDirect.md
deleted file mode 100644
index b939ecb3..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-10-SQLExecDirect.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: SQLExecDirect
-summary: SQLExecDirect
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLExecDirect
-
-## Function
-
-SQLExecDirect directly executes the SQL statement specified in **StatementText**, without a separate prepare step. This is the fastest method for executing a single SQL statement once.
-
-## Prototype
-
-```
-SQLRETURN SQLExecDirect(SQLHSTMT StatementHandle,
-                        SQLCHAR *StatementText,
-                        SQLINTEGER TextLength);
-```
-
-## Parameter
-
-**Table 1** SQLExecDirect parameters
-
-| **Keyword** | **Parameter Description** |
-| :-------------- | :----------------------------------------------------------- |
-| StatementHandle | Statement handle, obtained from SQLAllocHandle. |
-| StatementText | SQL statement to be executed. One SQL statement can be executed at a time. |
-| TextLength | Length of **StatementText**. |
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_NEED_DATA** indicates that parameters provided before executing the SQL statement are insufficient.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_INVALID_HANDLE** indicates that an invalid handle was passed. This value may also be returned by other APIs.
-- **SQL_STILL_EXECUTING** indicates that the statement is being executed.
-- **SQL_NO_DATA** indicates that the SQL statement does not return a result set.
-
-## Precautions
-
-If SQLExecDirect returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_STMT** and **StatementHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information.
-
-## Example
-
-See [Examples](2-23-Examples.md).
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-11-SQLExecute.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-11-SQLExecute.md
deleted file mode 100644
index 378e20e4..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-11-SQLExecute.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-title: SQLExecute
-summary: SQLExecute
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLExecute
-
-## Function
-
-SQLExecute is used to execute a statement prepared using SQLPrepare.
-
-## Prototype
-
-```
-SQLRETURN SQLExecute(SQLHSTMT StatementHandle);
-```
-
-## Parameter
-
-**Table 1** SQLExecute parameters
-
-| **Keyword**     | **Parameter Description**        |
-| :-------------- | :------------------------------- |
-| StatementHandle | Statement handle to be executed. |
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_NEED_DATA** indicates that parameters provided before executing the SQL statement are insufficient.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_NO_DATA** indicates that the SQL statement does not return a result set.
-- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs.
-- **SQL_STILL_EXECUTING** indicates that the statement is being executed.
-
-## Precautions
-
-If SQLExecute returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_STMT** and **StatementHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information.
-
-## Example
-
-See [Examples](2-23-Examples.md).
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-12-SQLFetch.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-12-SQLFetch.md
deleted file mode 100644
index f32a9a60..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-12-SQLFetch.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: SQLFetch
-summary: SQLFetch
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLFetch
-
-## Function
-
-SQLFetch is used to advance the cursor to the next row of the result set and retrieve any bound columns.
-
-## Prototype
-
-```
-SQLRETURN SQLFetch(SQLHSTMT StatementHandle);
-```
-
-## Parameter
-
-**Table 1** SQLFetch parameters
-
-| **Keyword**     | **Parameter Description**                       |
-| :-------------- | :---------------------------------------------- |
-| StatementHandle | Statement handle, obtained from SQLAllocHandle. |
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_NO_DATA** indicates that the SQL statement does not return a result set.
-- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs.
-- **SQL_STILL_EXECUTING** indicates that the statement is being executed.
-
-## Precautions
-
-If SQLFetch returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_STMT** and **StatementHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information.
-
-## Example
-
-See [Examples](2-23-Examples.md).
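-
-The following minimal sketch supplements the reference above; it is not part of the original example set. It assumes a statement handle on which a query has already been executed and whose first result column is an integer.
-
-```c
-#include <stdio.h>
-#include <sql.h>
-#include <sqlext.h>
-
-/* Hypothetical helper: iterates over an already-executed result set and
- * prints the integer in column 1 of every row. */
-static void fetch_all_ids(SQLHSTMT hstmt)
-{
-    SQLINTEGER id;
-    SQLRETURN rc;
-    while ((rc = SQLFetch(hstmt)) != SQL_NO_DATA) {
-        if (rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO)
-            break;  /* call SQLGetDiagRec on the statement handle for details */
-        SQLGetData(hstmt, 1, SQL_C_SLONG, &id, 0, NULL);
-        printf("id = %d\n", (int)id);
-    }
-}
-```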
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-13-SQLFreeStmt.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-13-SQLFreeStmt.md deleted file mode 100644 index b0c549f3..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-13-SQLFreeStmt.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: SQLFreeStmt -summary: SQLFreeStmt -author: Guo Huan -date: 2021-05-17 ---- - -# SQLFreeStmt - -In ODBC 3.x, SQLFreeStmt (an ODBC 2.x function) was deprecated and replaced by SQLFreeHandle. For details, see [SQLFreeHandle](2-15-SQLFreeHandle.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-14-SQLFreeConnect.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-14-SQLFreeConnect.md deleted file mode 100644 index 654509c7..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-14-SQLFreeConnect.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: SQLFreeConnect -summary: SQLFreeConnect -author: Guo Huan -date: 2021-05-17 ---- - -# SQLFreeConnect - -In ODBC 3.x, SQLFreeConnect (an ODBC 2.x function) was deprecated and replaced by SQLFreeHandle. For details, see [SQLFreeHandle](2-15-SQLFreeHandle.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-15-SQLFreeHandle.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-15-SQLFreeHandle.md deleted file mode 100644 index e689e0ce..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-15-SQLFreeHandle.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: SQLFreeHandle -summary: SQLFreeHandle -author: Guo Huan -date: 2021-05-17 ---- - -# SQLFreeHandle - -## Function - -SQLFreeHandle is used to release resources associated with a specific environment, connection, or statement handle. It replaces the ODBC 2.x functions: SQLFreeEnv, SQLFreeConnect, and SQLFreeStmt. - -## Prototype - -``` -SQLRETURN SQLFreeHandle(SQLSMALLINT HandleType, - SQLHANDLE Handle); -``` - -## Parameter - -**Table 1** SQLFreeHandle parameters - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| HandleType | Type of handle to be freed by SQLFreeHandle. The value must be one of the following:
- SQL_HANDLE_ENV
- SQL_HANDLE_DBC
- SQL_HANDLE_STMT
- SQL_HANDLE_DESC
If **HandleType** is not one of the preceding values, SQLFreeHandle returns **SQL_INVALID_HANDLE**. | -| Handle | Name of the handle to be freed. | - -## Return Value - -- **SQL_SUCCESS** indicates that the call succeeded. -- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed. -- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures. -- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs. - -## Precautions - -If SQLFreeHandle returns **SQL_ERROR**, the handle is still valid. - -## Example - -See [Examples](2-23-Examples.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-16-SQLFreeEnv.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-16-SQLFreeEnv.md deleted file mode 100644 index b5f7ce56..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-16-SQLFreeEnv.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: SQLFreeEnv -summary: SQLFreeEnv -author: Guo Huan -date: 2021-05-17 ---- - -# SQLFreeEnv - -In ODBC 3.x, SQLFreeEnv (an ODBC 2.x function) was deprecated and replaced by SQLFreeHandle. For details, see [SQLFreeHandle](2-15-SQLFreeHandle.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-17-SQLPrepare.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-17-SQLPrepare.md deleted file mode 100644 index b2b6f1a0..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-17-SQLPrepare.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: SQLPrepare -summary: SQLPrepare -author: Guo Huan -date: 2021-05-17 ---- - -# SQLPrepare - -## Function - -SQLPrepare is used to prepare an SQL statement to be executed. - -## Prototype - -``` -SQLRETURN SQLPrepare(SQLHSTMT StatementHandle, - SQLCHAR *StatementText, - SQLINTEGER TextLength); -``` - -## Parameter - -**Table 1** SQLPrepare parameters - -| **Keyword** | **Parameter Description** | -| :-------------- | :--------------------------- | -| StatementHandle | Statement handle. | -| StatementText | SQL text string. | -| TextLength | Length of **StatementText**. | - -## Return Value - -- **SQL_SUCCESS** indicates that the call succeeded. -- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed. -- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures. -- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs. -- **SQL_STILL_EXECUTING** indicates that the statement is being executed. - -## Precautions - -If SQLPrepare returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_STMT** and **StatementHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information. - -## Example - -See [Examples](2-23-Examples.md). 
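-
-The following minimal sketch (not from the original example set) shows the usual prepare-once, execute-many pattern; the handle `hstmt` and the table `test_t(id int)` are assumptions made for illustration.
-
-```c
-#include <sql.h>
-#include <sqlext.h>
-
-/* Hypothetical helper: prepares one INSERT and re-executes it with three
- * different parameter values. */
-static SQLRETURN insert_three_ids(SQLHSTMT hstmt)
-{
-    SQLINTEGER id;
-    SQLRETURN rc = SQLPrepare(hstmt, (SQLCHAR *)"insert into test_t values(?)", SQL_NTS);
-    if (!SQL_SUCCEEDED(rc))
-        return rc;
-    /* The bound variable is read again at each SQLExecute call. */
-    rc = SQLBindParameter(hstmt, 1, SQL_PARAM_INPUT, SQL_C_SLONG, SQL_INTEGER,
-                          0, 0, &id, 0, NULL);
-    for (id = 1; id <= 3 && SQL_SUCCEEDED(rc); id++)
-        rc = SQLExecute(hstmt);
-    return rc;
-}
-```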
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-18-SQLGetData.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-18-SQLGetData.md
deleted file mode 100644
index 22db826d..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-18-SQLGetData.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: SQLGetData
-summary: SQLGetData
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLGetData
-
-## Function
-
-SQLGetData is used to retrieve data for a single column in the result set. It can be called multiple times to retrieve data of variable length in parts.
-
-## Prototype
-
-```
-SQLRETURN SQLGetData(SQLHSTMT StatementHandle,
-                     SQLUSMALLINT Col_or_Param_Num,
-                     SQLSMALLINT TargetType,
-                     SQLPOINTER TargetValuePtr,
-                     SQLLEN BufferLength,
-                     SQLLEN *StrLen_or_IndPtr);
-```
-
-## Parameter
-
-**Table 1** SQLGetData parameters
-
-| **Keyword**      | **Parameter Description**                                    |
-| :--------------- | :----------------------------------------------------------- |
-| StatementHandle  | Statement handle, obtained from SQLAllocHandle.              |
-| Col_or_Param_Num | Column number for which the data retrieval is requested. The column number starts with 1 and increases in ascending order. The number of the bookmark column is 0. |
-| TargetType       | C data type in the TargetValuePtr buffer. If **TargetType** is **SQL_ARD_TYPE**, the driver uses the data type of the **SQL_DESC_CONCISE_TYPE** field in ARD. If **TargetType** is **SQL_C_DEFAULT**, the driver selects a default data type according to the source SQL data type. |
-| TargetValuePtr   | **Output parameter**: pointer to the buffer in which the data is returned. |
-| BufferLength     | Size of the buffer pointed to by **TargetValuePtr**.         |
-| StrLen_or_IndPtr | **Output parameter**: pointer to the buffer where the length or identifier value is returned. |
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_NO_DATA** indicates that the SQL statement does not return a result set.
-- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs.
-- **SQL_STILL_EXECUTING** indicates that the statement is being executed.
-
-## Precautions
-
-If SQLGetData returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_STMT** and **StatementHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information.
-
-## Example
-
-See [Examples](2-23-Examples.md).
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-19-SQLGetDiagRec.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-19-SQLGetDiagRec.md
deleted file mode 100644
index 291300b0..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-19-SQLGetDiagRec.md
+++ /dev/null
@@ -1,74 +0,0 @@
----
-title: SQLGetDiagRec
-summary: SQLGetDiagRec
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLGetDiagRec
-
-## Function
-
-SQLGetDiagRec is used to return the current values of multiple fields in a diagnostic record that contains error, warning, and status information.
-
-## Prototype
-
-```
-SQLRETURN SQLGetDiagRec(SQLSMALLINT HandleType,
-                        SQLHANDLE Handle,
-                        SQLSMALLINT RecNumber,
-                        SQLCHAR *SQLState,
-                        SQLINTEGER *NativeErrorPtr,
-                        SQLCHAR *MessageText,
-                        SQLSMALLINT BufferLength,
-                        SQLSMALLINT *TextLengthPtr);
-```
-
-## Parameter
-
-**Table 1** SQLGetDiagRec parameters
-
-| **Keyword**    | **Parameter Description**                                    |
-| :------------- | :----------------------------------------------------------- |
-| HandleType     | A handle-type identifier that describes the type of handle for which diagnostics are desired. The value must be one of the following:<br>
- SQL_HANDLE_ENV
- SQL_HANDLE_DBC
- SQL_HANDLE_STMT
- SQL_HANDLE_DESC |
-| Handle         | A handle for the diagnostic data structure. Its type is indicated by **HandleType**. If **HandleType** is **SQL_HANDLE_ENV**, **Handle** may be a shared or non-shared environment handle. |
-| RecNumber      | Status record from which the application seeks information. **RecNumber** starts with 1. |
-| SQLState       | **Output parameter**: pointer to a buffer that saves the 5-character **SQLSTATE** code pertaining to **RecNumber**. |
-| NativeErrorPtr | **Output parameter**: pointer to a buffer that saves the native error code. |
-| MessageText    | Pointer to a buffer that saves text strings of diagnostic information. |
-| BufferLength   | Length of **MessageText**.                                   |
-| TextLengthPtr  | **Output parameter**: pointer to the buffer, the total number of bytes in the returned **MessageText**. If the number of bytes available to return is greater than **BufferLength**, then the diagnostics information text in **MessageText** is truncated to **BufferLength** minus the length of the null termination character. |
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs.
-
-## Precautions
-
-SQLGetDiagRec does not release diagnostic records for itself. It uses the following return values to report execution results:
-
-- **SQL_SUCCESS** indicates that the function successfully returns diagnostic information.
-- **SQL_SUCCESS_WITH_INFO** indicates that the **MessageText** buffer is too small to hold the requested diagnostic information. No diagnostic records are generated.
-- **SQL_INVALID_HANDLE** indicates that the handle indicated by **HandleType** and **Handle** is an invalid handle.
-- **SQL_ERROR** indicates that **RecNumber** is less than or equal to 0 or that **BufferLength** is smaller than 0.
-
-If an ODBC function returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec to obtain the **SQLSTATE** value. The possible **SQLSTATE** values are listed as follows:
-
-**Table 2** SQLSTATE values
-
-| SQLSTATE | Error                                 | Description                                                  |
-| :------- | :------------------------------------ | :----------------------------------------------------------- |
-| HY000    | General error.                        | An error occurred for which there is no specific SQLSTATE.   |
-| HY001    | Memory allocation error.              | The driver is unable to allocate memory required to support execution or completion of the function. |
-| HY008    | Operation canceled.                   | SQLCancel was called to cancel the statement execution, but a function was still called on **StatementHandle**. |
-| HY010    | Function sequence error.              | The function was called out of sequence, for example, before data was sent for data-at-execution parameters or columns. |
-| HY013    | Memory management error.              | The function call failed, possibly because of low memory conditions. |
-| HYT01    | Connection timeout.                   | The timeout period expired before the application was able to connect to the data source. |
-| IM001    | Function not supported by the driver. | The driver associated with **StatementHandle** does not support the function. |
-
-## Example
-
-See [Examples](2-23-Examples.md).
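-
-As a supplement (not part of the original manual), the following hypothetical helper drains every status record attached to a handle after a call has returned **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**.
-
-```c
-#include <stdio.h>
-#include <sql.h>
-#include <sqlext.h>
-
-/* Hypothetical diagnostics dump: prints every status record on a handle.
- * Records are numbered from 1; iteration stops when no record exists
- * (SQL_NO_DATA) or the arguments are rejected. */
-static void print_diag(SQLSMALLINT type, SQLHANDLE handle)
-{
-    SQLCHAR state[6], text[256];
-    SQLINTEGER native;
-    SQLSMALLINT len, rec = 1;
-    while (SQLGetDiagRec(type, handle, rec++, state, &native,
-                         text, sizeof(text), &len) == SQL_SUCCESS) {
-        printf("SQLSTATE=%s native=%d: %s\n", (char *)state, (int)native, (char *)text);
-    }
-}
-```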
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-2-SQLAllocConnect.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-2-SQLAllocConnect.md
deleted file mode 100644
index cd0c16f4..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-2-SQLAllocConnect.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: SQLAllocConnect
-summary: SQLAllocConnect
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLAllocConnect
-
-In ODBC 3.x, SQLAllocConnect (an ODBC 2.x function) was deprecated and replaced by SQLAllocHandle. For details, see [SQLAllocHandle](2-3-SQLAllocHandle.md).
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-20-SQLSetConnectAttr.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-20-SQLSetConnectAttr.md
deleted file mode 100644
index 2e5dc4b4..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-20-SQLSetConnectAttr.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: SQLSetConnectAttr
-summary: SQLSetConnectAttr
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLSetConnectAttr
-
-## Function
-
-SQLSetConnectAttr is used to set connection attributes.
-
-## Prototype
-
-```
-SQLRETURN SQLSetConnectAttr(SQLHDBC ConnectionHandle,
-                            SQLINTEGER Attribute,
-                            SQLPOINTER ValuePtr,
-                            SQLINTEGER StringLength);
-```
-
-## Parameter
-
-**Table 1** SQLSetConnectAttr parameters
-
-| **Keyword**      | **Parameter Description**                                    |
-| :--------------- | :----------------------------------------------------------- |
-| ConnectionHandle | Connection handle.                                           |
-| Attribute        | Attribute to set.                                            |
-| ValuePtr         | Pointer to the **Attribute** value. **ValuePtr** depends on the **Attribute** value, and can be a 32-bit unsigned integer value or a null-terminated string. If the **ValuePtr** parameter is a driver-specific value, it may be a signed integer. |
-| StringLength     | If **ValuePtr** points to a string or a binary buffer, **StringLength** is the length of the buffer pointed to by **ValuePtr**. If **ValuePtr** points to an integer, **StringLength** is ignored. |
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs.
-
-## Precautions
-
-If SQLSetConnectAttr returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_DBC** and **ConnectionHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information.
-
-## Example
-
-See [Examples](2-23-Examples.md).
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-21-SQLSetEnvAttr.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-21-SQLSetEnvAttr.md
deleted file mode 100644
index 9a30d73e..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-21-SQLSetEnvAttr.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: SQLSetEnvAttr
-summary: SQLSetEnvAttr
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLSetEnvAttr
-
-## Function
-
-SQLSetEnvAttr is used to set environment attributes.
-
-## Prototype
-
-```
-SQLRETURN SQLSetEnvAttr(SQLHENV EnvironmentHandle,
-                        SQLINTEGER Attribute,
-                        SQLPOINTER ValuePtr,
-                        SQLINTEGER StringLength);
-```
-
-## Parameter
-
-**Table 1** SQLSetEnvAttr parameters
-
-| **Keyword**       | **Parameter Description**                                    |
-| :---------------- | :----------------------------------------------------------- |
-| EnvironmentHandle | Environment handle.                                          |
-| Attribute         | Environment attribute to be set. The value must be one of the following:<br>
- **SQL_ATTR_ODBC_VERSION**: ODBC version
- **SQL_CONNECTION_POOLING**: connection pool attribute
- **SQL_OUTPUT_NTS**: string type returned by the driver |
-| ValuePtr          | Pointer to the **Attribute** value. **ValuePtr** depends on the **Attribute** value, and can be a 32-bit integer value or a null-terminated string. |
-| StringLength      | If **ValuePtr** points to a string or a binary buffer, **StringLength** is the length of the buffer pointed to by **ValuePtr**. If **ValuePtr** points to an integer, **StringLength** is ignored. |
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs.
-
-## Precautions
-
-If SQLSetEnvAttr returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_ENV** and **EnvironmentHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information.
-
-## Example
-
-See [Examples](2-23-Examples.md).
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-22-SQLSetStmtAttr.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-22-SQLSetStmtAttr.md
deleted file mode 100644
index 8da851db..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-22-SQLSetStmtAttr.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: SQLSetStmtAttr
-summary: SQLSetStmtAttr
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLSetStmtAttr
-
-## Function
-
-SQLSetStmtAttr is used to set attributes related to a statement.
-
-## Prototype
-
-```
-SQLRETURN SQLSetStmtAttr(SQLHSTMT StatementHandle,
-                         SQLINTEGER Attribute,
-                         SQLPOINTER ValuePtr,
-                         SQLINTEGER StringLength);
-```
-
-## Parameter
-
-**Table 1** SQLSetStmtAttr parameters
-
-| **Keyword**     | **Parameter Description**                                    |
-| :-------------- | :----------------------------------------------------------- |
-| StatementHandle | Statement handle.                                            |
-| Attribute       | Attribute to set.                                            |
-| ValuePtr        | Pointer to the **Attribute** value. **ValuePtr** depends on the **Attribute** value, and can be a 32-bit unsigned integer value or a pointer to a null-terminated string, a binary buffer, or a driver-specified value. If the **ValuePtr** parameter is a driver-specific value, it may be a signed integer. |
-| StringLength    | If **ValuePtr** points to a string or a binary buffer, **StringLength** is the length of the buffer pointed to by **ValuePtr**. If **ValuePtr** points to an integer, **StringLength** is ignored. |
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs.
-
-## Precautions
-
-If SQLSetStmtAttr returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_STMT** and **StatementHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information.
-
-## Example
-
-See [Examples](2-23-Examples.md).
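-
-The three attribute-setting functions above share the same calling convention. The following minimal sketch (an illustration, not original manual content) assumes already-allocated handles and sets one typical attribute at each level.
-
-```c
-#include <sql.h>
-#include <sqlext.h>
-
-/* Hypothetical setup: declare ODBC 3.x behavior on the environment, enable
- * autocommit on the connection, and set a 5-second query timeout on the
- * statement. Integer-valued attributes are passed by value, cast to
- * SQLPOINTER; StringLength is ignored for them. */
-static void configure_handles(SQLHENV henv, SQLHDBC hdbc, SQLHSTMT hstmt)
-{
-    SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, 0);
-    SQLSetConnectAttr(hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)SQL_AUTOCOMMIT_ON, 0);
-    SQLSetStmtAttr(hstmt, SQL_ATTR_QUERY_TIMEOUT, (SQLPOINTER)5, 0);
-}
-```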
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-23-Examples.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-23-Examples.md
deleted file mode 100644
index ef42c9cb..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-23-Examples.md
+++ /dev/null
@@ -1,345 +0,0 @@
----
-title: Examples
-summary: Examples
-author: Guo Huan
-date: 2021-05-17
----
-
-# Examples
-
-## Code for Common Functions
-
-```c
-// The following example shows how to obtain data from MogDB through the ODBC interface.
-// DBtest.c (compile with: libodbc.so)
-#include <stdlib.h>
-#include <stdio.h>
-#include <sqlext.h>
-#ifdef WIN32
-#include <windows.h>
-#endif
-SQLHENV V_OD_Env;        // Handle ODBC environment
-SQLHSTMT V_OD_hstmt;     // Handle statement
-SQLHDBC V_OD_hdbc;       // Handle connection
-char typename[100];
-SQLINTEGER value = 100;
-SQLINTEGER V_OD_erg,V_OD_buffer,V_OD_id; SQLLEN V_OD_err;
-int main(int argc,char *argv[])
-{
-    // 1. Allocate an environment handle.
-    V_OD_erg = SQLAllocHandle(SQL_HANDLE_ENV,SQL_NULL_HANDLE,&V_OD_Env);
-    if ((V_OD_erg != SQL_SUCCESS) && (V_OD_erg != SQL_SUCCESS_WITH_INFO))
-    {
-        printf("Error AllocHandle\n");
-        exit(0);
-    }
-    // 2. Set environment attributes (version information).
-    SQLSetEnvAttr(V_OD_Env, SQL_ATTR_ODBC_VERSION, (void*)SQL_OV_ODBC3, 0);
-    // 3. Allocate a connection handle.
-    V_OD_erg = SQLAllocHandle(SQL_HANDLE_DBC, V_OD_Env, &V_OD_hdbc);
-    if ((V_OD_erg != SQL_SUCCESS) && (V_OD_erg != SQL_SUCCESS_WITH_INFO))
-    {
-        SQLFreeHandle(SQL_HANDLE_ENV, V_OD_Env);
-        exit(0);
-    }
-    // 4. Set connection attributes.
-    SQLSetConnectAttr(V_OD_hdbc, SQL_ATTR_AUTOCOMMIT, (SQLPOINTER)SQL_AUTOCOMMIT_ON, 0);
-    // 5. Connect to the data source. userName and password indicate the username and password for connecting to the database. Set them as needed.
-    // If the username and password have been set in the odbc.ini file, you do not need to set userName or password here, retaining "" for them. However, you are not advised to do so because the username and password will be disclosed if the permission for odbc.ini is abused.
-    V_OD_erg = SQLConnect(V_OD_hdbc, (SQLCHAR*) "gaussdb", SQL_NTS,
-                          (SQLCHAR*) "userName", SQL_NTS, (SQLCHAR*) "password", SQL_NTS);
-    if ((V_OD_erg != SQL_SUCCESS) && (V_OD_erg != SQL_SUCCESS_WITH_INFO))
-    {
-        printf("Error SQLConnect %d\n",V_OD_erg);
-        SQLFreeHandle(SQL_HANDLE_ENV, V_OD_Env);
-        exit(0);
-    }
-    printf("Connected !\n");
-    // 6. Allocate a statement handle. (It must exist before its attributes can be set.)
-    SQLAllocHandle(SQL_HANDLE_STMT, V_OD_hdbc, &V_OD_hstmt);
-    // 7. Set statement attributes.
-    SQLSetStmtAttr(V_OD_hstmt,SQL_ATTR_QUERY_TIMEOUT,(SQLPOINTER)3,0);
-    // 8. Run SQL statements.
-    SQLExecDirect(V_OD_hstmt,(SQLCHAR*)"drop table IF EXISTS customer_t1",SQL_NTS);
-    SQLExecDirect(V_OD_hstmt,(SQLCHAR*)"CREATE TABLE customer_t1(c_customer_sk INTEGER, c_customer_name VARCHAR(32));",SQL_NTS);
-    SQLExecDirect(V_OD_hstmt,(SQLCHAR*)"insert into customer_t1 values(25,'li')",SQL_NTS);
-    // 9. Prepare for execution.
-    SQLPrepare(V_OD_hstmt,(SQLCHAR*)"insert into customer_t1 values(?)",SQL_NTS);
-    // 10. Bind parameters.
-    SQLBindParameter(V_OD_hstmt,1,SQL_PARAM_INPUT,SQL_C_SLONG,SQL_INTEGER,0,0,
-                     &value,0,NULL);
-    // 11. Run prepared statements.
-    SQLExecute(V_OD_hstmt);
-    SQLExecDirect(V_OD_hstmt,(SQLCHAR*)"select c_customer_sk from customer_t1",SQL_NTS);
-    // 12. Obtain attributes of a specific column in the result set. (SQL_DESC_TYPE_NAME returns the type name as a character string.)
-    SQLColAttribute(V_OD_hstmt,1,SQL_DESC_TYPE_NAME,typename,100,NULL,NULL);
-    printf("SQLColAttribute %s\n",typename);
-    // 13. Bind the result set.
-    SQLBindCol(V_OD_hstmt,1,SQL_C_SLONG, (SQLPOINTER)&V_OD_buffer,150,
-               &V_OD_err);
-    // 14. Obtain data in the result set by executing SQLFetch.
-    V_OD_erg=SQLFetch(V_OD_hstmt);
-    // 15. Obtain and return data by executing SQLGetData.
-    while(V_OD_erg != SQL_NO_DATA)
-    {
-        SQLGetData(V_OD_hstmt,1,SQL_C_SLONG,(SQLPOINTER)&V_OD_id,0,NULL);
-        printf("SQLGetData ----ID = %d\n",V_OD_id);
-        V_OD_erg=SQLFetch(V_OD_hstmt);
-    };
-    printf("Done !\n");
-    // 16. Disconnect data source connections and release handles.
-    SQLFreeHandle(SQL_HANDLE_STMT,V_OD_hstmt);
-    SQLDisconnect(V_OD_hdbc);
-    SQLFreeHandle(SQL_HANDLE_DBC,V_OD_hdbc);
-    SQLFreeHandle(SQL_HANDLE_ENV, V_OD_Env);
-    return(0);
-}
-```
-
-## Code for Batch Processing
-
-```c
-/**********************************************************************
-*Set UseBatchProtocol to 1 in the data source and set the database parameter support_batch_bind
-*to on.
-*The CHECK_ERROR command is used to check and print error information.
-*This example is used to interactively obtain the DSN, data volume to be processed, and volume of ignored data from users, and insert required data into the test_odbc_batch_insert table.
-***********************************************************************/
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sql.h>
-#include <sqlext.h>
-
-#include "util.c"
-
-void Exec(SQLHDBC hdbc, SQLCHAR* sql)
-{
-    SQLRETURN retcode;                  // Return status
-    SQLHSTMT hstmt = SQL_NULL_HSTMT;    // Statement handle
-    SQLCHAR loginfo[2048];
-
-    // Allocate Statement Handle
-    retcode = SQLAllocHandle(SQL_HANDLE_STMT, hdbc, &hstmt);
-    CHECK_ERROR(retcode, "SQLAllocHandle(SQL_HANDLE_STMT)",
-                hstmt, SQL_HANDLE_STMT);
-
-    // Prepare Statement
-    retcode = SQLPrepare(hstmt, (SQLCHAR*) sql, SQL_NTS);
-    sprintf((char*)loginfo, "SQLPrepare log: %s", (char*)sql);
-    CHECK_ERROR(retcode, loginfo, hstmt, SQL_HANDLE_STMT);
-
-    // Execute Statement
-    retcode = SQLExecute(hstmt);
-    sprintf((char*)loginfo, "SQLExecute stmt log: %s", (char*)sql);
-    CHECK_ERROR(retcode, loginfo, hstmt, SQL_HANDLE_STMT);
-
-    // Free Handle
-    retcode = SQLFreeHandle(SQL_HANDLE_STMT, hstmt);
-    sprintf((char*)loginfo, "SQLFreeHandle stmt log: %s", (char*)sql);
-    CHECK_ERROR(retcode, loginfo, hstmt, SQL_HANDLE_STMT);
-}
-
-int main ()
-{
-    SQLHENV henv = SQL_NULL_HENV;
-    SQLHDBC hdbc = SQL_NULL_HDBC;
-    int batchCount = 1000;
-    SQLLEN rowsCount = 0;
-    int ignoreCount = 0;
-
-    SQLRETURN retcode;
-    SQLCHAR dsn[1024] = {'\0'};
-    SQLCHAR loginfo[2048];
-
-    // Interactively obtain data source names.
-    getStr("Please input your DSN", (char*)dsn, sizeof(dsn), 'N');
-    // Interactively obtain the volume of data to be batch processed.
-    getInt("batchCount", &batchCount, 'N', 1);
-    do
-    {
-        // Interactively obtain the volume of batch processing data that is not inserted into the database.
- getInt("ignoreCount", &ignoreCount, 'N', 1); - if (ignoreCount > batchCount) - { - printf("ignoreCount(%d) should be less than batchCount(%d)\n", ignoreCount, batchCount); - } - }while(ignoreCount > batchCount); - - retcode = SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv); - CHECK_ERROR(retcode, "SQLAllocHandle(SQL_HANDLE_ENV)", - henv, SQL_HANDLE_ENV); - - // Set ODBC Verion - retcode = SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, - (SQLPOINTER*)SQL_OV_ODBC3, 0); - CHECK_ERROR(retcode, "SQLSetEnvAttr(SQL_ATTR_ODBC_VERSION)", - henv, SQL_HANDLE_ENV); - - // Allocate Connection - retcode = SQLAllocHandle(SQL_HANDLE_DBC, henv, &hdbc); - CHECK_ERROR(retcode, "SQLAllocHandle(SQL_HANDLE_DBC)", - henv, SQL_HANDLE_DBC); - - // Set Login Timeout - retcode = SQLSetConnectAttr(hdbc, SQL_LOGIN_TIMEOUT, (SQLPOINTER)5, 0); - CHECK_ERROR(retcode, "SQLSetConnectAttr(SQL_LOGIN_TIMEOUT)", - hdbc, SQL_HANDLE_DBC); - - // Set Auto Commit - retcode = SQLSetConnectAttr(hdbc, SQL_ATTR_AUTOCOMMIT, - (SQLPOINTER)(1), 0); - CHECK_ERROR(retcode, "SQLSetConnectAttr(SQL_ATTR_AUTOCOMMIT)", - hdbc, SQL_HANDLE_DBC); - - // Connect to DSN - sprintf(loginfo, "SQLConnect(DSN:%s)", dsn); - retcode = SQLConnect(hdbc, (SQLCHAR*) dsn, SQL_NTS, - (SQLCHAR*) NULL, 0, NULL, 0); - CHECK_ERROR(retcode, loginfo, hdbc, SQL_HANDLE_DBC); - - // init table info. - Exec(hdbc, "drop table if exists test_odbc_batch_insert"); - Exec(hdbc, "create table test_odbc_batch_insert(id int primary key, col varchar2(50))"); - - // The following code constructs the data to be inserted based on the data volume entered by users: - { - SQLRETURN retcode; - SQLHSTMT hstmtinesrt = SQL_NULL_HSTMT; - int i; - SQLCHAR *sql = NULL; - SQLINTEGER *ids = NULL; - SQLCHAR *cols = NULL; - SQLLEN *bufLenIds = NULL; - SQLLEN *bufLenCols = NULL; - SQLUSMALLINT *operptr = NULL; - SQLUSMALLINT *statusptr = NULL; - SQLULEN process = 0; - - // Data is constructed by column. Each column is stored continuously. - ids = (SQLINTEGER*)malloc(sizeof(ids[0]) * batchCount); - cols = (SQLCHAR*)malloc(sizeof(cols[0]) * batchCount * 50); - // Data size in each row for a column - bufLenIds = (SQLLEN*)malloc(sizeof(bufLenIds[0]) * batchCount); - bufLenCols = (SQLLEN*)malloc(sizeof(bufLenCols[0]) * batchCount); - // Whether this row needs to be processed. The value is SQL_PARAM_IGNORE or SQL_PARAM_PROCEED. - operptr = (SQLUSMALLINT*)malloc(sizeof(operptr[0]) * batchCount); - memset(operptr, 0, sizeof(operptr[0]) * batchCount); - // Processing result of the row - // Note: In the database, a statement belongs to one transaction. Therefore, data is processed as a unit. Either all data is inserted successfully or all data fails to be inserted. - statusptr = (SQLUSMALLINT*)malloc(sizeof(statusptr[0]) * batchCount); - memset(statusptr, 88, sizeof(statusptr[0]) * batchCount); - - if (NULL == ids || NULL == cols || NULL == bufLenCols || NULL == bufLenIds) - { - fprintf(stderr, "FAILED:\tmalloc data memory failed\n"); - goto exit; - } - - for (int i = 0; i < batchCount; i++) - { - ids[i] = i; - sprintf(cols + 50 * i, "column test value %d", i); - bufLenIds[i] = sizeof(ids[i]); - bufLenCols[i] = strlen(cols + 50 * i); - operptr[i] = (i < ignoreCount) ? 
SQL_PARAM_IGNORE : SQL_PARAM_PROCEED; - } - - // Allocate Statement Handle - retcode = SQLAllocHandle(SQL_HANDLE_STMT, hdbc, &hstmtinesrt); - CHECK_ERROR(retcode, "SQLAllocHandle(SQL_HANDLE_STMT)", - hstmtinesrt, SQL_HANDLE_STMT); - - // Prepare Statement - sql = (SQLCHAR*)"insert into test_odbc_batch_insert values(?, ?)"; - retcode = SQLPrepare(hstmtinesrt, (SQLCHAR*) sql, SQL_NTS); - sprintf((char*)loginfo, "SQLPrepare log: %s", (char*)sql); - CHECK_ERROR(retcode, loginfo, hstmtinesrt, SQL_HANDLE_STMT); - - retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAMSET_SIZE, (SQLPOINTER)batchCount, sizeof(batchCount)); - CHECK_ERROR(retcode, "SQLSetStmtAttr", hstmtinesrt, SQL_HANDLE_STMT); - - retcode = SQLBindParameter(hstmtinesrt, 1, SQL_PARAM_INPUT, SQL_C_SLONG, SQL_INTEGER, sizeof(ids[0]), 0,&(ids[0]), 0, bufLenIds); - CHECK_ERROR(retcode, "SQLBindParameter for id", hstmtinesrt, SQL_HANDLE_STMT); - - retcode = SQLBindParameter(hstmtinesrt, 2, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_CHAR, 50, 50, cols, 50, bufLenCols); - CHECK_ERROR(retcode, "SQLBindParameter for cols", hstmtinesrt, SQL_HANDLE_STMT); - - retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAMS_PROCESSED_PTR, (SQLPOINTER)&process, sizeof(process)); - CHECK_ERROR(retcode, "SQLSetStmtAttr for SQL_ATTR_PARAMS_PROCESSED_PTR", hstmtinesrt, SQL_HANDLE_STMT); - - retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAM_STATUS_PTR, (SQLPOINTER)statusptr, sizeof(statusptr[0]) * batchCount); - CHECK_ERROR(retcode, "SQLSetStmtAttr for SQL_ATTR_PARAM_STATUS_PTR", hstmtinesrt, SQL_HANDLE_STMT); - - retcode = SQLSetStmtAttr(hstmtinesrt, SQL_ATTR_PARAM_OPERATION_PTR, (SQLPOINTER)operptr, sizeof(operptr[0]) * batchCount); - CHECK_ERROR(retcode, "SQLSetStmtAttr for SQL_ATTR_PARAM_OPERATION_PTR", hstmtinesrt, SQL_HANDLE_STMT); - - retcode = SQLExecute(hstmtinesrt); - sprintf((char*)loginfo, "SQLExecute stmt log: %s", (char*)sql); - CHECK_ERROR(retcode, loginfo, hstmtinesrt, SQL_HANDLE_STMT); - - retcode = SQLRowCount(hstmtinesrt, &rowsCount); - CHECK_ERROR(retcode, "SQLRowCount execution", hstmtinesrt, SQL_HANDLE_STMT); - - if (rowsCount != (batchCount - ignoreCount)) - { - sprintf(loginfo, "(batchCount - ignoreCount)(%d) != rowsCount(%d)", (batchCount - ignoreCount), rowsCount); - CHECK_ERROR(SQL_ERROR, loginfo, NULL, SQL_HANDLE_STMT); - } - else - { - sprintf(loginfo, "(batchCount - ignoreCount)(%d) == rowsCount(%d)", (batchCount - ignoreCount), rowsCount); - CHECK_ERROR(SQL_SUCCESS, loginfo, NULL, SQL_HANDLE_STMT); - } - - // check row number returned - if (rowsCount != process) - { - sprintf(loginfo, "process(%d) != rowsCount(%d)", process, rowsCount); - CHECK_ERROR(SQL_ERROR, loginfo, NULL, SQL_HANDLE_STMT); - } - else - { - sprintf(loginfo, "process(%d) == rowsCount(%d)", process, rowsCount); - CHECK_ERROR(SQL_SUCCESS, loginfo, NULL, SQL_HANDLE_STMT); - } - - for (int i = 0; i < batchCount; i++) - { - if (i < ignoreCount) - { - if (statusptr[i] != SQL_PARAM_UNUSED) - { - sprintf(loginfo, "statusptr[%d](%d) != SQL_PARAM_UNUSED", i, statusptr[i]); - CHECK_ERROR(SQL_ERROR, loginfo, NULL, SQL_HANDLE_STMT); - } - } - else if (statusptr[i] != SQL_PARAM_SUCCESS) - { - sprintf(loginfo, "statusptr[%d](%d) != SQL_PARAM_SUCCESS", i, statusptr[i]); - CHECK_ERROR(SQL_ERROR, loginfo, NULL, SQL_HANDLE_STMT); - } - } - - retcode = SQLFreeHandle(SQL_HANDLE_STMT, hstmtinesrt); - sprintf((char*)loginfo, "SQLFreeHandle hstmtinesrt"); - CHECK_ERROR(retcode, loginfo, hstmtinesrt, SQL_HANDLE_STMT); - } - - -exit: - printf ("\nComplete.\n"); - - // Connection - if (hdbc != 
SQL_NULL_HDBC) { - SQLDisconnect(hdbc); - SQLFreeHandle(SQL_HANDLE_DBC, hdbc); - } - - // Environment - if (henv != SQL_NULL_HENV) - SQLFreeHandle(SQL_HANDLE_ENV, henv); - - return 0; -} -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-3-SQLAllocHandle.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-3-SQLAllocHandle.md deleted file mode 100644 index e27295b9..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-3-SQLAllocHandle.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: SQLAllocHandle -summary: SQLAllocHandle -author: Guo Huan -date: 2021-05-17 ---- - -# SQLAllocHandle - -## Function - -SQLAllocHandle is used to allocate environment, connection, statement, or descriptor handles. This function replaces the deprecated ODBC 2.x functions SQLAllocEnv, SQLAllocConnect, and SQLAllocStmt. - -## Prototype - -``` -SQLRETURN SQLAllocHandle(SQLSMALLINT HandleType, - SQLHANDLE InputHandle, - SQLHANDLE *OutputHandlePtr); -``` - -## Parameter - -**Table 1** SQLAllocHandle parameters - -| **Keyword** | **Parameter Description** | -| :-------------- | :----------------------------------------------------------- | -| HandleType | Type of handle to be allocated by SQLAllocHandle. The value must be one of the following:
- SQL_HANDLE_ENV (environment handle)
- SQL_HANDLE_DBC (connection handle)
- SQL_HANDLE_STMT (statement handle)
- SQL_HANDLE_DESC (descriptor handle)
Handles must be allocated in the following sequence: **SQL_HANDLE_ENV** > **SQL_HANDLE_DBC** > **SQL_HANDLE_STMT**. A handle allocated later depends on the handle allocated before it. |
-| InputHandle     | Existing handle to use as a context for the new handle being allocated.<br>
- If **HandleType** is **SQL_HANDLE_ENV**, this parameter is set to **SQL_NULL_HANDLE**.
- If **HandleType** is **SQL_HANDLE_DBC**, this parameter value must be an environment handle.
- If **HandleType** is **SQL_HANDLE_STMT** or **SQL_HANDLE_DESC**, this parameter value must be a connection handle. | -| OutputHandlePtr | **Output parameter**: Pointer to a buffer that stores the returned handle in the newly allocated data structure. | - -## Return Value - -- **SQL_SUCCESS** indicates that the call succeeded. -- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed. -- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures. -- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs. - -## Precautions - -If SQLAllocHandle returns **SQL_ERROR** when it is used to allocate a non-environment handle, it sets **OutputHandlePtr** to **SQL_NULL_HDBC**, **SQL_NULL_HSTMT**, or **SQL_NULL_HDESC**. The application can then call SQLGetDiagRec, with **HandleType** and **Handle** set to the value of **IntputHandle**, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information. - -## Example - -See [Examples](2-23-Examples.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-4-SQLAllocStmt.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-4-SQLAllocStmt.md deleted file mode 100644 index f23e49c5..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-4-SQLAllocStmt.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: SQLAllocStmt -summary: SQLAllocStmt -author: Guo Huan -date: 2021-05-17 ---- - -# SQLAllocStmt - -In ODBC 3.x, SQLAllocStmt was deprecated and replaced by SQLAllocHandle. For details, see [SQLAllocHandle](2-3-SQLAllocHandle.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-5-SQLBindCol.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-5-SQLBindCol.md deleted file mode 100644 index bd4dc0ca..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-5-SQLBindCol.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: SQLBindCol -summary: SQLBindCol -author: Guo Huan -date: 2021-05-17 ---- - -# SQLBindCol - -## Function - -SQLBindCol is used to bind columns in a result set to an application data buffer. - -## Prototype - -``` -SQLRETURN SQLBindCol(SQLHSTMT StatementHandle, - SQLUSMALLINT ColumnNumber, - SQLSMALLINT TargetType, - SQLPOINTER TargetValuePtr, - SQLLEN BufferLength, - SQLLEN *StrLen_or_IndPtr); -``` - -## Parameters - -**Table 1** SQLBindCol parameters - -| **Keyword** | **Parameter Description** | -| :--------------- | :----------------------------------------------------------- | -| StatementHandle | Statement handle. | -| ColumnNumber | Number of the column to be bound. The column number starts with 0 and increases in ascending order. Column 0 is the bookmark column. If no bookmark column is set, column numbers start with 1. | -| TargetType | C data type in the buffer. | -| TargetValuePtr | **Output parameter**: pointer to the buffer bound with the column. The SQLFetch function returns data in the buffer. If **TargetValuePtr** is null, **StrLen_or_IndPtr** is a valid value. | -| BufferLength | Length of the **TargetValuePtr** buffer in bytes. | -| StrLen_or_IndPtr | **Output parameter**: pointer to the length or indicator of the buffer. If **StrLen_or_IndPtr** is null, no length or indicator is used. 
|
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs.
-
-## Precautions
-
-If SQLBindCol returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_STMT** and **StatementHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information.
-
-## Example
-
-See [Examples](2-23-Examples.md).
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-6-SQLBindParameter.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-6-SQLBindParameter.md
deleted file mode 100644
index d8b5f924..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-6-SQLBindParameter.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-title: SQLBindParameter
-summary: SQLBindParameter
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLBindParameter
-
-## Function
-
-SQLBindParameter is used to bind parameter markers in an SQL statement to a buffer.
-
-## Prototype
-
-```
-SQLRETURN SQLBindParameter(SQLHSTMT StatementHandle,
-                           SQLUSMALLINT ParameterNumber,
-                           SQLSMALLINT InputOutputType,
-                           SQLSMALLINT ValueType,
-                           SQLSMALLINT ParameterType,
-                           SQLULEN ColumnSize,
-                           SQLSMALLINT DecimalDigits,
-                           SQLPOINTER ParameterValuePtr,
-                           SQLLEN BufferLength,
-                           SQLLEN *StrLen_or_IndPtr);
-```
-
-## Parameters
-
-**Table 1** SQLBindParameter
-
-| **Keyword**       | **Parameter Description**                                    |
-| :---------------- | :----------------------------------------------------------- |
-| StatementHandle   | Statement handle.                                            |
-| ParameterNumber   | Parameter marker number, starting with 1 and increasing in ascending order. |
-| InputOutputType   | Input/output type of the parameter.                          |
-| ValueType         | C data type of the parameter.                                |
-| ParameterType     | SQL data type of the parameter.                              |
-| ColumnSize        | Size of the column or expression of the corresponding parameter marker. |
-| DecimalDigits     | Decimal digit of the column or the expression of the corresponding parameter marker. |
-| ParameterValuePtr | Pointer to the buffer that stores the parameter value.       |
-| BufferLength      | Length of the **ParameterValuePtr** buffer in bytes.         |
-| StrLen_or_IndPtr  | Pointer to the length or indicator of the buffer. If **StrLen_or_IndPtr** is null, no length or indicator is used. |
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs.
-
-## Precautions
-
-If SQLBindParameter returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_STMT** and **StatementHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information.
-
-## Example
-
-See [Examples](2-23-Examples.md).
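-
-The following minimal sketch (not from the original example set) binds an integer and a VARCHAR parameter for a hypothetical statement such as `insert into t values(?, ?)`; the bound buffers must remain valid until SQLExecute completes.
-
-```c
-#include <sql.h>
-#include <sqlext.h>
-
-/* Hypothetical two-parameter binding: an integer and a null-terminated
- * string. The caller owns the buffers until execution finishes. */
-static SQLRETURN bind_two_params(SQLHSTMT hstmt, SQLINTEGER *id,
-                                 SQLCHAR *name, SQLLEN *nameLen)
-{
-    SQLRETURN rc = SQLBindParameter(hstmt, 1, SQL_PARAM_INPUT, SQL_C_SLONG,
-                                    SQL_INTEGER, 0, 0, id, 0, NULL);
-    if (!SQL_SUCCEEDED(rc))
-        return rc;
-    *nameLen = SQL_NTS;  /* tells the driver the string is null-terminated */
-    return SQLBindParameter(hstmt, 2, SQL_PARAM_INPUT, SQL_C_CHAR,
-                            SQL_VARCHAR, 32, 0, name, 0, nameLen);
-}
-```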
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-7-SQLColAttribute.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-7-SQLColAttribute.md
deleted file mode 100644
index fef3d7d5..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-7-SQLColAttribute.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: SQLColAttribute
-summary: SQLColAttribute
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLColAttribute
-
-## Function
-
-SQLColAttribute is used to return the descriptor information about a column in the result set.
-
-## Prototype
-
-```
-SQLRETURN SQLColAttribute(SQLHSTMT StatementHandle,
-                          SQLUSMALLINT ColumnNumber,
-                          SQLUSMALLINT FieldIdentifier,
-                          SQLPOINTER CharacterAttributePtr,
-                          SQLSMALLINT BufferLength,
-                          SQLSMALLINT *StringLengthPtr,
-                          SQLLEN *NumericAttributePtr);
-```
-
-## Parameters
-
-**Table 1** SQLColAttribute parameters
-
-| **Keyword**           | **Parameter Description**                                    |
-| :-------------------- | :----------------------------------------------------------- |
-| StatementHandle       | Statement handle.                                            |
-| ColumnNumber          | Column number of the field to be queried, starting with 1 and increasing in ascending order. |
-| FieldIdentifier       | Field identifier of **ColumnNumber** in IRD.                 |
-| CharacterAttributePtr | **Output parameter**: pointer to the buffer that returns the **FieldIdentifier** value. |
-| BufferLength          | - **BufferLength** indicates the length of the buffer if **FieldIdentifier** is an ODBC-defined field and **CharacterAttributePtr** points to a string or a binary buffer.<br>
- Ignore this parameter if **FieldIdentifier** is an ODBC-defined field and **CharacterAttributePtr** points to an integer. |
-| StringLengthPtr       | **Output parameter**: pointer to a buffer in which the total number of valid bytes (for string data) available in **CharacterAttributePtr** is stored. Ignore the value of **BufferLength** if the data is not a string. |
-| NumericAttributePtr   | **Output parameter**: pointer to an integer buffer in which the value of **FieldIdentifier** in the **ColumnNumber** row of the IRD is returned. |
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs.
-
-## Precautions
-
-If SQLColAttribute returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_STMT** and **StatementHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information.
-
-## Example
-
-See [Examples](2-23-Examples.md).
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-8-SQLConnect.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-8-SQLConnect.md
deleted file mode 100644
index e61f96ad..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-8-SQLConnect.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-title: SQLConnect
-summary: SQLConnect
-author: Guo Huan
-date: 2021-05-17
----
-
-# SQLConnect
-
-## Function
-
-SQLConnect is used to establish a connection between a driver and a data source. After the connection is established, the connection handle can be used to access all information about the data source, including its running status, transaction processing status, and error information.
-
-## Prototype
-
-```
-SQLRETURN SQLConnect(SQLHDBC ConnectionHandle,
-                     SQLCHAR *ServerName,
-                     SQLSMALLINT NameLength1,
-                     SQLCHAR *UserName,
-                     SQLSMALLINT NameLength2,
-                     SQLCHAR *Authentication,
-                     SQLSMALLINT NameLength3);
-```
-
-## Parameter
-
-**Table 1** SQLConnect parameters
-
-| **Keyword**      | **Parameter Description**                         |
-| :--------------- | :------------------------------------------------ |
-| ConnectionHandle | Connection handle, obtained from SQLAllocHandle.  |
-| ServerName       | Name of the data source to connect.               |
-| NameLength1      | Length of **ServerName**.                         |
-| UserName         | Username of the database in the data source.      |
-| NameLength2      | Length of **UserName**.                           |
-| Authentication   | User password of the database in the data source. |
-| NameLength3      | Length of **Authentication**.                     |
-
-## Return Value
-
-- **SQL_SUCCESS** indicates that the call succeeded.
-- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed.
-- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures.
-- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs.
-- **SQL_STILL_EXECUTING** indicates that the statement is being executed.
- -## Precautions - -If SQLConnect returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_DBC** and **ConnectionHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information. - -## Example - -See [Examples](2-23-Examples.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-9-SQLDisconnect.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-9-SQLDisconnect.md deleted file mode 100644 index a4eab78a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/2-9-SQLDisconnect.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: SQLDisconnect -summary: SQLDisconnect -author: Guo Huan -date: 2021-05-17 ---- - -# SQLDisconnect - -## Function - -SQLDisconnect is used to close the connection associated with a database connection handle. - -## Prototype - -``` -SQLRETURN SQLDisconnect(SQLHDBC ConnectionHandle); -``` - -## Parameter - -**Table 1** SQLDisconnect parameters - -| **Keyword** | **Parameter Description** | -| :--------------- | :----------------------------------------------- | -| ConnectionHandle | Connection handle, obtained from SQLAllocHandle. | - -## Return Value - -- **SQL_SUCCESS** indicates that the call succeeded. -- **SQL_SUCCESS_WITH_INFO** indicates that some warning information is displayed. -- **SQL_ERROR** indicates major errors, such as memory allocation and connection failures. -- **SQL_INVALID_HANDLE** indicates that invalid handles were called. This value may also be returned by other APIs. - -## Precautions - -If SQLDisconnect returns **SQL_ERROR** or **SQL_SUCCESS_WITH_INFO**, the application can call SQLGetDiagRec, with **HandleType** and **Handle** set to **SQL_HANDLE_DBC** and **ConnectionHandle**, respectively, to obtain the **SQLSTATE** value. The **SQLSTATE** value provides the detailed function calling information. - -## Example - -See [Examples](2-23-Examples.md). 
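-
-The following minimal sketch (an illustration, not original manual content) shows the full connect/disconnect lifecycle around SQLConnect and SQLDisconnect; the DSN `mogdb` and the credentials are placeholders.
-
-```c
-#include <stdio.h>
-#include <sql.h>
-#include <sqlext.h>
-
-/* Hypothetical lifecycle: allocate, connect, work, disconnect, free. */
-static int run_once(void)
-{
-    SQLHENV henv; SQLHDBC hdbc;
-    SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, &henv);
-    SQLSetEnvAttr(henv, SQL_ATTR_ODBC_VERSION, (SQLPOINTER)SQL_OV_ODBC3, 0);
-    SQLAllocHandle(SQL_HANDLE_DBC, henv, &hdbc);
-    if (SQL_SUCCEEDED(SQLConnect(hdbc, (SQLCHAR *)"mogdb", SQL_NTS,
-                                 (SQLCHAR *)"user", SQL_NTS,
-                                 (SQLCHAR *)"password", SQL_NTS))) {
-        printf("connected\n");
-        /* ... allocate statement handles and run SQL here ... */
-        SQLDisconnect(hdbc);               /* close the session */
-    }
-    SQLFreeHandle(SQL_HANDLE_DBC, hdbc);   /* release in reverse order */
-    SQLFreeHandle(SQL_HANDLE_ENV, henv);
-    return 0;
-}
-```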
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/odbc-interface-reference.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/odbc-interface-reference.md
deleted file mode 100644
index 8ed22e55..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/3-development-based-on-odbc/6-ODBC/odbc-interface-reference.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-title: ODBC Interface Reference
-summary: ODBC Interface Reference
-author: Guo Huan
-date: 2021-05-17
----
-
-# ODBC Interface Reference
-
-- **[SQLAllocEnv](2-1-SQLAllocEnv.md)**
-- **[SQLAllocConnect](2-2-SQLAllocConnect.md)**
-- **[SQLAllocHandle](2-3-SQLAllocHandle.md)**
-- **[SQLAllocStmt](2-4-SQLAllocStmt.md)**
-- **[SQLBindCol](2-5-SQLBindCol.md)**
-- **[SQLBindParameter](2-6-SQLBindParameter.md)**
-- **[SQLColAttribute](2-7-SQLColAttribute.md)**
-- **[SQLConnect](2-8-SQLConnect.md)**
-- **[SQLDisconnect](2-9-SQLDisconnect.md)**
-- **[SQLExecDirect](2-10-SQLExecDirect.md)**
-- **[SQLExecute](2-11-SQLExecute.md)**
-- **[SQLFetch](2-12-SQLFetch.md)**
-- **[SQLFreeStmt](2-13-SQLFreeStmt.md)**
-- **[SQLFreeConnect](2-14-SQLFreeConnect.md)**
-- **[SQLFreeHandle](2-15-SQLFreeHandle.md)**
-- **[SQLFreeEnv](2-16-SQLFreeEnv.md)**
-- **[SQLPrepare](2-17-SQLPrepare.md)**
-- **[SQLGetData](2-18-SQLGetData.md)**
-- **[SQLGetDiagRec](2-19-SQLGetDiagRec.md)**
-- **[SQLSetConnectAttr](2-20-SQLSetConnectAttr.md)**
-- **[SQLSetEnvAttr](2-21-SQLSetEnvAttr.md)**
-- **[SQLSetStmtAttr](2-22-SQLSetStmtAttr.md)**
-- **[Examples](2-23-Examples.md)**
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/1-database-connection-control-functions.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/1-database-connection-control-functions.md
deleted file mode 100644
index 353c68c4..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/1-database-connection-control-functions.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Database connection control functions
-summary: Database connection control functions
-author: Guo Huan
-date: 2021-05-17
----
-
-# Database Connection Control Functions
-
-Database connection control functions control the connections to MogDB servers. An application can connect to multiple servers at a time; for example, a client can connect to multiple databases. Each connection is represented by a PGconn object, which is obtained from the function PQconnectdb, PQconnectdbParams, or PQsetdbLogin. Note that these functions will always return a non-null object pointer, unless there is too little memory to allocate the PGconn object. Information about the established connection is stored in the PGconn object. The PQstatus function can be called to check the return value for a successful connection.
-
-+ **[PQconnectdbParams](2-PQconnectdbParams.md)**
-+ **[PQconnectdb](3-PQconnectdb.md)**
-+ **[PQconninfoParse](4-PQconninfoParse.md)**
-+ **[PQconnectStart](5-PQconnectStart.md)**
-+ **[PQerrorMessage](6-PQerrorMessage.md)**
-+ **[PQsetdbLogin](7-PQsetdbLogin.md)**
-+ **[PQfinish](8-PQfinish.md)**
-+ **[PQreset](9-PQreset.md)**
-+ **[PQstatus](10-PQstatus.md)**
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/10-PQstatus.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/10-PQstatus.md
deleted file mode 100644
index 0e891c2d..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/10-PQstatus.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-title: PQstatus
-summary: PQstatus
-author: Guo Huan
-date: 2021-05-17
----
-
-# PQstatus
-
-## Function
-
-PQstatus is used to return the connection status.
-
-## Prototype
-
-```
-ConnStatusType PQstatus(const PGconn *conn);
-```
-
-## Parameter
-
-**Table 1** PQstatus parameter
-
-| **Keyword** | **Parameter Description**                                     |
-| :---------- | :------------------------------------------------------------ |
-| conn        | Pointer to the object that contains the connection information. |
-
-## Return Value
-
-**ConnStatusType** indicates the connection status. The enumerated values are as follows:
-
-```
-CONNECTION_STARTED
-Waiting for the connection to be established.
-
-CONNECTION_MADE
-Connection succeeded; waiting to send.
-
-CONNECTION_AWAITING_RESPONSE
-Waiting for a response from the server.
-
-CONNECTION_AUTH_OK
-Authentication received; waiting for backend startup to complete.
-
-CONNECTION_SSL_STARTUP
-Negotiating SSL encryption.
-
-CONNECTION_SETENV
-Negotiating environment-driven parameter settings.
-
-CONNECTION_OK
-Normal connection.
-
-CONNECTION_BAD
-Failed connection.
-```
-
-## Precautions
-
-The connection status can be one of the preceding values. After the asynchronous connection procedure is complete, only two of them, **CONNECTION_OK** and **CONNECTION_BAD**, can be returned. **CONNECTION_OK** indicates that the connection to the database is normal. **CONNECTION_BAD** indicates that the connection attempt failed. Generally, the **CONNECTION_OK** state remains until PQfinish is called. However, a communication failure may cause the connection status to change to **CONNECTION_BAD** before the connection procedure is complete. In this case, the application can attempt to call PQreset to restore the communication.
-
-## Example
-
-For details, see [Example](../../libpq-example.md).
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/2-PQconnectdbParams.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/2-PQconnectdbParams.md
deleted file mode 100644
index 644e629b..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/2-PQconnectdbParams.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: PQconnectdbParams
-summary: PQconnectdbParams
-author: Guo Huan
-date: 2021-05-17
----
-
-# PQconnectdbParams
-
-## Function
-
-PQconnectdbParams is used to establish a new connection with the database server.
- -## Prototype - -``` -PGconn *PQconnectdbParams(const char * const *keywords, - const char * const *values, - int expand_dbname); -``` - -## Parameter - -**Table 1** PQconnectdbParams parameters - -| **Keyword** | **Parameter Description** | -| :------------ | :----------------------------------------------------------- | -| keywords | An array of strings, each of which is a keyword. | -| values | Value assigned to each keyword. | -| expand_dbname | When **expand\_dbname** is non-zero, the **dbname** keyword value can be recognized as a connection string. Only **dbname** that first appears is expanded in this way, and any subsequent **dbname** value is treated as a database name. | - -## Return Value - -**PGconn \*** points to the object pointer that contains a connection. The memory is applied for by the function internally. - -## Precautions - -This function establishes a new database connection using the parameters taken from two NULL-terminated arrays. Unlike PQsetdbLogin, the parameter set can be extended without changing the function signature. Therefore, use of this function (or its non-blocking analogs PQconnectStartParams and PQconnectPoll) is preferred for new application programming. - -## Example - -For details, see [Example](../../libpq-example.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/3-PQconnectdb.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/3-PQconnectdb.md deleted file mode 100644 index ba689d78..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/3-PQconnectdb.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: PQconnectdb -summary: PQconnectdb -author: Guo Huan -date: 2021-05-17 ---- - -# PQconnectdb - -## Function - -PQconnectdb is used to establish a new connection with the database server. - -## Prototype - -``` -PGconn *PQconnectdb(const char *conninfo); -``` - -## Parameter - -**Table 1** PQconnectdb parameter - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| conninfo | Connection string. For details about the fields in the string, see Connection Characters. | - -## Return Value - -**PGconn \*** points to the object pointer that contains a connection. The memory is applied for by the function internally. - -## Precautions - -- This function establishes a new database connection using the parameters taken from the string **conninfo**. -- The input parameter can be empty, indicating that all default parameters can be used. It can contain one or more values separated by spaces or contain a URL. - -## Example - -For details, see [Example](../../libpq-example.md). 
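As a hedged sketch of the two connection functions documented above, the snippet below builds the same kind of connection with PQconnectdbParams using parallel keyword/value arrays. The parameter values are placeholders for a real MogDB instance.

```c
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    /* NULL-terminated, parallel keyword/value arrays; the values below
     * are placeholders. */
    static const char *const keywords[] = {"dbname", "host", "port", NULL};
    static const char *const values[]   = {"postgres", "localhost", "5432", NULL};

    /* expand_dbname = 0: the dbname entry is taken as a plain database name. */
    PGconn *conn = PQconnectdbParams(keywords, values, 0);

    if (PQstatus(conn) != CONNECTION_OK)
        fprintf(stderr, "Connection to database failed: %s", PQerrorMessage(conn));

    PQfinish(conn);
    return 0;
}
```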
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/4-PQconninfoParse.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/4-PQconninfoParse.md deleted file mode 100644 index 12ba2791..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/4-PQconninfoParse.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: PQconninfoParse -summary: PQconninfoParse -author: Guo Huan -date: 2021-05-17 ---- - -# PQconninfoParse - -## Function - -PQconninfoParse is used to return parsed connection options based on the connection. - -## Prototype - -``` -PQconninfoOption* PQconninfoParse(const char* conninfo, char** errmsg); -``` - -## Parameters - -**Table 1** - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| conninfo | Passed string. This parameter can be left empty. In this case, the default value is used. It can contain one or more values separated by spaces or contain a URL. | -| errmsg | Error information. | - -## Return Value - -PQconninfoOption pointers diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/5-PQconnectStart.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/5-PQconnectStart.md deleted file mode 100644 index d4214eb5..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/5-PQconnectStart.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: PQconnectStart -summary: PQconnectStart -author: Guo Huan -date: 2021-05-17 ---- - -# PQconnectStart - -## Function - -PQconnectStart is used to establish a non-blocking connection with the database server. - -## Prototype - -``` -PGconn* PQconnectStart(const char* conninfo); -``` - -## Parameters - -**Table 1** - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| conninfo | String of connection information. This parameter can be left empty. In this case, the default value is used. It can contain one or more values separated by spaces or contain a URL. | - -## Return Value - -PGconn pointers diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/6-PQerrorMessage.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/6-PQerrorMessage.md deleted file mode 100644 index f729e2b8..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/6-PQerrorMessage.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: PQerrorMessage -summary: PQerrorMessage -author: Guo Huan -date: 2021-05-17 ---- - -# PQerrorMessage - -## Function - -PQerrorMessage is used to return error information on a connection. - -## Prototype - -``` -char* PQerrorMessage(const PGconn* conn); -``` - -## Parameter - -**Table 1** - -| **Keyword** | **Parameter Description** | -| :---------- | :------------------------ | -| conn | Connection handle. 
| - -## Return Value - -char pointers - -## Example - -For details, see [Example](../../libpq-example.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/7-PQsetdbLogin.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/7-PQsetdbLogin.md deleted file mode 100644 index 2e064f37..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/7-PQsetdbLogin.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: PQsetdbLogin -summary: PQsetdbLogin -author: Guo Huan -date: 2021-05-17 ---- - -# PQsetdbLogin - -## Function - -PQsetdbLogin is used to establish a new connection with the database server. - -## Prototype - -``` -PGconn *PQsetdbLogin(const char *pghost, - const char *pgport, - const char *pgoptions, - const char *pgtty, - const char *dbName, - const char *login, - const char *pwd); -``` - -## Parameter - -**Table 1** PQsetdbLogin parameters - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| pghost | Name of the host to be connected. For details, see the **host** field described in Connection Characters. | -| pgport | Port number of the host server. For details, see the **port** field described in Connection Characters. | -| pgoptions | Command-line options to be sent to the server during running. For details, see the **options** field described in Connection Characters. | -| pgtty | This field can be ignored. (Previously, this field declares the output direction of server logs.) | -| dbName | Name of the database to be connected. For details, see the **dbname** field described in Connection Characters. | -| login | Username for connection. For details, see the **user** field described in Connection Characters. | -| pwd | Password used for authentication during connection. For details, see the **password** field described in Connection Characters. | - -## Return Value - -**PGconn \*** points to the object pointer that contains a connection. The memory is applied for by the function internally. - -## Precautions - -- This function is the predecessor of PQconnectdb with a fixed set of parameters. When an undefined parameter is called, its default value is used. Write NULL or an empty string for any one of the fixed parameters that is to be defaulted. -- If the **dbName** value contains an = sign or a valid prefix in the connection URL, it is taken as a conninfo string and passed to PQconnectdb, and the remaining parameters are consistent with PQconnectdbParams parameters. - -## Example - -For details, see [Example](../../libpq-example.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/8-PQfinish.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/8-PQfinish.md deleted file mode 100644 index 234d94f6..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/8-PQfinish.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: PQfinish -summary: PQfinish -author: Guo Huan -date: 2021-05-17 ---- - -# PQfinish - -## Function - -PQfinish is used to close the connection to the server and release the memory used by the PGconn object. 
- -## Prototype - -``` -void PQfinish(PGconn *conn); -``` - -## Parameter - -**Table 1** PQfinish parameter - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| conn | Points to the object pointer that contains the connection information. | - -## Precautions - -If the server connection attempt fails (as indicated by PQstatus), the application should call PQfinish to release the memory used by the PGconn object. The PGconn pointer must not be used again after PQfinish has been called. - -## Example - -For details, see [Example](../../libpq-example.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/9-PQreset.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/9-PQreset.md deleted file mode 100644 index 5360ef5e..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/1-database-connection-control-functions/9-PQreset.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: PQreset -summary: PQreset -author: Guo Huan -date: 2021-05-17 ---- - -# PQreset - -## Function - -PQreset is used to reset the communication port to the server. - -## Prototype - -``` -void PQreset(PGconn *conn); -``` - -## Parameter - -**Table 1** PQreset parameter - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| conn | Points to the object pointer that contains the connection information. | - -## Precautions - -This function will close the connection to the server and attempt to establish a new connection to the same server by using all the parameters previously used. This function is applicable to fault recovery after a connection exception occurs. - -## Example - -For details, see [Example](../../libpq-example.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/1-PQclear.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/1-PQclear.md deleted file mode 100644 index d31b1cec..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/1-PQclear.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: PQclear -summary: PQclear -author: Guo Huan -date: 2021-05-17 ---- - -# PQclear - -## Function - -PQclear is used to release the storage associated with PGresult. Any query result should be released by PQclear when it is no longer needed. - -## Prototype - -``` -void PQclear(PGresult *res); -``` - -## Parameters - -**Table 1** PQclear parameter - -| **Keyword** | **Parameter Description** | -| :---------- | :--------------------------------------------- | -| res | Object pointer that contains the query result. | - -## Precautions - -PGresult is not automatically released. That is, it does not disappear when a new query is submitted or even if you close the connection. To delete it, you must call PQclear. Otherwise, memory leakage occurs. - -## Example - -For details, see Example. 
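To tie PQfinish, PQreset, and PQclear together, here is a minimal hedged sketch of the lifecycle they imply, assuming an already established connection named conn: every PGresult is freed with PQclear, a dropped connection may be retried with PQreset, and only PQfinish, which is left to the caller here, frees the PGconn itself.

```c
#include <libpq-fe.h>

/* Sketch only: run one query on an existing connection, observing
 * the cleanup rules described above. */
static void
run_once(PGconn *conn)
{
    PGresult *res = PQexec(conn, "SELECT 1");

    /* A PGresult is never freed automatically; always call PQclear. */
    PQclear(res);

    /* A communication failure can leave the connection in CONNECTION_BAD;
     * PQreset retries the same connection with the original parameters. */
    if (PQstatus(conn) == CONNECTION_BAD)
        PQreset(conn);
}
```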
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/10-PQntuples.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/10-PQntuples.md deleted file mode 100644 index e3100a76..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/10-PQntuples.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: PQntuples -summary: PQntuples -author: Guo Huan -date: 2021-05-17 ---- - -# PQntuples - -## Function - -PQntuples is used to return the number of rows (tuples) in the query result. An overflow may occur if the return value is out of the value range allowed in a 32-bit OS. - -## Prototype - -``` -int PQntuples(const PGresult *res); -``` - -## Parameter - -**Table 1** - -| **Keyword** | **Parameter Description** | -| :---------- | :------------------------ | -| res | Operation result handle. | - -## Return Value - -Value of the int type - -## Example - -For details, see Example. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/11-PQprepare.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/11-PQprepare.md deleted file mode 100644 index 27ec13ad..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/11-PQprepare.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: PQprepare -summary: PQprepare -author: Guo Huan -date: 2021-05-17 ---- - -# PQprepare - -## Function - -PQprepare is used to submit a request to create a prepared statement with given parameters and wait for completion. - -## Prototype - -``` -PGresult *PQprepare(PGconn *conn, - const char *stmtName, - const char *query, - int nParams, - const Oid *paramTypes); -``` - -## Parameters - -**Table 1** PQprepare parameters - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| conn | Points to the object pointer that contains the connection information. | -| stmtName | Name of **stmt** to be executed. | -| query | Query string to be executed. | -| nParams | Parameter quantity. | -| paramTypes | Array of the parameter type. | - -## Return Value - -**PGresult** indicates the object pointer that contains the query result. - -## Precautions - -- PQprepare creates a prepared statement for later execution with PQexecPrepared. This function allows commands to be repeatedly executed, without being parsed and planned each time they are executed. PQprepare is supported only in protocol 3.0 or later. It will fail when protocol 2.0 is used. -- This function creates a prepared statement named **stmtName** from the query string, which must contain an SQL command. **stmtName** can be **""** to create an unnamed statement. In this case, any pre-existing unnamed statement will be automatically replaced. Otherwise, this is an error if the statement name has been defined in the current session. If any parameters are used, they are referred to in the query as $1, $2, and so on. **nParams** is the number of parameters for which types are pre-specified in the array paramTypes[]. (The array pointer can be **NULL** when **nParams** is **0**.) 
paramTypes[] specifies the data types to be assigned to the parameter symbols by OID. If **paramTypes** is **NULL**, or any element in the array is **0**, the server assigns a data type to the parameter symbol in the same way as it does for an untyped literal string. In addition, the query can use parameter symbols whose numbers are greater than **nParams**. Data types of these symbols will also be inferred. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> You can also execute the SQL **PREPARE** statement to create a prepared statement that is used with PQexecPrepared. Although there is no libpq function for deleting a prepared statement, the SQL **DEALLOCATE** statement can be used for this purpose. - -## Example - -For details, see Example. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/12-PQresultStatus.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/12-PQresultStatus.md deleted file mode 100644 index 4a295841..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/12-PQresultStatus.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: PQresultStatus -summary: PQresultStatus -author: Guo Huan -date: 2021-05-17 ---- - -# PQresultStatus - -## Function - -PQresultStatus is used to return the result status of a command. - -## Prototype - -``` -ExecStatusType PQresultStatus(const PGresult *res); -``` - -## Parameter - -**Table 1** PQresultStatus parameter - -| **Keyword** | **Parameter Description** | -| :---------- | :--------------------------------------------- | -| res | Object pointer that contains the query result. | - -## Return Value - -**ExecStatusType** indicates the command execution status. The enumerated values are as follows: - -``` -PQresultStatus can return one of the following values: -PGRES_EMPTY_QUERY -The string sent to the server was empty. - -PGRES_COMMAND_OK -A command that does not return data was successfully executed. - -PGRES_TUPLES_OK -A query (such as SELECT or SHOW) that returns data was successfully executed. - -PGRES_COPY_OUT -Copy Out (from the server) data transfer started. - -PGRES_COPY_IN -Copy In (to the server) data transfer started. - -PGRES_BAD_RESPONSE -The response from the server cannot be understood. - -PGRES_NONFATAL_ERROR -A non-fatal error (notification or warning) occurred. - -PGRES_FATAL_ERROR -A fatal error occurred. - -PGRES_COPY_BOTH -Copy In/Out (to and from the server) data transfer started. This state occurs only in streaming replication. - -PGRES_SINGLE_TUPLE -PGresult contains a result tuple from the current command. This state occurs in a single-row query. -``` - -## Precautions - -- Note that a SELECT command that happens to retrieve zero rows still returns **PGRES_TUPLES_OK**. **PGRES_COMMAND_OK** is used for commands that can never return rows (such as INSERT or UPDATE without a RETURNING clause). The result status **PGRES_EMPTY_QUERY** might indicate a bug in the client software. -- The result status **PGRES_NONFATAL_ERROR** will never be returned directly by PQexec or other query execution functions. Instead, such results will be passed to the notice processor. - -## Example - -For details, see Example.
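As a hedged sketch tying PQprepare to PQresultStatus, the helper below creates a named prepared statement and checks the outcome. The statement name, query, and the test1 table are illustrative only.

```c
#include <stdio.h>
#include <libpq-fe.h>

/* Sketch: create a named prepared statement and verify the outcome
 * with PQresultStatus. */
static int
prepare_stmt(PGconn *conn)
{
    PGresult *res = PQprepare(conn, "fetch_by_id",
                              "SELECT * FROM test1 WHERE i = $1",
                              1, NULL);   /* NULL: let the server infer the type of $1 */

    if (PQresultStatus(res) != PGRES_COMMAND_OK)
    {
        fprintf(stderr, "PREPARE failed: %s", PQerrorMessage(conn));
        PQclear(res);
        return -1;
    }
    PQclear(res);
    return 0;
}
```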
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/2-PQexec.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/2-PQexec.md deleted file mode 100644 index c752c487..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/2-PQexec.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: PQexec -summary: PQexec -author: Guo Huan -date: 2021-05-17 ---- - -# PQexec - -## Function - -PQexec is used to commit a command to the server and wait for the result. - -## Prototype - -``` -PGresult *PQexec(PGconn *conn, const char *command); -``` - -## Parameter - -**Table 1** PQexec parameters - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| conn | Points to the object pointer that contains the connection information. | -| command | Query string to be executed. | - -## Return Value - -**PGresult** indicates the object pointer that contains the query result. - -## Precautions - -The PQresultStatus function should be called to check the return value for any errors (a null pointer is also possible and should be treated as **PGRES_FATAL_ERROR**). The PQerrorMessage function can be called to obtain more information about such errors. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> The command string can contain multiple SQL commands separated by semicolons (;). Multiple queries sent in a PQexec call are processed in one transaction, unless there are specific BEGIN/COMMIT commands in the query string to divide the string into multiple transactions. Note that the returned PGresult structure describes only the result of the last command executed from the string. If a command fails, the string processing stops and the returned PGresult describes the error condition. - -## Example - -For details, see Example. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/3-PQexecParams.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/3-PQexecParams.md deleted file mode 100644 index f8c2120c..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/3-PQexecParams.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: PQexecParams -summary: PQexecParams -author: Guo Huan -date: 2021-05-17 ---- - -# PQexecParams - -## Function - -PQexecParams is used to run a command with one or more bound parameters. - -## Prototype - -``` -PGresult* PQexecParams(PGconn* conn, - const char* command, - int nParams, - const Oid* paramTypes, - const char* const* paramValues, - const int* paramLengths, - const int* paramFormats, - int resultFormat); -``` - -## Parameter - -**Table 1** - -| **Keyword** | **Parameter Description** | -| :----------- | :---------------------------------- | -| conn | Connection handle. | -| command | SQL text string. | -| nParams | Number of parameters to be bound. | -| paramTypes | Types of parameters to be bound. | -| paramValues | Values of parameters to be bound. | -| paramLengths | Parameter lengths. | -| paramFormats | Parameter formats (text or binary). | -| resultFormat | Result format (text or binary).
| - -## Return Value - -PGresult pointers diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/4-PQexecParamsBatch.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/4-PQexecParamsBatch.md deleted file mode 100644 index f7e4b3c0..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/4-PQexecParamsBatch.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: PQexecParamsBatch -summary: PQexecParamsBatch -author: Guo Huan -date: 2021-05-17 ---- - -# PQexecParamsBatch - -## Function - -PQexecParamsBatch is used to run a command to bind batches of parameters. - -## Prototype - -``` -PGresult* PQexecParamsBatch(PGconn* conn, - const char* command, - int nParams, - int nBatch, - const Oid* paramTypes, - const char* const* paramValues, - const int* paramLengths, - const int* paramFormats, - int resultFormat); -``` - -## Parameter - -**Table 1** - -| **Keyword** | **Parameter Description** | -| :----------- | :---------------------------------- | -| conn | Connection handle. | -| command | SQL text string. | -| nParams | Number of parameters to be bound. | -| nBatch | Number of batch operations. | -| paramTypes | Types of parameters to be bound. | -| paramValues | Values of parameters to be bound. | -| paramLengths | Parameter lengths. | -| paramFormats | Parameter formats (text or binary). | -| resultFormat | Result format (text or binary). | - -## Return Value - -PGresult pointers diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/5-PQexecPrepared.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/5-PQexecPrepared.md deleted file mode 100644 index b2eab81b..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/5-PQexecPrepared.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: PQexecPrepared -summary: PQexecPrepared -author: Guo Huan -date: 2021-05-17 ---- - -# PQexecPrepared - -## Function - -PQexecPrepared is used to send a request to execute a prepared statement with given parameters and wait for the result. - -## Prototype - -``` -PGresult* PQexecPrepared(PGconn* conn, - const char* stmtName, - int nParams, - const char* const* paramValues, - const int* paramLengths, - const int* paramFormats, - int resultFormat); -``` - -## Parameter - -**Table 1** - -| **Keyword** | **Parameter Description** | -| :----------- | :----------------------------------------------------------- | -| conn | Connection handle. | -| stmtName | **stmt** name, which can be set to "" or NULL to reference an unnamed statement. Otherwise, it must be the name of an existing prepared statement. | -| nParams | Parameter quantity. | -| paramValues | Actual values of parameters. | -| paramLengths | Actual data lengths of parameters. | -| paramFormats | Parameter formats (text or binary). | -| resultFormat | Return result format (text or binary). 
| - -## Return Value - -PGresult pointers diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/6-PQexecPreparedBatch.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/6-PQexecPreparedBatch.md deleted file mode 100644 index 23ed2c96..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/6-PQexecPreparedBatch.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: PQexecPreparedBatch -summary: PQexecPreparedBatch -author: Guo Huan -date: 2021-05-17 ---- - -# PQexecPreparedBatch - -## Function - -PQexecPreparedBatch is used to send a request to execute a prepared statement with batches of given parameters and wait for the result. - -## Prototype - -``` -PGresult* PQexecPreparedBatch(PGconn* conn, - const char* stmtName, - int nParams, - int nBatchCount, - const char* const* paramValues, - const int* paramLengths, - const int* paramFormats, - int resultFormat); -``` - -## Parameter - -**Table 1** - -| **Keyword** | **Parameter Description** | -| :----------- | :----------------------------------------------------------- | -| conn | Connection handle. | -| stmtName | **stmt** name, which can be set to "" or NULL to reference an unnamed statement. Otherwise, it must be the name of an existing prepared statement. | -| nParams | Parameter quantity. | -| nBatchCount | Number of batches. | -| paramValues | Actual values of parameters. | -| paramLengths | Actual data lengths of parameters. | -| paramFormats | Parameter formats (text or binary). | -| resultFormat | Return result format (text or binary). | - -## Return Value - -PGresult pointers diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/7-PQfname.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/7-PQfname.md deleted file mode 100644 index f6203554..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/7-PQfname.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: PQfname -summary: PQfname -author: Guo Huan -date: 2021-05-17 ---- - -# PQfname - -## Function - -PQfname is used to return the column name associated with the given column number. Column numbers start from 0. The caller should not release the result directly. The result will be released when the associated PGresult handle is passed to PQclear. - -## Prototype - -``` -char *PQfname(const PGresult *res, - int column_number); -``` - -## Parameter - -**Table 1** - -| **Keyword** | **Parameter Description** | -| :------------ | :------------------------ | -| res | Operation result handle. | -| column_number | Number of columns. | - -## Return Value - -char pointers - -## Example - -For details, see Example. 
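The following sketch shows one plausible use of PQexecPrepared with a single text-format parameter. The statement name "fetch_by_id" is hypothetical and is assumed to have been created earlier with PQprepare.

```c
#include <stdio.h>
#include <libpq-fe.h>

/* Sketch: execute the hypothetical "fetch_by_id" prepared statement,
 * binding one text-format parameter. */
static void
exec_prepared(PGconn *conn)
{
    static const char *const paramValues[] = {"2"};  /* text format needs no length */

    PGresult *res = PQexecPrepared(conn, "fetch_by_id",
                                   1, paramValues,
                                   NULL,  /* lengths: ignored for text parameters */
                                   NULL,  /* formats: NULL means all parameters are text */
                                   0);    /* request text-format results */

    if (PQresultStatus(res) != PGRES_TUPLES_OK)
        fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
    PQclear(res);
}
```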
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/8-PQgetvalue.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/8-PQgetvalue.md deleted file mode 100644 index 86a7fa97..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/8-PQgetvalue.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: PQgetvalue -summary: PQgetvalue -author: Guo Huan -date: 2021-05-17 ---- - -# PQgetvalue - -## Function - -PQgetvalue is used to return a single field value of one row of a PGresult. Row and column numbers start from 0. The caller should not release the result directly. The result will be released when the associated PGresult handle is passed to PQclear. - -## Prototype - -``` -char *PQgetvalue(const PGresult *res, - int row_number, - int column_number); -``` - -## Parameter - -**Table 1** - -| **Keyword** | **Parameter Description** | -| :------------ | :------------------------ | -| res | Operation result handle. | -| row_number | Number of rows. | -| column_number | Number of columns. | - -## Return Value - -For data in text format, the value returned by PQgetvalue is a null-terminated string representation of the field value. - -For binary data, the value is a binary representation determined by the typsend and typreceive functions of the data type. - -If this field is left blank, an empty string is returned. - -## Example - -For details, see Example. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/9-PQnfields.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/9-PQnfields.md deleted file mode 100644 index 459ecaa8..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/9-PQnfields.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: PQnfields -summary: PQnfields -author: Guo Huan -date: 2021-05-17 ---- - -# PQnfields - -## Function - -PQnfields is used to return the number of columns (fields) in each row of the query result. - -## Prototype - -``` -int PQnfields(const PGresult *res); -``` - -## Parameter - -**Table 1** - -| **Keyword** | **Parameter Description** | -| :---------- | :------------------------ | -| res | Operation result handle. | - -## Return Value - -Value of the int type - -## Example - -For details, see Example. 
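Combining the result-inspection functions above (PQntuples, PQnfields, PQfname, and PQgetvalue), a text-format result can be walked as in this minimal sketch.

```c
#include <stdio.h>
#include <libpq-fe.h>

/* Sketch: print a text-format result, column headers first. */
static void
print_result(const PGresult *res)
{
    int rows = PQntuples(res);
    int cols = PQnfields(res);

    for (int j = 0; j < cols; j++)
        printf("%-15s", PQfname(res, j));   /* column names */
    printf("\n");

    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
            printf("%-15s", PQgetvalue(res, i, j));
        printf("\n");
    }
}
```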
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/database-statement-execution-functions.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/database-statement-execution-functions.md deleted file mode 100644 index b33d4ad5..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/2-database-statement-execution-functions/database-statement-execution-functions.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Database Statement Execution Functions -summary: Database Statement Execution Functions -author: Guo Huan -date: 2023-05-18 ---- - -# Database Statement Execution Functions - -After the connection to the database server is successfully established, you can use the functions described in this section to execute SQL queries and commands. - -- **[PQclear](1-PQclear.md)** -- **[PQexec](2-PQexec.md)** -- **[PQexecParams](3-PQexecParams.md)** -- **[PQexecParamsBatch](4-PQexecParamsBatch.md)** -- **[PQexecPrepared](5-PQexecPrepared.md)** -- **[PQexecPreparedBatch](6-PQexecPreparedBatch.md)** -- **[PQfname](7-PQfname.md)** -- **[PQgetvalue](8-PQgetvalue.md)** -- **[PQnfields](9-PQnfields.md)** -- **[PQntuples](10-PQntuples.md)** -- **[PQprepare](11-PQprepare.md)** -- **[PQresultStatus](12-PQresultStatus.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/1-functions-for-asynchronous-command-processing.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/1-functions-for-asynchronous-command-processing.md deleted file mode 100644 index 9b0302c3..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/1-functions-for-asynchronous-command-processing.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Functions for Asynchronous Command Processing -summary: Functions for Asynchronous Command Processing -author: Guo Huan -date: 2021-05-17 ---- - -# Functions for Asynchronous Command Processing - -The PQexec function is adequate for committing commands in common, synchronous applications. However, it has several defects, which may be important to some users: - -- PQexec waits for the end of the command, but the application may have other work to do (for example, maintaining a user interface). In this case, the application does not want to block in PQexec while waiting for the response. -- As the client application is suspended while waiting for the result, it is difficult for the application to determine whether to cancel the ongoing command. -- PQexec can return only one PGresult structure. If the committed command string contains multiple SQL commands, all the PGresult structures except the last PGresult are discarded by PQexec. -- PQexec always collects the entire result of the command and caches it in a PGresult. Although this mode simplifies the error handling logic for applications, it is impractical for results that contain a large number of rows. - -Applications that do not want to be restricted by these limitations can use the following functions that PQexec is built from: PQsendQuery and PQgetResult. The functions PQsendQueryParams, PQsendPrepare, and PQsendQueryPrepared can also be used with PQgetResult.
- -- **[PQsendQuery](2-PQsendQuery.md)** -- **[PQsendQueryParams](3-PQsendQueryParams.md)** -- **[PQsendPrepare](4-PQsendPrepare.md)** -- **[PQsendQueryPrepared](5-PQsendQueryPrepared.md)** -- **[PQflush](6-PQflush.md)** diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/2-PQsendQuery.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/2-PQsendQuery.md deleted file mode 100644 index c8237c71..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/2-PQsendQuery.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: PQsendQuery -summary: PQsendQuery -author: Guo Huan -date: 2021-05-17 ---- - -# PQsendQuery - -## Function - -PQsendQuery is used to commit a command to the server without waiting for the result. If the query is successful, **1** is returned. Otherwise, **0** is returned. - -## Prototype - -```c -int PQsendQuery(PGconn *conn, const char *command); -``` - -## Parameter - -**Table 1** PQsendQuery parameters - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| conn | Points to the object pointer that contains the connection information. | -| command | Query string to be executed. | - -## Return Value - -**int** indicates the execution result. **1** indicates successful execution and **0** indicates an execution failure. The failure cause is stored in **conn->errorMessage**. - -## Precautions - -After PQsendQuery is successfully called, call PQgetResult one or more times to obtain the results. PQsendQuery cannot be called again (on the same connection) until PQgetResult returns a null pointer, indicating that the command execution is complete. - -## Example - -For details, see Example. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/3-PQsendQueryParams.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/3-PQsendQueryParams.md deleted file mode 100644 index d8e5c99f..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/3-PQsendQueryParams.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: PQsendQueryParams -summary: PQsendQueryParams -author: Guo Huan -date: 2021-05-17 ---- - -# PQsendQueryParams - -## Function - -PQsendQueryParams is used to commit a command and separate parameters to the server without waiting for the result. - -## Prototype - -```c -int PQsendQueryParams(PGconn *conn, - const char *command, - int nParams, - const Oid *paramTypes, - const char * const *paramValues, - const int *paramLengths, - const int *paramFormats, - int resultFormat); -``` - -## Parameter - -**Table 1** PQsendQueryParams parameters - -| **Keyword** | **Parameter Description** | -| :----------- | :----------------------------------------------------------- | -| conn | Points to the object pointer that contains the connection information. | -| command | Query string to be executed. | -| nParams | Parameter quantity. | -| paramTypes | Parameter type. | -| paramValues | Parameter value. | -| paramLengths | Parameter length. | -| paramFormats | Parameter format. | -| resultFormat | Result format. 
| - -## Return Value - -**int** indicates the execution result. **1** indicates successful execution and **0** indicates an execution failure. The failure cause is stored in **conn->errorMessage**. - -## Precautions - -PQsendQueryParams is equivalent to PQsendQuery. The only difference is that query parameters can be specified separately from the query string. PQsendQueryParams parameters are handled in the same way as PQexecParams parameters. Like PQexecParams, PQsendQueryParams cannot work on connections using protocol 2.0 and it allows only one command in the query string. - -## Example - -For details, see Example. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/4-PQsendPrepare.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/4-PQsendPrepare.md deleted file mode 100644 index 550c4f1e..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/4-PQsendPrepare.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: PQsendPrepare -summary: PQsendPrepare -author: Guo Huan -date: 2021-05-17 ---- - -# PQsendPrepare - -## Function - -PQsendPrepare is used to send a request to create a prepared statement with given parameters, without waiting for completion. - -## Prototype - -```c -int PQsendPrepare(PGconn *conn, - const char *stmtName, - const char *query, - int nParams, - const Oid *paramTypes); -``` - -## Parameters - -**Table 1** PQsendPrepare parameters - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| conn | Points to the object pointer that contains the connection information. | -| stmtName | Name of **stmt** to be executed. | -| query | Query string to be executed. | -| nParams | Parameter quantity. | -| paramTypes | Array of the parameter type. | - -## Return Value - -**int** indicates the execution result. **1** indicates successful execution and **0** indicates an execution failure. The failure cause is stored in **conn->errorMessage**. - -## Precautions - -PQsendPrepare is an asynchronous version of PQprepare. If it can dispatch a request, **1** is returned. Otherwise, **0** is returned. After a successful calling of PQsendPrepare, call PQgetResult to check whether the server successfully created the prepared statement. PQsendPrepare parameters are handled in the same way as PQprepare parameters. Like PQprepare, PQsendPrepare cannot work on connections using protocol 2.0. - -## Example - -For details, see Example. 
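A hedged sketch of the asynchronous pattern these functions enable: dispatch with PQsendQuery, then drain every PGresult until PQgetResult returns NULL. A fully non-blocking loop would additionally poll the socket and use PQconsumeInput and PQisBusy; that is omitted here for brevity, so PQgetResult may still block.

```c
#include <stdio.h>
#include <libpq-fe.h>

/* Sketch: dispatch a command string asynchronously and drain every
 * PGresult. PQgetResult must be called until it returns NULL before
 * the next PQsendQuery on the same connection. */
static void
send_async(PGconn *conn)
{
    if (PQsendQuery(conn, "SELECT pg_sleep(1); SELECT 1") == 0)
    {
        fprintf(stderr, "dispatch failed: %s", PQerrorMessage(conn));
        return;
    }

    PGresult *res;
    while ((res = PQgetResult(conn)) != NULL)
    {
        if (PQresultStatus(res) == PGRES_FATAL_ERROR)
            fprintf(stderr, "command failed: %s", PQresultErrorMessage(res));
        PQclear(res);
    }
}
```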
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/5-PQsendQueryPrepared.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/5-PQsendQueryPrepared.md deleted file mode 100644 index 4ce131cc..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/5-PQsendQueryPrepared.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: PQsendQueryPrepared -summary: PQsendQueryPrepared -author: Guo Huan -date: 2021-05-17 ---- - -# PQsendQueryPrepared - -## Function - -PQsendQueryPrepared is used to send a request to execute a prepared statement with given parameters, without waiting for the result. - -## Prototype - -```c -int PQsendQueryPrepared(PGconn *conn, - const char *stmtName, - int nParams, - const char * const *paramValues, - const int *paramLengths, - const int *paramFormats, - int resultFormat); -``` - -## Parameters - -**Table 1** PQsendQueryPrepared parameters - -| **Keyword** | **Parameter Description** | -| :----------- | :----------------------------------------------------------- | -| conn | Points to the object pointer that contains the connection information. | -| stmtName | Name of **stmt** to be executed. | -| nParams | Parameter quantity. | -| paramValues | Parameter value. | -| paramLengths | Parameter length. | -| paramFormats | Parameter format. | -| resultFormat | Result format. | - -## Return Value - -**int** indicates the execution result. **1** indicates successful execution and **0** indicates an execution failure. The failure cause is stored in **conn->errorMessage**. - -## Precautions - -PQsendQueryPrepared is similar to PQsendQueryParams, but the command to be executed is specified by naming a previously-prepared statement, instead of providing a query string. PQsendQueryPrepared parameters are handled in the same way as PQexecPrepared parameters. Like PQexecPrepared, PQsendQueryPrepared cannot work on connections using protocol 2.0. - -## Example - -For details, see Example. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/6-PQflush.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/6-PQflush.md deleted file mode 100644 index 485875af..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/3-functions-for-asynchronous-command-processing/6-PQflush.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: PQflush -summary: PQflush -author: Guo Huan -date: 2021-05-17 ---- - -# PQflush - -## Function - -PQflush is used to try to flush any queued output data to the server. - -## Prototype - -```c -int PQflush(PGconn *conn); -``` - -## Parameter - -**Table 1** PQflush parameter - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| conn | Points to the object pointer that contains the connection information. | - -## Return Value - -**int** indicates the execution result. If the operation is successful (or the send queue is empty), **0** is returned. If the operation fails, **-1** is returned. If not all of the data in the send queue could be sent yet, **1** is returned. (This case occurs only when the connection is non-blocking.)
The failure cause is stored in **conn->errorMessage**. - -## Precautions - -Call PQflush after sending any command or data over a non-blocking connection. If **1** is returned, wait for the socket to become read- or write-ready. If the socket becomes write-ready, call PQflush again. If the socket becomes read-ready, call PQconsumeInput and then call PQflush again. Repeat the operation until PQflush returns **0**. (It is necessary to check for read-ready and drain the input using PQconsumeInput. This is because the server can block trying to send us data, for example, notification messages, and will not read our data until we have read its data.) Once PQflush returns **0**, wait for the socket to be read-ready and then read the response as described above. - -## Example - -For details, see Example. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/1-PQgetCancel.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/1-PQgetCancel.md deleted file mode 100644 index 234b1051..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/1-PQgetCancel.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: PQgetCancel -summary: PQgetCancel -author: Guo Huan -date: 2021-05-17 ---- - -# PQgetCancel - -## Function - -PQgetCancel is used to create a data structure that contains the information required to cancel a command issued through a specific database connection. - -## Prototype - -```c -PGcancel *PQgetCancel(PGconn *conn); -``` - -## Parameter - -**Table 1** PQgetCancel parameter - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| conn | Points to the object pointer that contains the connection information. | - -## Return Value - -**PGcancel** points to the object pointer that contains the cancel information. - -## Precautions - -PQgetCancel creates a PGcancel object for a given PGconn connection object. If the given connection object (**conn**) is NULL or an invalid connection, PQgetCancel will return NULL. The PGcancel object is an opaque structure that cannot be directly accessed by applications. It can be transferred only to PQcancel or PQfreeCancel. - -## Example - -For details, see Example. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/2-PQfreeCancel.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/2-PQfreeCancel.md deleted file mode 100644 index 3df29bc6..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/2-PQfreeCancel.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: PQfreeCancel -summary: PQfreeCancel -author: Guo Huan -date: 2021-05-17 ---- - -# PQfreeCancel - -## Function - -PQfreeCancel is used to release the data structure created by PQgetCancel. - -## Prototype - -```c -void PQfreeCancel(PGcancel *cancel); -``` - -## Parameter - -**Table 1** PQfreeCancel parameter - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| cancel | Points to the object pointer that contains the cancel information.
| - -## Precautions - -PQfreeCancel releases a data object previously created by PQgetCancel. - -## Example - -For details, see Example. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/3-PQcancel.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/3-PQcancel.md deleted file mode 100644 index 1a71f6b7..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/3-PQcancel.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: PQcancel -summary: PQcancel -author: Guo Huan -date: 2021-05-17 ---- - -# PQcancel - -## Function - -PQcancel is used to request the server to abandon processing of the current command. - -## Prototype - -```c -int PQcancel(PGcancel *cancel, char *errbuf, int errbufsize); -``` - -## Parameter - -**Table 1** PQcancel parameters - -| **Keyword** | **Parameter Description** | -| :---------- | :----------------------------------------------------------- | -| cancel | Points to the object pointer that contains the cancel information. | -| errbuf | Buffer for storing error information. | -| errbufsize | Size of the buffer for storing error information. | - -## Return Value - -**int** indicates the execution result. **1** indicates successful execution and **0** indicates an execution failure. The failure cause is stored in **errbuf**. - -## Precautions - -- Successful sending does not guarantee that the request will have any effect. If the cancellation is valid, the current command is terminated early and an error is returned. If the cancellation fails (for example, because the server has processed the command), no result is returned. -- If **errbuf** is a local variable in a signal handler, you can safely call PQcancel from the signal handler. For PQcancel, the PGcancel object is read-only, so it can also be called from a thread that is separate from the thread that is operating the PGconn object. - -## Example - -For details, see Example. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/functions-for-canceling-queries-in-progress.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/functions-for-canceling-queries-in-progress.md deleted file mode 100644 index 6da19da1..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/4-functions-for-canceling-queries-in-progress/functions-for-canceling-queries-in-progress.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Functions for Canceling Queries in Progress -summary: Functions for Canceling Queries in Progress -author: Guo Huan -date: 2023-05-18 ---- - -# Functions for Canceling Queries in Progress - -A client application can use the functions described in this section to cancel a command that is still being processed by the server. 
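As a minimal, hedged sketch of the cancellation flow described in this section (the function list follows below): obtain a PGcancel with PQgetCancel, issue the request with PQcancel, and release the object with PQfreeCancel. Note that a successfully sent request does not guarantee the command is actually interrupted.

```c
#include <stdio.h>
#include <libpq-fe.h>

/* Sketch: request cancellation of whatever command is currently
 * running on conn. The outcome, if any, shows up as a normal
 * (possibly error) PGresult on the connection. */
static void
cancel_current(PGconn *conn)
{
    char errbuf[256];
    PGcancel *cancel = PQgetCancel(conn);

    if (cancel == NULL)
        return;                      /* NULL or invalid connection */

    if (PQcancel(cancel, errbuf, sizeof(errbuf)) == 0)
        fprintf(stderr, "cancel request failed: %s\n", errbuf);

    PQfreeCancel(cancel);            /* always release the PGcancel object */
}
```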
- -- **[PQgetCancel](1-PQgetCancel.md)** -- **[PQfreeCancel](2-PQfreeCancel.md)** -- **[PQcancel](3-PQcancel.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/libpq-api-reference.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/libpq-api-reference.md deleted file mode 100644 index 95894b3a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/2-libpq/libpq-api-reference.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: libpq API Reference -summary: libpq API Reference -author: Guo Huan -date: 2023-05-18 ---- - -# libpq API Reference - -- **[Database Connection Control Functions](1-database-connection-control-functions/1-database-connection-control-functions.md)** -- **[Database Statement Execution Functions](2-database-statement-execution-functions/database-statement-execution-functions.md)** -- **[Functions for Asynchronous Command Processing](3-functions-for-asynchronous-command-processing/1-functions-for-asynchronous-command-processing.md)** -- **[Functions for Canceling Queries in Progress](4-functions-for-canceling-queries-in-progress/functions-for-canceling-queries-in-progress.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/dependent-header-files-of-libpq.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/dependent-header-files-of-libpq.md deleted file mode 100644 index d417a526..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/dependent-header-files-of-libpq.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Dependent Header Files of libpq -summary: Dependent Header Files of libpq -author: Guo Huan -date: 2022-04-26 ---- - -# Dependent Header Files of libpq - -Client programs that use **libpq** must include the header file **libpq-fe.h** and must link with the libpq library. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/development-based-on-libpq.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/development-based-on-libpq.md deleted file mode 100644 index cd120482..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/development-based-on-libpq.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Development Based on libpq -summary: Development Based on libpq -author: Guo Huan -date: 2023-05-18 ---- - -# Development Based on libpq - -**libpq** is a C application programming interface to MogDB. **libpq** contains a set of library functions that allow client programs to send query requests to the MogDB and obtain query results. It is also the underlying engine of other MogDB application interfaces, such as ODBC. This section provides two examples to show how to write code using **libpq**. 
- -+ **[Dependent Header Files of libpq](dependent-header-files-of-libpq.md)** -+ **[Development Process](development-process.md)** -+ **[Example](libpq-example.md)** -+ **[Link Parameters](link-parameters.md)** -+ **[libpq API Reference](2-libpq/libpq-api-reference.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/development-process.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/development-process.md deleted file mode 100644 index 6e1a4a5c..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/development-process.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Development Process -summary: Development Process -author: Guo Huan -date: 2022-04-26 ---- - -# Development Process - -To compile and connect to a libpq source program, perform the following operations: - -1. Decompress the release package (for example, `MogDB-3.0.1-CentOS-64bit-Libpq.tar.gz`). The required header file is stored in the **include** folder, and the **lib** folder contains the required libpq library file. - - > **NOTE:** In addition to **libpq-fe.h**, the **include** folder contains the header files **postgres_ext.h**, **gs_thread.h**, and **gs_threadlocal.h** by default. These three header files are the dependency files of **libpq-fe.h**. - -2. Include the **libpq-fe.h** header file. - - ``` - #include <libpq-fe.h> - ``` - -3. Use the **-I** *directory* option to specify the installation location of the header files. (Sometimes the compiler searches the default directory, so this option can be omitted.) Example: - - ``` - gcc -I (Directory where the header files are located) -L (Directory where the libpq library is located) testprog.c -lpq - ``` - -4. If the makefile is used, add the following options to variables *CPPFLAGS*, *LDFLAGS*, and *LIBS*: - - ``` - CPPFLAGS += -I (Directory where the header files are located) - LDFLAGS += -L (Directory where the libpq library is located) - LIBS += -lpq - ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/libpq-example.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/libpq-example.md deleted file mode 100644 index e6954672..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/libpq-example.md +++ /dev/null @@ -1,282 +0,0 @@ ---- -title: Example -summary: Example -author: Guo Huan -date: 2022-04-26 ---- - -# Example - -## Code for Common Functions - -Example 1: - -```c - -/* - * testlibpq.c - */ -#include <stdio.h> -#include <stdlib.h> -#include <libpq-fe.h> - -static void -exit_nicely(PGconn *conn) -{ - PQfinish(conn); - exit(1); -} - -int -main(int argc, char **argv) -{ - const char *conninfo; - PGconn *conn; - PGresult *res; - int nFields; - int i,j; - - /* - * This value is used when the user provides the value of the conninfo character string in the command line. - * Otherwise, the environment variables or the default values - * are used for all other connection parameters. - */ - if (argc > 1) - conninfo = argv[1]; - else - conninfo = "dbname=postgres port=42121 host='10.44.133.171' application_name=test connect_timeout=5 sslmode=allow user='test' password='test_1234'"; - - /* Connect to the database. */ - conn = PQconnectdb(conninfo); - - /* Check whether the backend connection has been successfully established.
*/ - if (PQstatus(conn) != CONNECTION_OK) - { - fprintf(stderr, "Connection to database failed: %s", - PQerrorMessage(conn)); - exit_nicely(conn); - } - - /* - * Since a cursor is used in the test case, a transaction block is required. - * The whole result could be fetched with a single PQexec() of - * "select * from pg_database", but that is too simple to make a good example. - */ - - /* Start a transaction block. */ - res = PQexec(conn, "BEGIN"); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - { - fprintf(stderr, "BEGIN command failed: %s", PQerrorMessage(conn)); - PQclear(res); - exit_nicely(conn); - } - - /* - * Call PQclear on a PGresult when it is no longer needed, to avoid memory leaks. - */ - PQclear(res); - - /* - * Fetch data from the pg_database system catalog. - */ - res = PQexec(conn, "DECLARE myportal CURSOR FOR select * from pg_database"); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - { - fprintf(stderr, "DECLARE CURSOR failed: %s", PQerrorMessage(conn)); - PQclear(res); - exit_nicely(conn); - } - PQclear(res); - - res = PQexec(conn, "FETCH ALL in myportal"); - if (PQresultStatus(res) != PGRES_TUPLES_OK) - { - fprintf(stderr, "FETCH ALL failed: %s", PQerrorMessage(conn)); - PQclear(res); - exit_nicely(conn); - } - - /* First, print out the attribute names. */ - nFields = PQnfields(res); - for (i = 0; i < nFields; i++) - printf("%-15s", PQfname(res, i)); - printf("\n\n"); - - /* Print rows. */ - for (i = 0; i < PQntuples(res); i++) - { - for (j = 0; j < nFields; j++) - printf("%-15s", PQgetvalue(res, i, j)); - printf("\n"); - } - - PQclear(res); - - /* Close the portal. We do not need to check for errors. */ - res = PQexec(conn, "CLOSE myportal"); - PQclear(res); - - /* End the transaction. */ - res = PQexec(conn, "END"); - PQclear(res); - - /* Close the database connection and clean up. */ - PQfinish(conn); - - return 0; -} -``` - -Example 2: - -```c - -/* - * testlibpq2.c - * Test out-of-line parameters and binary I/O. - * - * Before running this example, run the following commands to populate the database: - * - * - * CREATE TABLE test1 (i int4, t text); - * - * INSERT INTO test1 values (2, 'ho there'); - * - * The expected output is as follows: - * - * - * tuple 0: got - * i = (4 bytes) 2 - * t = (8 bytes) 'ho there' - * - */ -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <sys/types.h> -#include <libpq-fe.h> - -/* for ntohl/htonl */ -#include <netinet/in.h> -#include <arpa/inet.h> - -static void -exit_nicely(PGconn *conn) -{ - PQfinish(conn); - exit(1); -} - -/* - * This function prints out the query results. The results are in binary format, -* fetched from the table created in the comment above. - */ -static void -show_binary_results(PGresult *res) -{ - int i; - int i_fnum, - t_fnum; - - /* Use PQfnumber to avoid assumptions about field order in the result. */ - i_fnum = PQfnumber(res, "i"); - t_fnum = PQfnumber(res, "t"); - - for (i = 0; i < PQntuples(res); i++) - { - char *iptr; - char *tptr; - int ival; - - /* Obtain the field values. (Ignore the possibility that they may be null.) */ - iptr = PQgetvalue(res, i, i_fnum); - tptr = PQgetvalue(res, i, t_fnum); - - /* - * The binary representation of INT4 is in network byte order, - * which we convert to the local byte order. - */ - ival = ntohl(*((uint32_t *) iptr)); - - /* - * The binary representation of TEXT is plain text. Since libpq appends - * a zero byte to it, it can simply be treated as a C string.
- * - */ - - printf("tuple %d: got\n", i); - printf(" i = (%d bytes) %d\n", - PQgetlength(res, i, i_fnum), ival); - printf(" t = (%d bytes) '%s'\n", - PQgetlength(res, i, t_fnum), tptr); - printf("\n\n"); - } -} - -int -main(int argc, char **argv) -{ - const char *conninfo; - PGconn *conn; - PGresult *res; - const char *paramValues[1]; - int paramLengths[1]; - int paramFormats[1]; - uint32_t binaryIntVal; - - /* - * If the user provides a parameter on the command line, - * its value is a conninfo character string. Otherwise, - * environment variables or default values are used. - */ - if (argc > 1) - conninfo = argv[1]; - else - conninfo = "dbname=postgres port=42121 host='10.44.133.171' application_name=test connect_timeout=5 sslmode=allow user='test' password='test_1234'"; - - /* Connect to the database. */ - conn = PQconnectdb(conninfo); - - /* Check whether the connection to the server was successfully established. */ - if (PQstatus(conn) != CONNECTION_OK) - { - fprintf(stderr, "Connection to database failed: %s", - PQerrorMessage(conn)); - exit_nicely(conn); - } - - /* Convert the integer value "2" to the network byte order. */ - binaryIntVal = htonl((uint32_t) 2); - - /* Set the parameter array for PQexecParams. */ - paramValues[0] = (char *) &binaryIntVal; - paramLengths[0] = sizeof(binaryIntVal); - paramFormats[0] = 1; /* Binary */ - - res = PQexecParams(conn, - "SELECT * FROM test1 WHERE i = $1::int4", - 1, /* One parameter */ - NULL, /* Let the backend deduce the parameter type. */ - paramValues, - paramLengths, - paramFormats, - 1); /* Require binary results. */ - - if (PQresultStatus(res) != PGRES_TUPLES_OK) - { - fprintf(stderr, "SELECT failed: %s", PQerrorMessage(conn)); - PQclear(res); - exit_nicely(conn); - } - - show_binary_results(res); - - PQclear(res); - - /* Close the database connection and clean up. */ - PQfinish(conn); - - return 0; -} -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/link-parameters.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/link-parameters.md deleted file mode 100644 index 80dfe22c..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4-development-based-on-libpq/link-parameters.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Link Parameters -summary: Link Parameters -author: Guo Huan -date: 2022-04-26 ---- - -# Link Parameters - -**Table 1** Link parameters - -| Parameter | Description | -| :------------------------ | :----------------------------------------------------------- | -| host | Name of the host to connect to. If the host name starts with a slash (/), Unix-domain socket communications instead of TCP/IP communications are used. The value is the directory where the socket file is stored. If **host** is not specified, the default behavior is to connect to the Unix-domain socket in the **/tmp** directory (or the socket directory specified during database installation). On a machine without a Unix-domain socket, the default behavior is to connect to **localhost**.
Multiple host names can be specified as a comma-separated string. | -| hostaddr | IP address of the host to connect to. The value is in standard IPv4 address format, for example, 172.28.40.9. If the machine supports IPv6, an IPv6 address can also be used. If a non-null string is specified, TCP/IP communications are used.
Multiple IP addresses can be specified as a comma-separated string.
Replacing **host** with **hostaddr** can prevent applications from querying host names, which may be important for applications with time constraints. However, a host name is required for GSSAPI or SSPI authentication methods. Therefore, the following rules are used:
1. If **host** is specified but **hostaddr** is not, a query for the host name will be executed.
2. If **hostaddr** is specified but **host** is not, the value of **hostaddr** is the server network address. If the host name is required by authentication, the connection attempt fails.
3. If both **host** and **hostaddr** are specified, the value of **hostaddr** is the server network address. The value of **host** is ignored unless it is required by authentication, in which case it is used as the host name.
NOTICE:
- If **host** is not the server name in the network address specified by **hostaddr**, the authentication may fail.
- If neither **host** nor **hostaddr** is specified, libpq will use a local Unix-domain socket for connection. If the machine does not have a Unix-domain socket, it will attempt to connect to **localhost**. | -| port | Port number of the host server, or the socket file name extension for Unix-domain connections.
Multiple port numbers can be specified as a comma-separated string. | -| user | Name of the user to connect as. By default, the username is the same as the operating system name of the user running the application. | -| dbname | Database name. The default value is the same as the username. | -| password | Password to be used if the server requires password authentication. | -| connect_timeout | Maximum timeout period of the connection, in seconds (in decimal integer string). The value **0** or null indicates infinity. You are not advised to set the connection timeout period to a value less than 2 seconds. | -| client_encoding | Client encoding for the connection. In addition to the values accepted by the corresponding server options, you can use **auto** to determine the correct encoding from the current environment on the client (the *LC_CTYPE* environment variable in the Unix system). | -| tty | This parameter can be ignored. (It was formerly used to specify the location to which the debugging output of the server was sent.) | -| options | Adds command-line options to send to the server at runtime. | -| application_name | Current user identity. | -| fallback_application_name | Specifies a backup value for the **application_name** parameter. This value is used if no value is set for **application_name** through a connection parameter or the *PGAPPNAME* environment variable. It is useful to specify a backup value in a common tool program that wants to set a default application name but does not want it to be overwritten by the user. | -| keepalives | Whether TCP keepalive is enabled on the client side. The default value is **1**, indicating that the function is enabled. The value **0** indicates that the function is disabled. Ignore this parameter for Unix-domain connections. | -| keepalives_idle | The number of seconds of inactivity after which TCP should send a keepalive message to the server. The value **0** indicates that the default value is used. Ignore this parameter for Unix-domain connections or if keepalive is disabled. | -| keepalives_interval | The number of seconds after which a TCP keepalive message that is not acknowledged by the server should be retransmitted. The value **0** indicates that the default value is used. Ignore this parameter for Unix-domain connections or if keepalive is disabled. | -| keepalives_count | The number of TCP keepalive probes to send before the connection is considered dead. The value **0** indicates that the default value is used. Ignore this parameter for Unix-domain connections or if keepalive is disabled. | -| tcp_user_timeout | Specifies the maximum duration for which transmitted data may remain unacknowledged before the TCP connection is forcibly closed, on an OS that supports the TCP_USER_TIMEOUT socket option. The value **0** indicates that the default value is used. Ignore this parameter for Unix-domain connections. | -| rw_timeout | Sets the read and write timeout interval of the client connection.
Note: If this parameter is set to less than 5 seconds, a timeout of 5 seconds is used. | -| sslmode | Specifies whether to enable SSL encryption.
- **disable**: SSL connection is disabled.
- **allow**: If the database server requires SSL connection, SSL connection can be enabled. However, authenticity of the database server will not be verified.
- **prefer**: If the database supports SSL connection, SSL connection is preferred. However, authenticity of the database server will not be verified.
- **require**: SSL connection is required and data is encrypted. However, authenticity of the database server will not be verified.
- **verify-ca**: SSL connection is required. Currently, Windows ODBC does not support cert authentication.
- **verify-full**: SSL connection is required. Currently, Windows ODBC does not support cert authentication. | -| sslcompression | If this parameter is set to **1** (default value), the data transmitted over the SSL connection is compressed (this requires that the OpenSSL version be 0.9.8 or later). If this parameter is set to **0**, compression will be disabled (this requires OpenSSL 1.0.0 or later). If a connection without SSL is established, this parameter is ignored. If the OpenSSL version in use does not support this parameter, it will also be ignored. Compression takes up CPU time, but it increases throughput when the bottleneck is the network. If CPU performance is a limiting factor, disabling compression can improve response time and throughput. | -| sslcert | This parameter specifies the file name of the client SSL certificate. It replaces the default **~/.postgresql/postgresql.crt**. If no SSL connection is established, this parameter is ignored. | -| sslkey | This parameter specifies the location of the key used for the client certificate. It can specify the name of a file used to replace the default **~/.postgresql/postgresql.key**, or specify a key obtained from an external “engine” that is a loadable module of OpenSSL. The description of an external engine should consist of a colon-separated engine name and an engine-related key identifier. If no SSL connection is established, this parameter is ignored. | -| sslrootcert | This parameter specifies the name of a file that contains the SSL Certificate Authority (CA) certificate. If the file exists, the system authenticates the server certificate issued by one of these authorities. The default value is **~/.postgresql/root.crt**. | -| sslcrl | This parameter specifies the file name of the SSL Certificate Revocation List (CRL). If a certificate listed in this file exists, the server certificate authentication will be rejected. The default value is **~/.postgresql/root.crl**. | -| requirepeer | This parameter specifies the OS user of the server, for example, **requirepeer=postgres**. When a Unix-domain socket connection is established, if this parameter is set, the client checks whether the server process is running under the specified username at the beginning of the connection. If not, the connection will be interrupted by an error. This parameter can be used to provide server authentication similar to that of the SSL certificate on TCP/IP connections. (Note that if the Unix domain socket is in **/tmp** or another public writable location, any user can start a server for listening to the location. Use this parameter to ensure that your device is connected to a server that is run by a trusted user.) This option is supported only on platforms that implement the peer authentication method. | -| krbsrvname | This parameter specifies the Kerberos service name used for GSSAPI authentication. For successful Kerberos authentication, this value must match the service name specified in the server configuration. | -| gsslib | This parameter specifies the GSS library used for GSSAPI authentication. It is used only in the Windows OS. If this parameter is set to **gssapi**, **libpq** is forced to use the GSSAPI library to replace the default SSPI for authentication. | -| service | This parameter specifies the name of the service for which the additional parameter is used. It specifies a service name in **pg_service.conf** that holds the additional connection parameters. 
This allows the application to specify only one service name so that the connection parameters can be centrally maintained. | -| authtype | **authtype** is no longer used, so it is marked as a parameter not to be displayed. It is retained in an array so as not to reject the **conninfo** string from old applications that might still try to set it. | -| remote_nodename | Specifies the name of the remote node connected to the local node. | -| localhost | Specifies the local host in a connection channel. | -| localport | Specifies the local port in a connection channel. | -| fencedUdfRPCMode | Specifies whether the fenced udf RPC protocol uses UNIX domain sockets or special socket file names. The default value is **0**, indicating that the UNIX domain socket mode is used and the file type is .s.PGSQL.%d. To use the fenced UDF mode, set this parameter to **1**. In this case, the file type is .s.fencedMaster_unixdomain. | -| replication | Specifies whether the connection should use replication protocols instead of common protocols. Protocols with this parameter configured are internal protocols used for PostgreSQL replication connections and tools such as **pg_basebackup**, while they can also be used by third-party applications. The following values, which are case-insensitive, are supported:
- **true**, **on**, **yes**, and **1**: Specify that the physical replication mode is connected.
- **database**: Specifies that the logical replication mode and the database specified by **dbname** are connected.
- **false**, **off**, **no**, and **0**: Specify that the connection is a regular connection, which is the default behavior.
In physical or logical replication mode, only simple query protocols can be used. | -| backend_version | Specifies the backend version to be passed to the remote end. | -| prototype | Sets the current protocol level. The default value is **PROTO_TCP**. | -| enable_ce | Specifies whether a client is allowed to connect to a fully encrypted database. The default value is **0**. To enable this function, change the value to **1**. | -| connection_info | The value of `connection_info` is a JSON character string consisting of `driver_name`, `driver_version`, `driver_path`, and `os_user`.
If the value is not null, use `connection_info` and ignore `connectionExtraInf`.
If the value is null, a connection information string related to `libpq` is generated. When `connectionExtraInf` is set to `false`, the value of `connection_info` consists of only `driver_name` and `driver_version`. | -| connectionExtraInf | Specifies whether the value of **connection_info** contains extension information. The default value is **0**. If the value contains other information, set this parameter to **1**. | -| target_session_attrs | Specifies the type of the host to be connected. The connection is successful only when the host type is the same as the configured value. The rules for setting **target_session_attrs** are as follows:
- **any** (default value): All types of hosts can be connected.
- **read-write**: The connection is set up only when the connected host is readable and writable.
- **read-only**: Only readable hosts can be connected.
- **primary**: Only the primary server in the primary/standby systems can be connected.
- **standby**: Only the standby server in the primary/standby systems can be connected.
- **prefer-standby**: The system first attempts to find a standby node for connection. If all hosts in the **hosts** list fail to be connected, try the **any** mode. | diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/1-psycopg-based-development.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/1-psycopg-based-development.md deleted file mode 100644 index f7efe29a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/1-psycopg-based-development.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Psycopg-Based Development -summary: Psycopg-Based Development -author: Zhang Cuiping -date: 2021-10-11 ---- - -# Psycopg-Based Development - -> MogDB Psycopg2 is a branch of the open-source [Psycopg2](https://github.com/psycopg/psycopg2) project (based on v2.9) developed for the MogDB database, so its basic usage is the same as that of the original driver. - -Psycopg is a Python API used to execute SQL statements and provides a unified access API for PostgreSQL and MogDB. Applications can perform data operations based on psycopg. Psycopg2 is an encapsulation of libpq and is implemented using the C language, which is efficient and secure. It provides cursors on both clients and servers, asynchronous communication and notification, and the COPY TO and COPY FROM functions. Psycopg2 supports many Python types out of the box and adapts to PostgreSQL data types. Through the flexible object adaptation system, you can extend and customize the adaptation. - -MogDB supports the psycopg2 feature and allows psycopg2 to be connected in SSL mode. - -**Table 1** Platforms supported by Psycopg - -| Platform | -| :------- | -| x86_64 | -| ARM64 | - -+ **[Psycopg2 Package](2-psycopg-package.md)** -+ **[Development Process](3.1-development-process.md)** -+ **[Connecting to a Database](4-connecting-to-a-database.md)** -+ **[Adaptation of Python values to SQL types](5-adaptation-of-python-values-to-sql-types.md)** -+ **[New Features](6-new-features-in-mogdb.md)** -+ **[Connecting to the Database (Using SSL)](9-connecting-to-the-database-using-ssl.md)** -+ **[Examples: Common Operations](10.1-example-common-operations.md)** -+ **[Psycopg2 API Reference](11-psycopg-api-reference/psycopg-api-reference.md)** -+ **[Psycopg2 Release Notes](12-psycopg2-release-notes.md)** diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md deleted file mode 100644 index 2550181b..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Common Operations -summary: Common Operations -author: Yao Qian -date: 2023-12-18 ---- - -# Examples: Common Operations - -## Basic Usage - -```python -import psycopg2 - -# key-value format -conn = psycopg2.connect(dbname="postgres", user="user", password="password", host="localhost", port=port) -# DSN format -conn = psycopg2.connect("dbname=postgres user=user password=password host=localhost port=port") - - -# Create a connection object -conn=psycopg2.connect(database="postgres",user="user",password="password",host="localhost",port=port) -cur=conn.cursor() # Create a cursor object - -# Creating a connection object (SSL connection) -conn = psycopg2.connect(dbname="postgres", user="user",
password="password", host="localhost", port=port, - sslmode="verify-ca", sslcert="client.crt",sslkey="client.key",sslrootcert="cacert.pem") -# Note:sslcert,sslkey,sslrootcert are the file paths of the user certificate, user private key, and root certificate,if not given,the default value is client.crt,client.key and root.crt under ~/.postgresql direcotry。 - - -# Create table -cur.execute("CREATE TABLE student(id integer,name varchar,sex varchar);") - -# Insert data -cur.execute("INSERT INTO student(id,name,sex) VALUES(%s,%s,%s)",(1,'Aspirin','M')) -cur.execute("INSERT INTO student(id,name,sex) VALUES(%s,%s,%s)",(2,'Taxol','F')) -cur.execute("INSERT INTO student(id,name,sex) VALUES(%s,%s,%s)",(3,'Dixheral','M')) - -# Get results -cur.execute('SELECT * FROM student') -results=cur.fetchall() -print(results) - -conn.commit() - -# Close connection -cur.close() -conn.close() -``` - -## with statement - -When a connection exits the with block, if no exception has been raised by the block, the transaction is committed. In case of exception the transaction is rolled back. - -When a cursor exits the with block it is closed, releasing any resource eventually associated with it. The state of the transaction is not affected. - -A connection can be used in more than a with statement and each with block is effectively wrapped in a separate transaction: - -```python -conn = psycopg2.connect(DSN) - -with conn: - with conn.cursor() as curs: - curs.execute(SQL1) - -with conn: - with conn.cursor() as curs: - curs.execute(SQL2) - -conn.close() -``` - -Warning: Unlike file objects or other resources, exiting the connection’s with block doesn’t close the connection, but only the transaction associated to it. If you want to make sure the connection is closed after a certain point, you should still use a try-catch block: - -```python -conn = psycopg2.connect(DSN) -try: - # connection usage -finally: - conn.close() -``` - -## Using loging - -```python -import logging -import psycopg2 -from psycopg2.extras import LoggingConnection - -logging.basicConfig(level=logging.DEBUG) # log level -logger = logging.getLogger(__name__) - -db_settings = { - "user": "user", - "password": "password", - "host": "localhost", - "database": "postgres", - "port": port -} -conn = psycopg2.connect(connection_factory=LoggingConnection, **db_settings) -conn.initialize(logger) -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/1-psycopg2-connect.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/1-psycopg2-connect.md deleted file mode 100644 index c251e496..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/1-psycopg2-connect.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: psycopg2.connect() -summary: psycopg2.connect() -author: Zhang Cuiping -date: 2021-10-11 ---- - -# psycopg2.connect() - -## Function - -This method creates a database session and returns a new connection object. - -## Prototype - -``` -conn=psycopg2.connect(dbname="test",user="postgres",password="secret",host="127.0.0.1",port="5432") -``` - -## Parameter - -**Table 1** psycopg2.connect parameters - -| **Keyword** | **Description** | -| :---------- | :----------------------------------------------------------- | -| dbname | Database name. | -| user | Username. | -| password | Password. | -| host | Database IP address. The default type is UNIX socket. | -| port | Connection port number. 
The default value is **5432**. | -| sslmode | SSL mode, which is used for SSL connection. | -| sslcert | Path of the client certificate, which is used for SSL connection. | -| sslkey | Path of the client key, which is used for SSL connection. | -| sslrootcert | Path of the root certificate, which is used for SSL connection. | - -## Return Value - -Connection object (for connecting to the MogDB instance) - -## Examples - -For details, see [Example: Common Operations](../../../../developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/10-connection-close.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/10-connection-close.md deleted file mode 100644 index 579401de..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/10-connection-close.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: connection.close() -summary: connection.close() -author: Zhang Cuiping -date: 2021-10-11 ---- - -# connection.close() - -## Function - -This method closes the database connection. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** This method closes the database connection and does not automatically call **commit()**. If you just close the database connection without calling **commit()** first, changes will be lost. - -## Prototype - -``` -connection.close() -``` - -## Parameter - -None - -## Return Value - -None - -## Examples - -For details, see [Example: Common Operations](../../../../developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/2-connection-cursor.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/2-connection-cursor.md deleted file mode 100644 index df03d7ac..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/2-connection-cursor.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: connection.cursor() -summary: connection.cursor() -author: Zhang Cuiping -date: 2021-10-11 ---- - -# connection.cursor() - -## Function - -This method returns a new cursor object. - -## Prototype - -``` -cursor(name=None, cursor_factory=None, scrollable=None, withhold=False) -``` - -## Parameter - -**Table 1** connection.cursor parameters - -| **Keyword** | **Description** | -| :------------- | :----------------------------------------------------------- | -| name | Cursor name. The default value is **None**. | -| cursor_factory | Creates a non-standard cursor. The default value is **None**. | -| scrollable | Sets the SCROLL option. The default value is **None**. | -| withhold | Sets the HOLD option. The default value is **False**. | - -## Return Value - -Cursor object (used for cursors that are programmed using Python in the entire database) - -## Examples - -For details, see [Example: Common Operations](../../../../developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md).
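The `name` parameter above is what distinguishes a client-side cursor from a server-side (named) cursor. The following is a minimal sketch of the difference; the connection parameters are placeholders and `big_table` is a hypothetical table:

```python
import psycopg2

conn = psycopg2.connect("dbname=postgres user=user password=password host=localhost port=5432")

# Client-side cursor: the whole result set is transferred to the client at once.
cur = conn.cursor()
cur.execute("SELECT * FROM big_table")
rows = cur.fetchall()          # all rows now live in client memory
cur.close()

# Named (server-side) cursor: rows are fetched in batches of itersize,
# which keeps client memory usage flat for large result sets.
named_cur = conn.cursor(name="batch_reader")
named_cur.itersize = 1000      # rows per network round trip
named_cur.execute("SELECT * FROM big_table")
for row in named_cur:          # iteration fetches batches transparently
    pass                       # process each row here

named_cur.close()
conn.close()
```

Named cursors run inside a transaction on the server, so they are best suited to reading large result sets incrementally rather than to short point queries.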
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/3-cursor-execute-query-vars-list.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/3-cursor-execute-query-vars-list.md deleted file mode 100644 index f2bae52d..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/3-cursor-execute-query-vars-list.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: cursor.execute(query,vars_list) -summary: cursor.execute(query,vars_list) -author: Zhang Cuiping -date: 2021-10-11 ---- - -# cursor.execute(query,vars_list) - -## Function - -This method executes a parameterized SQL statement (that is, a statement that uses placeholders instead of SQL literals). The psycopg2 module supports placeholders marked with **%s**. - -## Prototype - -``` -cursor.execute(query,vars_list) -``` - -## Parameters - -**Table 1** cursor.execute parameters - -| **Keyword** | **Description** | -| :---------- | :----------------------------------------------------------- | -| query | SQL statement to be executed. | -| vars_list | Variable list, which matches the **%s** placeholder in the query. | - -## Return Value - -None - -## Examples - -For details, see [Example: Common Operations](../../../../developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/4-curosr-executemany-query-vars-list.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/4-curosr-executemany-query-vars-list.md deleted file mode 100644 index 39b66520..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/4-curosr-executemany-query-vars-list.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: cursor.executemany(query,vars_list) -summary: cursor.executemany(query,vars_list) -author: Zhang Cuiping -date: 2021-10-11 ---- - -# cursor.executemany(query,vars_list) - -## Function - -This method executes an SQL command against all parameter sequences or mappings found in the sequence SQL. - -## Prototype - -``` -cursor.executemany(query,vars_list) -``` - -## Parameter - -**Table 1** cursor.executemany parameters - -| **Keyword** | **Description** | -| :---------- | :----------------------------------------------------------- | -| query | SQL statement that you want to execute. | -| vars_list | Variable list, which matches the **%s** placeholder in the query. | - -## Return Value - -None - -## Examples - -For details, see [Example: Common Operations](../../../../developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/5-connection-commit.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/5-connection-commit.md deleted file mode 100644 index 6d058394..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/5-connection-commit.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: connection.commit() -summary: connection.commit() -author: Zhang Cuiping -date: 2021-10-11 ---- - -# connection.commit() - -## Function - -This method commits the currently pending transaction to the database.
- -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** By default, Psycopg opens a transaction before executing the first command. If **commit()** is not called, the effect of any data operation will be lost. - -## Prototype - -``` -connection.commit() -``` - -## Parameter - -None - -## Return Value - -None - -## Examples - -For details, see [Example: Common Operations](../../../../developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/6-connection-rollback.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/6-connection-rollback.md deleted file mode 100644 index 764c9869..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/6-connection-rollback.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: connection.rollback() -summary: connection.rollback() -author: Zhang Cuiping -date: 2021-10-11 ---- - -# connection.rollback() - -## Function - -This method rolls back the current pending transaction. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-caution.gif) **CAUTION:** If you close the connection using **close()** but do not commit the change using **commit()**, an implicit rollback will be performed. - -## Prototype - -``` -connection.rollback() -``` - -## Parameter - -None - -## Return Value - -None - -## Examples - -For details, see [Example: Common Operations](../../../../developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/7-cursor-fetchone.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/7-cursor-fetchone.md deleted file mode 100644 index acb0411f..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/7-cursor-fetchone.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: cursor.fetchone() -summary: cursor.fetchone() -author: Zhang Cuiping -date: 2021-10-11 ---- - -# cursor.fetchone() - -## Function - -This method extracts the next row of the query result set and returns a tuple. - -## Prototype - -``` -cursor.fetchone() -``` - -## Parameter - -None - -## Return Value - -A single tuple is the first result in the result set. If no more data is available, **None** is returned. - -## Examples - -For details, see [Example: Common Operations](../../../../developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/8-cursor-fetchall.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/8-cursor-fetchall.md deleted file mode 100644 index 9933d690..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/8-cursor-fetchall.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: cursor.fetchall() -summary: cursor.fetchall() -author: Zhang Cuiping -date: 2021-10-11 ---- - -# cursor.fetchall() - -## Function - -This method obtains all the (remaining) rows of the query result and returns them as a list of tuples. 
- -## Prototype - -``` -cursor.fetchall() -``` - -## Parameter - -None - -## Return Value - -Tuple list, which contains all results of the result set. An empty list is returned when no rows are available. - -## Examples - -For details, see [Example: Common Operations](../../../../developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/9-cursor-close.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/9-cursor-close.md deleted file mode 100644 index 8c55d5f2..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/9-cursor-close.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: cursor.close() -summary: cursor.close() -author: Zhang Cuiping -date: 2021-10-11 ---- - -# cursor.close() - -## Function - -This method closes the cursor of the current connection. - -## Prototype - -``` -cursor.close() -``` - -## Parameter - -None - -## Return Value - -None - -## Examples - -For details, see [Example: Common Operations](../../../../developer-guide/dev/4.1-psycopg-based-development/10.1-example-common-operations.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/psycopg-api-reference.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/psycopg-api-reference.md deleted file mode 100644 index 326baccc..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/11-psycopg-api-reference/psycopg-api-reference.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Psycopg API Reference -summary: Psycopg API Reference -author: Guo Huan -date: 2023-05-18 ---- - -# Psycopg API Reference - -Psycopg APIs are a set of methods provided for users. This section describes some common APIs. - -+ **[psycopg2.connect()](1-psycopg2-connect.md)** -+ **[connection.cursor()](2-connection-cursor.md)** -+ **[cursor.execute(query,vars_list)](3-cursor-execute-query-vars-list.md)** -+ **[cursor.executemany(query,vars_list)](4-curosr-executemany-query-vars-list.md)** -+ **[connection.commit()](5-connection-commit.md)** -+ **[connection.rollback()](6-connection-rollback.md)** -+ **[cursor.fetchone()](7-cursor-fetchone.md)** -+ **[cursor.fetchall()](8-cursor-fetchall.md)** -+ **[cursor.close()](9-cursor-close.md)** -+ **[connection.close()](10-connection-close.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/12-psycopg2-release-notes.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/12-psycopg2-release-notes.md deleted file mode 100644 index 9e908ea4..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/12-psycopg2-release-notes.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Psycopg2 Release Notes -summary: Psycopg2 Release Notes -author: Qian Yao -date: 2023-12-18 ---- - -# MogDB Psycopg2 Release Notes - -The MogDB Psycopg2 release notes contain new features, improvements, bug fixes, and other changes. Please read the details below carefully to understand all changes.
- -## 5.0.0.4 (2024-04-15) - -**Fix:** - -- Fixed autosavepoint not working when the server parameter compat_oracle_txn_control is set - -> While establishing a connection, you can enable compat_oracle_txn_control by using the options parameter: -> `conn = psycopg2.connect(..., options="-c behavior_compat_options=compat_oracle_txn_control")` - -## 5.0.0.3 (2024-03-28) - -**Features:** - -- Added Oracle transaction compatibility (active when the GUC parameter `behavior_compat_options` contains `compat_oracle_txn_control`) - -## 5.0.0.2 (2023-12-28) - -**Fix:** - -- Fixed import error in Python 3.11 - -## 5.0.0.1 (2023-10-11) - -**Features:** - -- Added the `autosavepoint` property, which supports calling rollback to roll back only the failed statement after a single SQL failure in a transaction -- Added '$' placeholder support -- (extras) Added the `execute_prepared_batch` and `execute_params_batch` functions -- Added parsing of boolean fields for '1' and '0' - -**Fix:** - -- Empty strings are now converted to NULL by default in A-compatible mode diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/2-psycopg-package.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/2-psycopg-package.md deleted file mode 100644 index 7365784d..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/2-psycopg-package.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Psycopg Package -summary: Psycopg Package -author: Zhang Cuiping -date: 2021-10-11 ---- - -# Psycopg Package - -Visit the MogDB download page [https://www.mogdb.io/downloads/psycopg2/all](https://www.mogdb.io/downloads/psycopg2/all) to get the installation package. - -Download the wheel installation package that matches the Python version of your system. Only Python 3.6 or later is supported.
- -After downloading, run the following command to install it: - -```bash -python3 -m pip install <downloaded-wheel-package>.whl -``` - -After the installation succeeds, you can run the following command to verify it and check the psycopg2 version number: - -```bash -python3 -m pip list -``` - -Alternatively, open the Python interactive terminal and import the module to verify that the installation was successful: - -```python -import psycopg2 -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/3.1-development-process.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/3.1-development-process.md deleted file mode 100644 index 9e2f3d5b..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/3.1-development-process.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Development Process -summary: Development Process -author: Zhang Cuiping -date: 2021-10-11 ---- - -# Development Process - -**Figure 1** Application development process based on psycopg2 - -![application-development-process-based-on-psycopg2](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/development-process-2.png) diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/4-connecting-to-a-database.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/4-connecting-to-a-database.md deleted file mode 100644 index f12e02e2..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/4-connecting-to-a-database.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: Connecting to a Database -summary: Connecting to a Database -author: Zhang Cuiping -date: 2021-10-11 ---- - -# Connecting to a Database - -## Basic Usage - -Here is a basic usage example of Psycopg2: - -```python ->>> import psycopg2 - -# Connect to an existing database ->>> conn = psycopg2.connect("dbname=test user=omm") - -# Open a cursor to perform database operations ->>> cur = conn.cursor() - -# Execute a command: this creates a new table ->>> cur.execute("CREATE TABLE test (id serial PRIMARY KEY, num integer, data varchar);") - -# Pass data to fill a query placeholders and let Psycopg perform -# the correct conversion (no more SQL injections!) ->>> cur.execute("INSERT INTO test (num, data) VALUES (%s, %s)", -... (100, "abc'def")) - -# Query the database and obtain data as Python objects ->>> cur.execute("SELECT * FROM test;") ->>> cur.fetchone() -(1, 100, "abc'def") - -# Make the changes to the database persistent ->>> conn.commit() - -# Close communication with the database ->>> cur.close() ->>> conn.close() -``` - -The main entry points of Psycopg are: - -- The function `connect()` creates a new database session and returns a new connection instance. - -- The class `connection` encapsulates a database session. It allows to: - - 1. create new cursor instances using the cursor() method to execute database commands and queries, - - 2. terminate transactions using the methods commit() or rollback(). - -- The class `cursor` allows interaction with the database: - - 1. send commands to the database using methods such as `execute()` and `executemany()`, - - 2. retrieve data from the database by iteration or using methods such as `fetchone()`, `fetchmany()`, `fetchall()`.
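As a quick illustration of these entry points, the following minimal sketch shows the three retrieval styles side by side; the DSN and the `test` table are placeholders taken from the example above:

```python
import psycopg2

conn = psycopg2.connect("dbname=test user=omm")   # placeholder DSN
cur = conn.cursor()
try:
    cur.execute("SELECT id, num, data FROM test")
    print(cur.fetchone())        # one row as a tuple (or None when exhausted)
    print(cur.fetchmany(2))      # the next two rows as a list of tuples
    for row in cur:              # iterate over whatever rows remain
        print(row)
    conn.commit()                # terminate the transaction normally
except Exception:
    conn.rollback()              # or roll it back on error
    raise
finally:
    cur.close()
    conn.close()
```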
- -## Passing parameters to SQL queries - -Psycopg converts Python variables to SQL values using their types: the Python type determines the function used to convert the object into a string representation suitable for PostgreSQL. Many standard Python types are already adapted to the correct SQL representation. - -Passing parameters to an SQL statement happens in functions such as cursor.execute() by using `%s` placeholders in the SQL statement, and passing a sequence of values as the second argument of the function. For example the Python function call: - -```python ->>> cur.execute(""" -... INSERT INTO some_table (an_int, a_date, a_string) -... VALUES (%s, %s, %s); -... """, -... (10, datetime.date(2005, 11, 18), "O'Reilly")) -``` - -is converted into a SQL command similar to: - -```SQL -INSERT INTO some_table (an_int, a_date, a_string) -VALUES (10, '2005-11-18', 'O''Reilly'); -``` - -> Using characters `%`, `(`, `)` in the argument names is not supported. - -When parameters are used, in order to include a literal % in the query you can use the %% string: - -```python ->>> cur.execute("SELECT (%s % 2) = 0 AS even", (10,)) # WRONG ->>> cur.execute("SELECT (%s %% 2) = 0 AS even", (10,)) # correct -``` - -While the mechanism resembles regular Python strings manipulation, there are a few subtle differences you should care about when passing parameters to a query. - -- The Python string operator % must not be used: the execute() method accepts a tuple or dictionary of values as second parameter. Never use % or + to merge values into queries: - - ```python - >>> cur.execute("INSERT INTO numbers VALUES (%s, %s)" % (10, 20)) # WRONG - >>> cur.execute("INSERT INTO numbers VALUES (%s, %s)", (10, 20)) # correct - ``` - -- For positional variables binding, the second argument must always be a sequence, even if it contains a single variable (remember that Python requires a comma to create a single element tuple): - - ```python - >>> cur.execute("INSERT INTO foo VALUES (%s)", "bar") # WRONG - >>> cur.execute("INSERT INTO foo VALUES (%s)", ("bar")) # WRONG - >>> cur.execute("INSERT INTO foo VALUES (%s)", ("bar",)) # correct - >>> cur.execute("INSERT INTO foo VALUES (%s)", ["bar"]) # correct - ``` - -- The placeholder must not be quoted. Psycopg will add quotes where needed: - - ```python - >>> cur.execute("INSERT INTO numbers VALUES ('%s')", (10,)) # WRONG - >>> cur.execute("INSERT INTO numbers VALUES (%s)", (10,)) # correct - ``` - -- The variables placeholder must always be a %s, even if a different placeholder (such as a %d for integers or %f for floats) may look more appropriate: - - ```python - >>> cur.execute("INSERT INTO numbers VALUES (%d)", (10,)) # WRONG - >>> cur.execute("INSERT INTO numbers VALUES (%s)", (10,)) # correct - ``` - -- Only query values should be bound via this method: it shouldn’t be used to merge table or field names to the query (Psycopg will try quoting the table name as a string value, generating invalid SQL). 
If you need to dynamically generate SQL queries (for instance, dynamically choosing a table name), you can use the facilities provided by the psycopg2.sql module: - - ```python - >>> cur.execute("INSERT INTO %s VALUES (%s)", ('numbers', 10)) # WRONG - >>> cur.execute(SQL("INSERT INTO {} VALUES (%s)").format(Identifier('numbers')), (10,)) # correct - ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/5-adaptation-of-python-values-to-sql-types.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/5-adaptation-of-python-values-to-sql-types.md deleted file mode 100644 index 0d4b26b6..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/5-adaptation-of-python-values-to-sql-types.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Adaptation of Python Values to SQL Types -summary: Adaptation of Python Values to SQL Types -author: Zhang Cuiping -date: 2021-10-11 ---- - -# Adaptation of Python values to SQL types - -Many standard Python types are adapted into SQL and returned as Python objects when a query is executed. - -The following table shows the default mapping between Python and PostgreSQL types: - -| Python | PostgreSQL | -| :----------------------------------------------------------- | :--------------------------- | -| None | NULL | -| bool | bool | -| float | real, double | -| int, long | smallint, integer, bigint | -| [`Decimal`](https://docs.python.org/3/library/decimal.html#decimal.Decimal) | numeric | -| str, unicode | varchar, text | -| buffer, [`memoryview`](https://docs.python.org/3/library/stdtypes.html#memoryview), [`bytearray`](https://docs.python.org/3/library/stdtypes.html#bytearray), [`bytes`](https://docs.python.org/3/library/stdtypes.html#bytes), Buffer protocol | bytea | -| date | date | -| time | time, timetz | -| datetime | timestamp, timestamptz | -| timedelta | interval | -| list | ARRAY | -| tuple, namedtuple | Composite types, `IN` syntax | -| dict | hstore | -| Psycopg's `Range` | range | -| Anything™ | json | -| [`UUID`](https://docs.python.org/3/library/uuid.html#uuid.UUID) | uuid | -| [`ipaddress`](https://docs.python.org/3/library/ipaddress.html#module-ipaddress) objects | inet, cidr | - -For more information, see the [community project documentation](https://www.psycopg.org/docs/usage.html#adaptation-of-python-values-to-sql-types). diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/6-new-features-in-mogdb.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/6-new-features-in-mogdb.md deleted file mode 100644 index ad5787ca..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/6-new-features-in-mogdb.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: New Features -summary: New Features -author: Yao Qian -date: 2023-12-18 ---- - -# New Features - -> This document describes the features that this driver adds on top of the original driver. - -## Automatic Savepoint - -> Prerequisite: this feature takes effect only when autocommit is disabled (`connect.autocommit=False`). - -When autocommit is disabled, you must call `connect.commit()` explicitly to commit; all SQL statements executed before the commit are treated as one transaction. If one SQL statement fails and rollback is then called, all uncommitted operations are rolled back. - -You can enable automatic savepoint creation by setting `connect.autosavepoint=True` (the default is False). When this is enabled, a savepoint is automatically created for each SQL statement executed before the commit (only one savepoint is kept per transaction). If a statement fails, the driver automatically rolls back only the failed statement rather than all operations; after catching the execution error, you can still call `connect.commit()` to commit all operations executed before the failed statement. - -Use any one of the following four ways to set it: - -> When the autosavepoint parameter is set in a string, the values '', '0', 'off', 'false', 'n', and 'no' are treated as False; all other values are treated as True. - -```python -# Option 1: set the parameter in a URL-style DSN -conn = psycopg2.connect("postgres://user:password@ip1:port,ip2:port:.../dbname?autosavepoint=true")
- -# Option 2: add it to a key-value DSN -conn = psycopg2.connect("user=username host=ip dbname=xxx autosavepoint=true") - -# Option 3: pass it as a connection parameter -conn = psycopg2.connect(user=username,host=ip, dbname=xxx, autosavepoint=True) - -# Option 4: set it after the connection is created -conn = psycopg2.connect(user=username,host=ip, dbname=xxx) -conn.autosavepoint = True -``` - -## New Placeholder - -The original placeholder support has been extended: `%` is still the default placeholder, and the dollar sign `$` can now be used as a placeholder as well. - -Example: - -```python -import psycopg2 as pg -conn = pg.connect(database = 'testdb' ...) -cursor = conn.cursor() - -cursor.execute("delete from map") - -for i in range(0, 10): - cursor.execute("insert into map values($1, $2)", [i, i + 1], place_holder = '$') - -cursor.execute("select * from map") -print(cursor.fetchall()) - -cursor.execute("select key from map where key > $2 and value > $1", [3, 5], place_holder = '$') -print(cursor.fetchall()) - -for i in range(0, 10): - if i % 2 == 1: - cursor.execute("delete from map where key = $1 and value = $2", [i, i + 1], place_holder = '$') - -cursor.execute("select * from map") -print(cursor.fetchall()) -``` - -In particular, the new placeholder parameter `place_holder` can be passed to functions such as `execute`; its default value is the percent sign. That is, to use the dollar sign as the placeholder, you must specify `place_holder = '$'`. - -Note that only one placeholder style is in effect at a time. For example, when `place_holder = '%'` is set, all dollar signs are treated as ordinary characters rather than escape characters, and vice versa. - -When % is the placeholder, a literal `%` in the SQL must be escaped as `%%`; likewise, when `place_holder = '$'`, a literal dollar sign must be escaped as `$$`. - -## New Batch Functions in the extras Module - -- extras.execute_prepared_batch -- extras.execute_params_batch - -Usage examples: - -Unlike submitting a single row of data, batch submission takes two-dimensional data as the data parameter. - -> Note: Because both batch interfaces submit the statement to the server for parsing and variable binding, the placeholders in the SQL must use the format supported by the database, that is, a format such as `$1, $2`. - -### execute_prepared_batch - -```python -# Create a prepared statement named prep -cur.execute("prepare prep as insert into test values ($1, $2)") -# Execute prep, passing in the batch parameters -execute_prepared_batch(cur, "prep", [[1, '1'], [2, '2'], [3, '3']]) -``` - -### execute_params_batch - -```python -# No need to create a prepared statement in advance; submit the SQL and the data together -execute_params_batch(cur, "insert into t3 values ($1, $2)", [[1, '1'], [2, '2'], [3, '3']]) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/9-connecting-to-the-database-using-ssl.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/9-connecting-to-the-database-using-ssl.md deleted file mode 100644 index 17dffd02..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/4.1-psycopg-based-development/9-connecting-to-the-database-using-ssl.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Connecting to the Database -summary: Connecting to the Database -author: Zhang Cuiping -date: 2021-10-11 ---- - -# Connecting to the Database (Using SSL) - -When you use psycopg2 to connect to the MogDB server, you can enable SSL to encrypt the communication between the client and server. To enable SSL, you must have the server certificate, client certificate, and private key files. For details on how to obtain these files, see related documents and commands of OpenSSL. - -1. Use an .ini file (the **configparser** package of Python can parse this type of configuration file) to save the configuration information about the database connection. -2. Add the SSL connection parameters **sslmode**, **sslcert**, **sslkey**, and **sslrootcert** to the connection options. - 1. **sslmode**: [Table 1](#table1.1) - 2. **sslcert**: client certificate path - 3. **sslkey**: client key path - 4. **sslrootcert**: root certificate path -3. Use the **psycopg2.connect** function to obtain the connection object. -4. Use the connection object to create a cursor object, as shown in the sketch below.
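Putting the steps above together, here is a minimal sketch; the file name `database.ini`, the section name `mogdb`, and the certificate paths are illustrative assumptions, and the available sslmode values are listed in Table 1 below:

```python
import configparser
import psycopg2

# Step 1: read connection settings from an .ini file (names are illustrative).
parser = configparser.ConfigParser()
parser.read("database.ini")
params = dict(parser["mogdb"])   # e.g. dbname, user, password, host, port

# Steps 2-3: add the SSL parameters and obtain the connection object.
conn = psycopg2.connect(
    sslmode="verify-ca",
    sslcert="client.crt",
    sslkey="client.key",
    sslrootcert="cacert.pem",
    **params,
)

# Step 4: create a cursor object from the connection.
cur = conn.cursor()
```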
- -**Table 1** sslmode options - -| sslmode | Whether SSL Encryption Is Enabled | Description | -| :---------- | :-------------------------------- | :----------------------------------------------------------- | -| disable | No | SSL connection is not enabled. | -| allow | Possible | If the database server requires SSL connection, SSL connection can be enabled. However, authenticity of the database server will not be verified. | -| prefer | Possible | If the database supports SSL connection, SSL connection is preferred. However, authenticity of the database server will not be verified. | -| require | Yes | SSL connection is required and data is encrypted. However, authenticity of the database server will not be verified. | -| verify-ca | Yes | The SSL connection must be enabled, and the server certificate must be issued by a trusted CA. | -| verify-full | Yes | The SSL connection must be enabled, which is not supported by MogDB currently. | diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/5-commissioning.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/5-commissioning.md deleted file mode 100644 index 3fc96e9b..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/5-commissioning.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Commissioning -summary: Commissioning -author: Guo Huan -date: 2021-04-27 ---- - -# Commissioning - -To control the output of log files and better understand the operating status of the database, modify specific configuration parameters in the **postgresql.conf** file in the instance data directory. - -[Table 1](#Configuration parameters) describes the adjustable configuration parameters. - -**Table 1** Configuration parameters - -| Parameter | Description | Value Range | Remarks | -| ------------------------------------------------------------ | ------------------------------------- | ------------------------------------- | ------------------------------------- | -| client_min_messages | Level of messages to be sent to clients. | - DEBUG5<br/>- DEBUG4<br/>- DEBUG3<br/>- DEBUG2<br/>- DEBUG1<br/>- LOG<br/>- NOTICE<br/>- WARNING<br/>- ERROR<br/>- FATAL<br/>- PANIC<br/>Default value: NOTICE | Messages of the set level or lower will be sent to clients. The lower the level is, the fewer the messages will be sent. | -| log_min_messages | Level of messages to be recorded in server logs. | - DEBUG5<br/>- DEBUG4<br/>- DEBUG3<br/>- DEBUG2<br/>- DEBUG1<br/>- INFO<br/>- NOTICE<br/>- WARNING<br/>- ERROR<br/>- LOG<br/>- FATAL<br/>- PANIC<br/>Default value: WARNING | Messages higher than the set level will be recorded in logs. The higher the level is, the fewer the server logs will be recorded. | -| log_min_error_statement | Level of SQL error statements to be recorded in server logs. | - DEBUG5<br/>- DEBUG4<br/>- DEBUG3<br/>- DEBUG2<br/>- DEBUG1<br/>- INFO<br/>- NOTICE<br/>- WARNING<br/>- ERROR<br/>- FATAL<br/>- PANIC<br/>Default value: ERROR | SQL error statements of the set level or higher will be recorded in server logs. Only a system administrator is allowed to modify this parameter. | -| log_min_duration_statement | Minimum execution duration of a statement. If the execution duration of a statement is equal to or longer than the set milliseconds, the statement and its duration will be recorded in logs. Enabling this function can help you track the query attempts to be optimized. | INT type<br/>Default value: 30min<br/>Unit: millisecond | The value -1 indicates that the function is disabled. Only a system administrator is allowed to modify this parameter. | -| log_connections/log_disconnections | Whether to record a server log message when each session is connected or disconnected. | - **on**: The system records a server log message when each session is connected or disconnected.<br/>- **off**: The system does not record a server log message when each session is connected or disconnected.<br/>Default value: off | - | -| log_duration | Whether to record the duration of each executed statement. | - **on**: The system records the duration of each executed statement.<br/>- **off**: The system does not record the duration of each executed statement.<br/>Default value: on | Only a system administrator is allowed to modify this parameter. | -| log_statement | SQL statements to be recorded in logs. | - **none**: The system does not record any SQL statements.<br/>- **ddl**: The system records data definition statements.<br/>- **mod**: The system records data definition statements and data operation statements.<br/>- **all**: The system records all statements.<br/>Default value: none | Only a system administrator is allowed to modify this parameter. | -| log_hostname | Whether to record host names. | - **on**: The system records host names.<br/>- **off**: The system does not record host names.<br/>Default value: off | By default, connection logs only record the IP addresses of connected hosts. With this function, the host names will also be recorded. This parameter affects parameters in **Querying Audit Results**, GS_WLM_SESSION_HISTORY, PG_STAT_ACTIVITY, and **log_line_prefix**. | - -[Table 2](#description) describes the preceding parameter levels. - -**Table 2** Description of log level parameters - -| Level | Description | -| ---------- | ------------------------------------------------------------ | -| DEBUG[1-5] | Provides information that can be used by developers. Level 1 is the lowest level whereas level 5 is the highest level. | -| INFO | Provides information implicitly requested by users, for example, information generated during the VACUUM VERBOSE process. | -| NOTICE | Provides information that may be important to users, for example, truncations of long identifiers or indexes created as a part of a primary key. | -| WARNING | Provides warning information for users, for example, a COMMIT outside a transaction block. | -| ERROR | Reports an error that causes a command to terminate. | -| LOG | Reports information that administrators may be interested in, for example, the activity levels of check points. | -| FATAL | Reports the reason that causes a session to terminate. | -| PANIC | Reports the reason that causes all sessions to terminate. | diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/application-development-tutorial.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/application-development-tutorial.md deleted file mode 100644 index d771ba7f..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/application-development-tutorial.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Application Development Guide -summary: Application Development Guide -author: Guo Huan -date: 2023-05-18 ---- - -# Application Development Guide - -- **[Development Specifications](./development-specifications/introduction-to-development-specifications.md)** -- **[Development Based on JDBC](./2-development-based-on-jdbc/development-based-on-jdbc.md)** -- **[Development Based on ODBC](./3-development-based-on-odbc/1-development-based-on-odbc.md)** -- **[Development Based on libpq](./4-development-based-on-libpq/development-based-on-libpq.md)** -- **[Psycopg-Based Development](./4.1-psycopg-based-development/1-psycopg-based-development.md)** -- **[Commissioning](5-commissioning.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/design-specification.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/design-specification.md deleted file mode 100644 index 4db8c066..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/design-specification.md +++ /dev/null @@ -1,351 +0,0 @@ ---- -title: Design Specification -summary: Design Specification -author: Guo Huan -date: 2021-04-27 ---- - -# Design Specification - -## Database Design - -- It is recommended to name the database after the business function, which is simple and intuitive. - -- The database is preferentially created using the PG compatibility type. - -- The recommended database encoding is utf8. - -## Tablespace Design - -- The frequently used tables and indexes are stored in a separate tablespace, which should be created on a disk with good performance. - -- Tables and indexes that are dominated by historical data or are less active can be stored in tablespaces with poor disk performance.
- -- Tables and indexes can be stored separately in different tablespaces. - -- Tablespaces can also be divided by database, by schema, or by business function. - -- Each database/schema corresponds to a tablespace and a corresponding index tablespace. - -## Schema Design - -- When you create a user under a database, a schema with the same name is created under that database by default. -- It is not recommended to create database objects under the default public schema. -- Create a schema that is different from the username for the business to use. - -## Table Design - -- When designing the table structure, plan ahead so that fields do not need to be added frequently and field types or lengths do not need to be modified later. - -- Comment information must be added to the table, with the table name matching the comment information. - -- Using the unlogged/temp/temporary keywords to create business tables is prohibited. - -- The data type must be strictly consistent for the fields that are used as join relationships between tables to avoid indexes not working properly. - -- It is forbidden to use VARCHAR or other character types to store date values; if such a type is used, no date operations can be performed on the field, and the format needs to be strictly defined in the data specification. - -- For astore tables with frequent updates, it is recommended to specify fillfactor=85 when building the table to reserve space for HOT updates. - -- Tables used for frequent updates should be placed separately in a tablespace with good storage performance. - -- It is recommended to consider partitioning for tables whose data volume exceeds one billion rows or that occupy more than 10GB on disk. - -- The data types of the fields in the table structure must be consistent with those defined in the application, and the collation rules must be consistent between tables, to avoid errors or the inability to use indexes. - - > Note: For example, the data type of the **user_id** field of table A is defined as **varchar**, but the SQL statement is **where user_id=1234;** - -## Partitioned Table Design - -- The number of partitions in a partitioned table is not recommended to exceed 1000. - -- Different tablespaces can be chosen for partitions based on how frequently they are used. - -- The primary key or unique index must contain the partition key. - -- For tables with larger data volume, partition according to the attributes of the table data to get better performance. - -- To convert a normal table into a partitioned table, you need to create a new partitioned table and then import the data from the normal table into the newly created partitioned table. Therefore, when you initially design the table, please plan in advance whether to use partitioned tables according to your business. - -- For businesses that regularly delete historical data, it is recommended that the tables be partitioned by time; when deleting, do not use the DELETE operation, but DROP or TRUNCATE the corresponding partition. - -- It is not recommended to use global indexes in partitioned tables, because partition maintenance operations may invalidate global indexes, making them difficult to maintain. - -### Use of Partitioned Table - -Operate on the range partitioned table as follows.
- -- Create a tablespace - -```sql -MogDB=# CREATE TABLESPACE example1 RELATIVE LOCATION 'tablespace1/tablespace_1'; -MogDB=# CREATE TABLESPACE example2 RELATIVE LOCATION 'tablespace2/tablespace_2'; -MogDB=# CREATE TABLESPACE example3 RELATIVE LOCATION 'tablespace3/tablespace_3'; -MogDB=# CREATE TABLESPACE example4 RELATIVE LOCATION 'tablespace4/tablespace_4'; -``` - -When the following information is displayed, it means the creation is successful. - -```sql -CREATE TABLESPACE -``` - -- Create a partitioned table - -```sql -MogDB=# CREATE TABLE mogdb_usr.customer_address -( - ca_address_sk integer NOT NULL , - ca_address_id character(16) NOT NULL , - ca_street_number character(10) , - ca_street_name character varying(60) , - ca_street_type character(15) , - ca_suite_number character(10) , - ca_city character varying(60) , - ca_county character varying(30) , - ca_state character(2) , - ca_zip character(10) , - ca_country character varying(20) , - ca_gmt_offset numeric(5,2) , - ca_location_type character(20) -) -TABLESPACE example1 - -PARTITION BY RANGE (ca_address_sk) -( - PARTITION P1 VALUES LESS THAN(5000), - PARTITION P2 VALUES LESS THAN(10000), - PARTITION P3 VALUES LESS THAN(15000), - PARTITION P4 VALUES LESS THAN(20000), - PARTITION P5 VALUES LESS THAN(25000), - PARTITION P6 VALUES LESS THAN(30000), - PARTITION P7 VALUES LESS THAN(40000), - PARTITION P8 VALUES LESS THAN(MAXVALUE) TABLESPACE example2 -) -ENABLE ROW MOVEMENT; -``` - -When the following information is displayed, it means the creation is successful. - -```sql -CREATE TABLE -``` - - ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) It is recommended that the number of column-based partitioned tables does not exceed 1,000. - -- Insert data - -Insert data from table **mogdb_usr.customer_address** into table **mogdb_usr.customer_address_bak**. For example, you have created a backup table **mogdb_usr.customer_address_bak** of table **mogdb_usr.customer_address** in the database, and now you need to insert the data from table **mogdb_usr.customer_address** into table **mogdb_usr. customer_address_bak**, then you can run the following command. 
-```sql -MogDB=# CREATE TABLE mogdb_usr.customer_address_bak -( - ca_address_sk integer NOT NULL , - ca_address_id character(16) NOT NULL , - ca_street_number character(10) , - ca_street_name character varying(60) , - ca_street_type character(15) , - ca_suite_number character(10) , - ca_city character varying(60) , - ca_county character varying(30) , - ca_state character(2) , - ca_zip character(10) , - ca_country character varying(20) , - ca_gmt_offset numeric(5,2) , - ca_location_type character(20) -) -TABLESPACE example1 -PARTITION BY RANGE (ca_address_sk) -( - PARTITION P1 VALUES LESS THAN(5000), - PARTITION P2 VALUES LESS THAN(10000), - PARTITION P3 VALUES LESS THAN(15000), - PARTITION P4 VALUES LESS THAN(20000), - PARTITION P5 VALUES LESS THAN(25000), - PARTITION P6 VALUES LESS THAN(30000), - PARTITION P7 VALUES LESS THAN(40000), - PARTITION P8 VALUES LESS THAN(MAXVALUE) TABLESPACE example2 -) -ENABLE ROW MOVEMENT; -CREATE TABLE -MogDB=# INSERT INTO mogdb_usr.customer_address_bak SELECT * FROM mogdb_usr.customer_address; -INSERT 0 0 -``` - -- Alter partitioned table row movement properties - -```sql -MogDB=# ALTER TABLE mogdb_usr.customer_address_bak DISABLE ROW MOVEMENT; -ALTER TABLE -``` - -- Delete a partition - -Delete partition P8. - -```sql -MogDB=# ALTER TABLE mogdb_usr.customer_address_bak DROP PARTITION P8; -ALTER TABLE -``` - -- Add a partition - -Add partition P8. The range is 40000 <= P8 <= MAXVALUE. - -```sql -MogDB=# ALTER TABLE mogdb_usr.customer_address_bak ADD PARTITION P8 VALUES LESS THAN (MAXVALUE); -ALTER TABLE -``` - -- Rename a partition - -Rename partition P8 as P_9. - -```sql -MogDB=# ALTER TABLE mogdb_usr.customer_address_bak RENAME PARTITION P8 TO P_9; -ALTER TABLE -``` - -Rename partition P_9 as P8. - -```sql -MogDB=# ALTER TABLE mogdb_usr.customer_address_bak RENAME PARTITION FOR (40000) TO P8; -ALTER TABLE -``` - -- Alter the tablespace of partition - -Alter the tablespace of partition P6 to example3. - -```sql -MogDB=# ALTER TABLE mogdb_usr.customer_address_bak MOVE PARTITION P6 TABLESPACE example3; -ALTER TABLE -``` - -Alter the tablespace of partition P4 to example4. - -```sql -MogDB=# ALTER TABLE mogdb_usr.customer_address_bak MOVE PARTITION P4 TABLESPACE example4; -ALTER TABLE -``` - -- Query a partition - -Query partition P6. - -```sql -MogDB=# SELECT * FROM mogdb_usr.customer_address_bak PARTITION (P6); -MogDB=# SELECT * FROM mogdb_usr.customer_address_bak PARTITION FOR (35888); -``` - -- Delete a partitioned table and tablespace - -```sql -MogDB=# DROP TABLE mogdb_usr.customer_address_bak; -DROP TABLE -MogDB=# DROP TABLESPACE example1; -MogDB=# DROP TABLESPACE example2; -MogDB=# DROP TABLESPACE example3; -MogDB=# DROP TABLESPACE example4; -DROP TABLESPACE -``` - -## Column Design - -- Avoid duplication of column names with system tables. - -- Field meanings and data types should be consistent with the program code design. - -- All fields must have comment information added. - -- Do not use character types when you can use numeric types. - -- It is forbidden to store date data in character types. - -- Use timestamptz for time type fields. - -- Try to require not null for fields and provide default values for fields. - -- A newly created MogDB database is compatible with Oracle by default; under this mode a NOT NULL constraint does not allow an empty string to be passed in, because an empty string is converted to NULL by default. Databases created in the PG-compatible mode do not have this problem.
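A small sketch of the empty-string behavior described in the last item; the table name is hypothetical.

```sql
-- In a database created in the default (Oracle-compatible) mode, an empty
-- string is converted to NULL, so this INSERT violates the NOT NULL constraint:
MogDB=# CREATE TABLE t_name (name varchar(10) NOT NULL);
MogDB=# INSERT INTO t_name VALUES ('');   -- fails: null value in column "name"

-- In a database created with DBCOMPATIBILITY='PG', '' is kept as an empty
-- string, so the same INSERT succeeds.
```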
- -## Sequence Design - -- Manual creation of table-related sequences is prohibited; sequences should be specified through the serial/bigserial types. - -- It is recommended to set the step size of the sequence to 1. - -- It is not recommended to set minvalue and maxvalue. - -- It is not recommended to set cache; sequence numbers are not consecutive after cache is set. - -- It is prohibited to turn on cycle. - -- The serial type should be consistent with the type and range of the corresponding variable defined in the code, to prevent data from failing to be inserted. - -## Constraint Design - -### Primary Key Constraint - -- Each table must include a primary key. - -- It is not recommended that the primary key carry business meaning, such as an identification card number or a country name, even though such values are unique. - -- It is recommended that a primary key is written as `id serial primary key` or `id bigserial primary key`. - -- It is recommended that the primary key in a large-sized table be written as follows, which is easier to maintain later. - - ```sql - create table test(id serial not null ); - create unique index CONCURRENTLY ON test (id); - ``` - -### Unique Constraint - -Apart from the primary key, unique constraints may be needed. You can create a unique index with **uk_** as the prefix to implement a unique constraint. - -### Foreign Key Constraint - -- You'd better create foreign key constraints for a table with foreign key relationship. -- The use of foreign keys is not recommended for systems with high performance requirements where data consistency can be controlled by the application. -- When using the foreign key, you must set the action of the foreign key, such as cascade, set null, or set default. - -### Non-Empty Column - -- All non-null columns must have the not null constraint added. - -### Check Constraint - -- For fields with the check constraint, it is required to specify the check rules, such as the gender and status fields. - -## Index Design - -- The number of indexes on a table with frequent DML operations is not recommended to exceed 5. -- Add the concurrently parameter when creating or dropping an index. -- Virtual indexes can be used to determine the validity of indexes before actually creating them. -- Create indexes on fields that frequently appear after the keywords order by, group by, and distinct. -- Create indexes on fields that are often used as query filter conditions. -- Create indexes on columns that are often used in table joins. -- The number of fields in a composite index is not recommended to exceed 3. -- Composite indexes should have one field that is a common search condition. -- The first field of a composite index should not have a single-field index. -- For tables where data is rarely updated and only a few of the fields are frequently queried, consider using covering indexes. -- Do not create indexes on fields that have a large number of duplicate values. -- It is recommended to use unique indexes instead of unique constraints, which eases subsequent maintenance. -- It is recommended to build composite indexes with multiple fields for high-frequency queries with multiple fields and conditions in where, with reference to the data distribution. -- Useless indexes and duplicate indexes should be deleted to avoid negative impact on the execution plan and database performance. - -## View Design - -- You'd better use simple views and use complex views sparingly. - - Simple view: Data comes from a single table and a simple view does not contain groups of data and functions.
- - Complex view: Data comes from multiple tables, or a complex view contains groups of data or functions. A complex view can contain three tables at most. - -- You'd better not use nested views. If nested views have to be used, it is advised to have two levels of nesting at most. - -## Function Design - -- A database function should involve database operations, such as retrieving table records or other database objects, or even modifying database information through Insert, Delete, Update, Drop, or Create. -- If the logic is unrelated to the database, do not implement it as a database function. -- It is not advised to use DML or DDL statements in a function. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/introduction-to-development-specifications.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/introduction-to-development-specifications.md deleted file mode 100644 index 6dd56c63..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/introduction-to-development-specifications.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Development Specifications -summary: Development Specifications -author: Guo Huan -date: 2021-04-27 ---- - -# Development Specifications - -If the connection pool mechanism is used during application development, comply with the following specifications: - -- If GUC parameters are set in the connection, run **SET SESSION AUTHORIZATION DEFAULT;RESET ALL;** to clear the connection status before you return the connection to the connection pool. -- If a temporary table is used, delete the temporary table before you return the connection to the connection pool. - -If you do not do so, the connection in the connection pool will be stateful, which affects subsequent operations on the connection pool. - -Compatibility: - -- The new driver is forward compatible with the database. To use the new features added to the driver and database, you must upgrade the database. -- Setting **behavior_compat_options** to **'proc_outparam_override'** is applicable only in A-compatible mode. - -- [Overview](overview-of-development-specifications.md) -- [Naming Specification](naming-specification.md) -- [Design Specification](design-specification.md) -- [Syntax Specification](syntax-specification.md) -- [Query Operations](query-operations.md) -- [PostgreSQL Compatibility](postgresql-compatibility.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/naming-specification.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/naming-specification.md deleted file mode 100644 index aa06e4af..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/naming-specification.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Naming Specification -summary: Naming Specification -author: Guo Huan -date: 2021-04-27 ---- - -# Naming Specification - -## Unified Object Naming Specification - -The unified standards for naming database objects, such as database, schema, table, column, view, index, constraint, sequence, function, trigger, etc. are as follows: - -- The length cannot exceed 63 characters. -- It is advised to use meaningful English vocabularies. - -- It is advised to use a combination of lowercase letters, numbers, and underscores. -- It is not advised to use double quotation marks (") unless the name must contain special characters such as uppercase letters or spaces.
-- It is not advised to start with PG or GS (to avoid confusion with system DB objects), and it is not advised to start with a number. -- It is forbidden to use [reserved words](../../../reference-guide/sql-reference/keywords/keywords.md). Refer to official documents for reserved keywords. -- The number of columns that a table can contain varies from 250 to 1600 depending on the field type. - -## Temporary and Backup Object Naming - -- It is recommended to add a date to the names of temporary or backup database objects (such as table), for example, `dba.trade_record_1970_01_01` (where dba is the DBA-specific schema, trade_record is the table name, and 1970_01_01 is the backup date). - -## Tablespace Naming - -- The user tablespace of the database is represented by **ts_\<tablespace name>**, where the **tablespace name** contains the following two categories: - 1. Data space: For the user's default tablespace, it is represented by **default**. For other tablespaces, it is represented according to the category of the tables hosted on the tablespace. For example, the table that stores code is represented by **code**. The table that stores customer information is represented by **customer**. Try to use one tablespace to host the tables of that category. If a table is particularly large, consider using a separate tablespace. - 2. Index space: add **idx_** in front of the name of the corresponding data tablespace. For example, the index space for the user's default tablespace is represented by **ts_idx_default**. For the index tablespace of the code table, use **ts_idx_code**. - -## Index Naming - -- Index object naming rule: **table_column_idx**, for example, **student_name_idx**. This is the default naming method used by the MogDB database when an index is created without an explicitly specified name. - - Therefore, it is advised to create indexes without naming them explicitly, but using DBMS defaults. - -```sql -create unique index on departments(department_id); -CREATE INDEX - \di -+----------+-------------------------------+--------+---------+ -| Schema | Name | Type | Owner | -|----------+-------------------------------+--------+---------| -| mogdb | departments_department_id_idx | index | mogdb | -+----------+-------------------------------+--------+---------+ -SELECT 1 -``` - -## Variables Naming - -- English words should be used for naming, and pinyin should be avoided; in particular, pinyin abbreviations should not be used. Chinese or special characters are not allowed in names. - -- For simple applications such as counting that involve no complicated operations, define the variables as numeric types. - -## Partitioned Table Naming - -- The name of the partitioned table follows the naming rules of ordinary tables. - -- When a table is partitioned by time range (one partition per month), the partition name is **PART_YYYYMM**. - - For example, PART_201901 and PART_201902 - -## Function Naming - -- The name should reflect what the function actually does. Use a verb prefix for functions that perform an action.
- - Example: The following naming conforms to the specification: - - ``` - func_addgroups (Add multiple groups) - func_addgroup (Add one group) - ``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/overview-of-development-specifications.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/overview-of-development-specifications.md deleted file mode 100644 index 706f0db7..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/overview-of-development-specifications.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2021-04-27 ---- - -# Overview - -## Introduction - -Although ISO has issued SQL-92, SQL:1999, SQL:2006, and other SQL standards, each database product implements the same functionality differently because of its own characteristics, which also makes the relevant grammatical rules differ. Therefore, when formulating specific development specifications, corresponding specifications must be written for each database. - -This specification emphasizes practicability and operability. Based on the common problems and mistakes developers easily make in the coding process, it gives detailed and clear specifications and constraints for all aspects of code writing. It mainly includes the following content: - -- Naming specification -- Design specification -- Syntax specification -- Optimization-related specification -- PG compatibility - -In addition, specific examples are given for each detailed rule of the specification. - -## Application Scope - -This specification applies to MogDB 1.1.0 and later versions. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/postgresql-compatibility.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/postgresql-compatibility.md deleted file mode 100644 index 5a34a3b2..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/postgresql-compatibility.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: PostgreSQL Compatibility -summary: PostgreSQL Compatibility -author: Guo Huan -date: 2021-04-27 ---- - -# PostgreSQL Compatibility - -## Database Creation Specifications - -During MogDB database creation, the following PG compatibility mode is used: - -```sql -create database dbname DBCOMPATIBILITY='PG' encoding='utf8'; -``` - -## Data Type - -### Value Type - -During development and usage, MogDB supports only the smallint, integer, bigint, numeric[(p[,s])], serial, and bigserial value types.
- -| Type | PostgreSQL | MogDB | Storage Length | Remarks | -| :--------------- | :--------- | :-------- | :------------- | :----------------------------------------------------------- | -| tinyint | / | Supported | 1 byte | 0 to 255 | -| smallint | Supported | Supported | 2 bytes | -32,768 to +32,767 | -| integer | Supported | Supported | 4 bytes | -2,147,483,648 to +2,147,483,647 | -| binary_integer | / | Supported | / | integer alias | -| bigint | Supported | Supported | 8 bytes | -9,223,372,036,854,775,808 to +9,223,372,036,854,775,807 | -| decimal[(p[,s])] | Supported | Supported | Variable byte | A maximum of 131072 before the decimal point and 16383 after the decimal point | -| numeric[(p[,s])] | Supported | Supported | Variable byte | A maximum of 131072 before the decimal point and 16383 after the decimal point | -| number[(p[,s])] | / | Supported | / | Numeric alias | -| real | Supported | Supported | 4 bytes | Accurate to six decimal digits | -| float4 | / | Supported | 4 bytes | Accurate to six decimal digits | -| double precision | Supported | Supported | 8 bytes | Accurate to fifteen decimal digits | -| binary_double | / | Supported | 8 bytes | Double precision alias | -| float8 | / | Supported | 8 bytes | Accurate to fifteen decimal digits | -| float[(p )] | / | Supported | 4 or 8 bytes | | -| dec[(p,[s])] | / | Supported | / | A maximum of 131072 before the decimal point and 16383 after the decimal point | -| integer[(p,[s])] | / | Supported | / | A maximum of 131072 before the decimal point and 16383 after the decimal point | -| smallserial | Supported | Supported | 2 bytes | 1 to 32,767 | -| serial | Supported | Supported | 4 bytes | 1 to 2,147,483,647 | -| bigserial | Supported | Supported | 8 bytes | 1 to 9,223,372,036,854,775,807 | - -### Character Type - -During the development, MogDB supports only the char(n), varchar(n), and text character types. - -| Type | PostgreSQL | MogDB | Storage Length | Remarks | -| :----------- | :--------- | :-------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| char(n) | Supported | Supported | A maximum of 1 GB in PostgreSQL<br/>A maximum of 10 MB in MogDB | In PostgreSQL, *n* indicates the number of characters.<br/>In MogDB, *n* indicates the number of bytes.<br/>In the compatibility PG mode, *n* indicates the number of characters. | -| nchar(n) | / | Supported | A maximum of 10 MB | *n* indicates the number of bytes.<br/>In the compatibility PG mode, *n* indicates the number of characters. | -| varchar(n) | Supported | Supported | A maximum of 1 GB in PostgreSQL<br/>A maximum of 10 MB in MogDB | In PostgreSQL, *n* indicates the number of characters.<br/>In MogDB, *n* indicates the number of bytes.<br/>In the compatibility PG mode, *n* indicates the number of characters. | -| varchar2(n) | / | Supported | A maximum of 10 MB | varchar(n) alias | -| nvarchar2(n) | / | Supported | A maximum of 10 MB | *n* indicates the number of characters. | -| text | Supported | Supported | 1 GB - 1 | | -| clob | / | Supported | 1 GB - 1 | text alias | - -### Time Type - -During the development, MogDB supports only the timestamp[(p )][with time zone] and date time types. - -| Type | PostgreSQL | MogDB | Storage Length | Remarks | -| :--------------------------------- | :--------- | :-------- | :------------- | :----------------------------------------------------------- | -| timestamp[(p )][without time zone] | Supported | Supported | 8 bytes | 4713 BC to 294276 AD | -| timestamp[(p )][with time zone] | Supported | Supported | 8 bytes | 4713 BC to 294276 AD | -| date | Supported | Supported | 4 bytes | 4713 BC to 5874897 AD (The actual storage size is 8 bytes in MogDB) | -| time[(p )][without time zone] | Supported | Supported | 8 bytes | 00:00:00 to 24:00:00 | -| time[(p )][with time zone] | Supported | Supported | 12 bytes | 00:00:00+1459 to 24:00:00-1459 | -| interval[fields][(p )] | Supported | Supported | 16 bytes | -178000000 to 178000000 years | -| smalldatetime | / | Supported | 8 bytes | Date and time without time zone, accurate to the minute; 30 seconds are rounded up to one minute | -| interval day(1) to second(p ) | / | Supported | 16 bytes | | -| reltime | / | Supported | 4 bytes | | - -### JSON Type - -| Type | PostgreSQL | MogDB | Storage Length | Remarks | -| :---- | :--------- | :-------------------------- | :------------- | :------ | -| json | Supported | Supported | / | | -| jsonb | Supported | Supported since version 2.1 | / | | - -## Keywords
- -| Keyword | MogDB | PostgreSQL | -| :------------ | :----------------------------------------------- | :----------------------------------------------- | -| AUTHID | Reserved | N/A | -| BUCKETS | Reserved | N/A | -| COMPACT | Reserved | N/A | -| DATE | Non-reserved (function or type is not supported) | | -| DELTAMERGE | Reserved | N/A | -| EXCLUDED | Reserved | N/A | -| FENCED | Reserved | N/A | -| GROUPING | | Non-reserved (function or type is not supported) | -| HDFSDIRECTORY | Reserved | N/A | -| IS | Reserved | Reserved (function or type is supported) | -| ISNULL | Non-reserved | Reserved (function or type is supported) | -| LATERAL | | Reserved | -| LESS | Reserved | N/A | -| MAXVALUE | Reserved | Non-reserved | -| MINUS | Reserved | N/A | -| MODIFY | Reserved | N/A | -| NLSSORT | Reserved | N/A | -| NUMBER | Non-reserved (function or type is not supported) | | -| PERFORMANCE | Reserved | N/A | -| PROCEDURE | Reserved | Non-reserved | -| REJECT | Reserved | N/A | -| ROWNUM | Reserved | N/A | -| SYSDATE | Reserved | N/A | -| VERIFY | Reserved | N/A | - -## Implicit Conversion Comparison Table - -| Input Type | Target Type | MogDB | PG | -| :---------- | :----------------------------------------------------------- | :------------------------ | ------------------------------------------------------------ | -| bool | int2, int4, int8 | Supported | int4 is not supported, others are the same | -| int2 | bool, text, bpchar, varchar,interval | Supported (except bpchar) | NA | -| int4 | bool, int2, text, bpchar, varchar, interval | Supported (except bpchar) | bool is not supported, int2 is in assignment, others are the same | -| int8 | bool, text, bpchar, varchar | Supported (except bpchar) | NA | -| text | int8, int4, int2, float4, float8, date, timestamp, nemeric | Supported | NA | -| float4 | int8, int4, int2, text, bpchar, varchar | Supported (except bpchar) | First three are in assignment, others are the same | -| float8 | int8, int4, int2, text, float4, bpchar, varchar, interval, numeric | Supported (except bpchar) | int8, int4, int2, float4, numeric are in assignment, others are the same | -| bpchar | int8, int4, date, timestamp, numeric | | | -| date | text, bpchar, varchar | Supported (except bpchar) | NA | -| timestamp | text, varchar | Supported | NA | -| timestamptz | text | Supported | NA | -| numeric | int8, int4, int2, text, bpchar, varchar, interval | Supported (except bpchar) | First three are in assignment, others are the same | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/query-operations.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/query-operations.md deleted file mode 100644 index e2894bd2..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/query-operations.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Query Operations -summary: Query Operations -author: Guo Huan -date: 2021-04-27 ---- - -# Query Operations - -## DDL Operation - -- Any DDL operations on existing tables are prohibited during peak business periods - -- All production DDL operations must be verified by the development test environment - -- Concurrently should be used when maintaining indexes - -- pg_repack should be used instead of vacuum full to rebuild the table - -## DML Operation - -- The SQL statement for updating data is prohibited to appear `where 1=1` -- The amount of data operated by a single DML statement should not exceed 100,000 - -- When clearing the 
data in the table, `truncate` should be used - -- For risky operations, you should open the transaction and confirm it before committing. - -- The SQL logic in the transaction should be as simple as possible, and the operation should be submitted in time after execution to avoid `idle in transaction` status. - -- Use `copy` instead of `insert` when importing a large amount of data. - -- Consider deleting indexes before importing data, and rebuild them after importing. - -## DQL Operation - -- Prohibit the use of `select *`, apply the specific required field substitution -- Prohibit the use of `where 1=1` to avoid full table scan or Cartesian product - -- The search condition value should be consistent with the field type to prevent not going to the index - -- Fields to the left of the equal sign should be consistent with the index, especially conditional or functional indexes - -- Pay attention to the execution plan of slow SQL, if it is not consistent with the expectation, change it as soon as possible - -- Use `count(*)` or `count(1)` to count rows, `count(column)` will not count `null` rows - -- Limit the number of `join`, no more than 3 are recommended - -- Recursive queries need to be limited to prevent infinite loops - -- For `or` operations, you should use `union all` or `union` instead - -## Data Import - -- When a large amount of data needs to be stored in a table, it is recommended that COPY is used rather than INSERT. This can improve the data write speed. -- Before data is imported, delete related indexes. After the import is complete, recreate indexes. This can improve the data import speed. - -## Transaction Operation - -- Make sure that the SQL logic in a transaction is simple, the granularity of each transaction is small, less resources are locked, lock and deadlock can be avoided, and transaction can be committed in a timely manner after being executed. -- For DDL operations, especially multiple DDL operations, including CRAETE, DROP, and ALTER, do not explicitly start a transaction because the lock mode value is very high and deadlock easily occurs. -- If the state of the master node is idle in transaction, related resources will be locked, thereby leading to lock, even deadlock. If the state of the slave node is idle in transaction, synchronization between the master and slave nodes will be suspended. - -## Others - -- For instances running in SSDs, it is recommended that the value of **random_page_cost** (default value: **4**) is set to a value ranging from 1.0 to 2.0. This can make the query planner preferably use the index to perform scanning. -- In the scenario where EXPLAIN ANALYZE needs to be used to view the actual execution plan and time, if a write query is to be performed, it is strongly recommended that a transaction is started first and then rollback is performed. -- For tables frequently updated and with the data size largely increased, table reorganization should be performed in appropriate time to lower the high water mark. 
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/syntax-specification.md b/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/syntax-specification.md deleted file mode 100644 index 7315ebac..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/dev/development-specifications/syntax-specification.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Syntax Specification -summary: Syntax Specification -author: Guo Huan -date: 2021-04-27 ---- - -# Syntax Specification - -## About NULL - -- Description: `NULL` judgment: `IS NULL`, `IS NOT NULL`. - -- Description: Beware of `boolean` types taking the values `true`, `false`, `NULL`. - -- Description: Beware of `NOT IN` collections with `NULL` elements. - -- Recommendation: Use `count(1)` or `count(*)` to count rows, but not `count(col)` to count rows, because `NULL` values will not be counted. - -- Rule: When `count(multi-column names)`, the multi-column names must be enclosed in parentheses, e.g. `count( (col1,col2,col3) )`. - -- Note: With multi-column `count`, the row is counted even if all columns are `NULL`, so the effect is the same as `count(*)`. - -- Note: `count(distingu col)` counts the number of non-`NULL` non-repeats of a column, `NULL` is not counted; `count(distingu (col1,col2,...) )` counts the unique values of multiple columns, `NULL` is counted, while `NULL` and `NULL` are considered the same. - -- Note: Distinction between count and sum of NULL - - ```sql - select count(1), count(a), sum(a) from (SELECT * FROM (VALUES (NULL), (2) ) v(a)) as foo where a is NULL; - count | count | sum - -------+-------+----- - 1 | 0 | - (1 row) - ``` - -- Check whether two values are the same (NULL is considered as the same value). - - ```sql - select null is distinct from null; - ?column? - \--------- - f - (1 row) - - select null is distinct from 1; - ?column? - \--------- - t - (1 row) - - select null is not distinct from null; - ?column? - \--------- - t - (1 row) - - select null is not distinct from 1; - ?column? - \--------- - f - (1 row) - ``` - -## About Invalid Indexes - -- During SQL statement writing, functions and expressions are usually used in query operations. It is not recommended that functions and expressions are used in condition columns. Using a function or expression in a condition column will make indexes of the condition column unused, thereby affecting the SQL query efficiency. It is recommended that functions or expressions are used in condition values. For example, - - ```sql - select name from tab where id+100>1000; - ``` - - This statement can be changed to the following: - - ```sql - select name from tab where id>1000-100; - ``` - -- Do not use left fuzzy query. For example, - - ```sql - select id from tab where name like '%ly'; - ``` - -- Do not use the negative query, such as not in/like. For example, - - ```sql - select id from tab where name not in ('ly','ty'); - ``` - -## Ensuring That All Variables and Parameters Are Used - -- Declare-variable also generates certain system overhead and makes code look loose. If some variables are not used in compilation, they will report alarms. Make sure that no any alarm is reported. 
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/developer-guide.md b/product/en/docs-mogdb/v5.2/developer-guide/developer-guide.md deleted file mode 100644 index 62601416..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/developer-guide.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Developer Guide -summary: Developer Guide -author: Guo Huan -date: 2023-05-22 ---- - -# Developer Guide - -- **[Application Development Guide](dev/application-development-tutorial.md)** -- **[Stored Procedure](1-1-stored-procedure.md)** -- **[User-defined Functions](user-defined-functions.md)** -- **[Overview of PL/pgSQL Functions](plpgsql/1-1-plpgsql-overview.md)** -- **[Scheduled Jobs](scheduled-jobs/scheduled-jobs.md)** -- **[Autonomous Transaction](autonomous-transaction/1-introduction-to-autonomous-transaction.md)** -- **[Logical Replication](logical-replication/logical-replication.md)** -- **[Extension](extension/extension.md)** -- **[MySQL Compatible Description](mysql-compatibility-description/mysql-compatible-description.md)** -- **[Materialized View Overview](materialized-view/1-materialized-view-overview.md)** -- **[Partition Management](partition-management/partition-management.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/extension.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/extension.md deleted file mode 100644 index 5064c8c7..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/extension/extension.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Extension -summary: Extension -author: Guo Huan -date: 2023-05-19 ---- - -# Extension - -- **[PostGIS Extension](postgis-extension/postgis-extension.md)** -- **[Foreign Data Wrapper](foreign-data-wrapper/fdw-introduction.md)** -- **[pg_bulkload](pg_bulkload-user-guide.md)** -- **[pg_prewarm](pg_prewarm-user-guide.md)** -- **[pg_repack](pg_repack-user-guide.md)** -- **[pg_trgm](pg_trgm-user-guide.md)** -- **[wal2json](wal2json-user-guide.md)** -- **[whale](whale.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/1-oracle_fdw.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/1-oracle_fdw.md deleted file mode 100644 index 44f5e289..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/1-oracle_fdw.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: oracle_fdw -summary: oracle_fdw -author: Zhang Cuiping -date: 2021-05-17 ---- - -# oracle_fdw - -oracle_fdw is an open-source plug-in. MogDB is developed and adapted based on the open-source [oracle_fdw Release 2.2.0](https://github.com/laurenz/oracle_fdw/archive/ORACLE_FDW_2_2_0.tar.gz). oracle_fdw is provided as an extension. - -  - -## Install Oracle Client - -1. The oracle_fdw relies on the Oracle client `so` file, the client package needs to be downloaded using the official address and then unzipped. - - x86: [https://www.oracle.com/database/technologies/instant-client/linux-x86-64-downloads.html](https://www.oracle.com/database/technologies/instant-client/linux-x86-64-downloads.html) - - ARM: [https://www.oracle.com/database/technologies/instant-client/linux-arm-aarch64-downloads.html](https://www.oracle.com/database/technologies/instant-client/linux-arm-aarch64-downloads.html) - - For example: - - ```shell - [root@mogdb-kernel-002 ~]# cd /opt - [root@mogdb-kernel-002 opt]# unzip instantclient-basic-linux.x64-21.11.0.0.0dbru.zip - ``` - -2. 
Use the database installation user omm to modify the environment configuration file: set ORACLE_HOME to the client path, and add $ORACLE_HOME to PATH and LD_LIBRARY_PATH. - - For example: - - ```shell - [root@mogdb-kernel-002 opt]# su - omm - [omm@mogdb-kernel-002 ~]$ vim /home/omm/.ptk_mogdb_env - ``` - - ```shell - export ORACLE_HOME=/opt/instantclient_21_11 - export PATH=$GPHOME/ptk_tool/bin:$GAUSSHOME/bin:$GPHOME/script:$ORACLE_HOME:$PATH - export LD_LIBRARY_PATH=$ORACLE_HOME:$GAUSSHOME/lib:$GPHOME/lib:$GPHOME/script/gspylib/clib:$LD_LIBRARY_PATH - ``` - - ```shell - [omm@mogdb-kernel-002 ~]$ source ~/.ptk_mogdb_env - ``` - -3. Restart the database - - ```shell - [omm@mogdb-kernel-002 ~]$ ptk cluster -n <cluster_name> restart - ``` - -  - -## Install oracle_fdw - -1. Access the [Download page](https://www.mogdb.io/downloads/mogdb/) of the MogDB official website and download the oracle_fdw extension for the version you need. - -2. Unpack the package, for example: - - ```SHELL - tar -xzvf oracle_fdw-1.0-5.0.0-01-Kylin-x86_64.tar.gz - ``` - -3. Go to the directory where the extension is located and execute the `make install` command. - - ```SHELL - cd oracle_fdw/ - make install - ``` - -4. Connect to the database and execute the `create extension oracle_fdw;` to use it. - - ```sql - MogDB=# CREATE EXTENSION oracle_fdw; - CREATE EXTENSION - ```
- -## Using oracle_fdw - -- To use oracle_fdw, install and connect to the Oracle server. The sketch after this list walks through the steps with hypothetical names. - -- Load the oracle_fdw extension using [CREATE EXTENSION oracle_fdw;](../../../reference-guide/sql-syntax/CREATE-EXTENSION.md) - -- Create a server object using [CREATE SERVER](../../../reference-guide/sql-syntax/CREATE-SERVER.md) - -- Create a user mapping using [CREATE USER MAPPING](../../../reference-guide/sql-syntax/CREATE-USER-MAPPING.md) - -- Directly use the @ symbol plus dblink_name for DML operations on external tables, i.e. `table_name@dblink_name`. See [Oracle DBLink Syntax Compatibility](../../../characteristic-description/compatibility/oracle-dblink-syntax-compatibility.md) for a detailed example. - - > Note: You can also create a foreign table using [CREATE FOREIGN TABLE](../../../reference-guide/sql-syntax/CREATE-FOREIGN-TABLE.md) and perform normal operations on the foreign table, such as [INSERT](../../../reference-guide/sql-syntax/INSERT.md), [UPDATE](../../../reference-guide/sql-syntax/UPDATE.md), [DELETE](../../../reference-guide/sql-syntax/DELETE.md), [SELECT](../../../reference-guide/sql-syntax/SELECT.md), [EXPLAIN](../../../reference-guide/sql-syntax/EXPLAIN.md), [ANALYZE](../../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md) and [COPY](../../../reference-guide/sql-syntax/COPY.md). The structure of the foreign table must be the same as that of the Oracle table. The first column in the table on the Oracle server must be unique, for example, **PRIMARY KEY** and **UNIQUE**. The schema and table configured for the foreign table must both be uppercase on the Oracle side, and to perform UPDATE or DELETE you must set OPTIONS (key 'true'), that is, specify the primary key of the foreign table. - -- Drop a foreign table using [DROP FOREIGN TABLE](../../../reference-guide/sql-syntax/DROP-FOREIGN-TABLE.md) - -- Drop a user mapping using [DROP USER MAPPING](../../../reference-guide/sql-syntax/DROP-USER-MAPPING.md) - -- Drop a server object using [DROP SERVER](../../../reference-guide/sql-syntax/DROP-SERVER.md) - -- Drop an extension using [DROP EXTENSION oracle_fdw;](../../../reference-guide/sql-syntax/DROP-EXTENSION.md)
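A minimal end-to-end sketch of the steps above; the server name, connection string, users, and table are hypothetical. Note the uppercase schema/table names and the `key 'true'` option required for UPDATE or DELETE.

```sql
CREATE SERVER ora_srv FOREIGN DATA WRAPPER oracle_fdw
    OPTIONS (dbserver '//192.168.0.10:1521/ORCLPDB1');
CREATE USER MAPPING FOR omm SERVER ora_srv
    OPTIONS (user 'SCOTT', password '********');
CREATE FOREIGN TABLE ft_emp (
    empno   integer OPTIONS (key 'true'),   -- primary key of the remote table
    ename   varchar(10)
) SERVER ora_srv OPTIONS (schema 'SCOTT', table 'EMP');
SELECT * FROM ft_emp;
```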
- -## Common Issues - -- When a foreign table is created on the MogDB, the table is not created on the Oracle server. You need to use the Oracle client to connect to the Oracle server to create a table. -- The Oracle user used for executing **CREATE USER MAPPING** must have the permission to remotely connect to the Oracle server and perform operations on tables. Before using a foreign table, you can use the Oracle client on the machine where the MogDB server is located and use the corresponding user name and password to check whether the Oracle server can be successfully connected and operations can be performed. -- When **CREATE EXTENSION oracle_fdw;** is executed, the message `libclntsh.so: cannot open shared object file: No such file or directory` is displayed. The reason is that the Oracle development library **libclntsh.so** is not in the related path of the system. You can find the specific path of **libclntsh.so**, and then add the folder where the **libclntsh.so** file is located to **/etc/ld.so.conf**. For example, if the path of **libclntsh.so** is **/usr/lib/oracle/11.2/client64/lib/libclntsh.so.11.1**, add **/usr/lib/oracle/11.2/client64/lib/** to the end of **/etc/ld.so.conf**. Run the **ldconfig** command for the modification to take effect. Note that this operation requires the **root** permission. - -
- -## Precautions - -- **SELECT JOIN** between two Oracle foreign tables cannot be pushed down to the Oracle server for execution. Instead, **SELECT JOIN** is divided into two SQL statements and transferred to the Oracle server for execution. Then the processing result is summarized in the MogDB. -- The **IMPORT FOREIGN SCHEMA** syntax is not supported. -- **CREATE TRIGGER** cannot be executed for foreign tables. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/2-mysql_fdw.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/2-mysql_fdw.md deleted file mode 100644 index e80813f6..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/2-mysql_fdw.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: mysql_fdw -summary: mysql_fdw -author: Zhang Cuiping -date: 2021-05-17 ---- - -# mysql_fdw - -mysql_fdw is an open-source plug-in. MogDB is developed and adapted based on the open-source [mysql_fdw Release 2.5.3](https://github.com/EnterpriseDB/mysql_fdw/archive/REL-2_5_3.tar.gz). mysql_fdw is provided as an extension. - -
- -## Install mysql_fdw - -1. mysql_fdw relies on the MySQL `so` library. - - Visit the [MySQL official website](https://downloads.mysql.com/archives/community/) to download the MySQL installation package and unzip it. Place the unzipped libmysqlclient.so file under $GAUSSHOME/lib. - - For example: - - ```shell - [omm@mogdb-kernel-002 ~]$ mkdir /opt/mysql/ - [omm@mogdb-kernel-002 ~]$ cd /opt/mysql/ - [omm@mogdb-kernel-002 mysql]$ tar -zxvf mysql-8.0.33-linux-glibc2.28-x86_64.tar.gz - [omm@mogdb-kernel-002 mysql]$ cd mysql-8.0.33-linux-glibc2.28-x86_64/lib - [omm@mogdb-kernel-002 lib]$ cp libmysqlclient.so $GAUSSHOME/lib - ``` - -2. Access the [Download page](https://www.mogdb.io/downloads/mogdb/) of the MogDB official website and download the mysql_fdw extension for the version you need. - -3. Unpack the package, for example: - - ```SHELL - tar -xzvf mysql_fdw-1.1-5.0.0-01-Kylin-x86_64.tar.gz - ``` - -4. Go to the directory where the extension is located and execute the `make install` command. - - ```SHELL - cd mysql_fdw/ - make install - ``` - -5. Connect to the database and execute the `create extension mysql_fdw;` to use it. - - ```sql - MogDB=# create extension mysql_fdw; - CREATE EXTENSION - ```
- -## Using mysql_fdw - -- To use mysql_fdw, install and connect to a MariaDB or MySQL server. The sketch after this list walks through the steps with hypothetical names. -- Load the mysql_fdw extension using [CREATE EXTENSION mysql_fdw;](../../../reference-guide/sql-syntax/CREATE-EXTENSION.md) -- Create a server object using [CREATE SERVER](../../../reference-guide/sql-syntax/CREATE-SERVER.md) -- Create a user mapping using [CREATE USER MAPPING](../../../reference-guide/sql-syntax/CREATE-USER-MAPPING.md) -- Create a foreign table using [CREATE FOREIGN TABLE](../../../reference-guide/sql-syntax/CREATE-FOREIGN-TABLE.md). The structure of the foreign table must be the same as that of the MySQL or MariaDB table. The first column in the table on the MySQL or MariaDB side must be unique, for example, **PRIMARY KEY** and **UNIQUE**. -- Perform normal operations on the foreign table, such as [INSERT](../../../reference-guide/sql-syntax/INSERT.md), [UPDATE](../../../reference-guide/sql-syntax/UPDATE.md), [DELETE](../../../reference-guide/sql-syntax/DELETE.md), [SELECT](../../../reference-guide/sql-syntax/SELECT.md), [EXPLAIN](../../../reference-guide/sql-syntax/EXPLAIN.md), [ANALYZE](../../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md) and [COPY](../../../reference-guide/sql-syntax/COPY.md). -- Drop a foreign table using [DROP FOREIGN TABLE](../../../reference-guide/sql-syntax/DROP-FOREIGN-TABLE.md) -- Drop a user mapping using [DROP USER MAPPING](../../../reference-guide/sql-syntax/DROP-USER-MAPPING.md) -- Drop a server object using [DROP SERVER](../../../reference-guide/sql-syntax/DROP-SERVER.md) -- Drop an extension using [DROP EXTENSION mysql_fdw;](../../../reference-guide/sql-syntax/DROP-EXTENSION.md)
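A minimal usage sketch of the steps above; the server address, user, database, and table are hypothetical.

```sql
CREATE SERVER mysql_srv FOREIGN DATA WRAPPER mysql_fdw
    OPTIONS (host '192.168.0.11', port '3306');
CREATE USER MAPPING FOR omm SERVER mysql_srv
    OPTIONS (username 'mysql_user', password '********');
CREATE FOREIGN TABLE ft_orders (
    id      integer,            -- must be unique on the MySQL side
    remark  varchar(100)
) SERVER mysql_srv OPTIONS (dbname 'testdb', table_name 'orders');
SELECT * FROM ft_orders;
```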
- -## Common Issues - -- When a foreign table is created on the MogDB, the table is not created on the MariaDB or MySQL server. You need to use the MariaDB or MySQL server client to connect to the MariaDB or MySQL server to create a table. -- The MariaDB or MySQL server user used for creating **USER MAPPING** must have the permission to remotely connect to the MariaDB or MySQL server and perform operations on tables. Before using a foreign table, you can use the MariaDB or MySQL server client on the machine where the MogDB server is located and use the corresponding user name and password to check whether the MariaDB or MySQL server can be successfully connected and operations can be performed. -- The **Can't initialize character set SQL_ASCII (path: compiled_in)** error occurs when a DML operation is performed on a foreign table. MariaDB does not support the **SQL_ASCII** encoding format. Currently, this problem can be resolved only by modifying the encoding format of the MogDB database. Change the database encoding by running **update pg_database set encoding = pg_char_to_encoding('UTF-8') where datname = 'postgres';**. Set **datname** based on the actual requirements. After the encoding format is changed, start a gsql session again so that mysql_fdw can use the updated parameters. You can also use `--locale=LOCALE` when running **gs_initdb** to set the default encoding format to non-SQL_ASCII.
- -## Precautions - -- **SELECT JOIN** between two MySQL foreign tables cannot be pushed down to the MariaDB or MySQL server for execution. Instead, **SELECT JOIN** is divided into two SQL statements and transferred to the MariaDB or MySQL server for execution. Then the processing result is summarized in the MogDB. - -- The **IMPORT FOREIGN SCHEMA** syntax is not supported. - -- **CREATE TRIGGER** cannot be executed for foreign tables. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/3-postgres_fdw.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/3-postgres_fdw.md deleted file mode 100644 index d280aa17..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/3-postgres_fdw.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: postgres_fdw -summary: postgres_fdw -author: Zhang Cuiping -date: 2021-05-17 ---- - -# postgres_fdw - -postgres_fdw is an open-source plug-in. Its code is released with the PostgreSQL source code. MogDB is developed and adapted based on the open-source postgres_fdw source code in PostgreSQL 9.4.26. postgres_fdw is provided as an extension. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** Currently, postgres_fdw supports only connection between MogDB databases.
-
-## Install postgres_fdw
-
-postgres_fdw is built with MogDB by default; after logging in to the database, you can enable it by running the `CREATE EXTENSION postgres_fdw;` statement.
-
-You can also install postgres_fdw separately by following these steps.
-
-1. Access the [Download page](https://www.mogdb.io/downloads/mogdb/) of the MogDB official website and download the postgres_fdw extension for the version you need.
-
-2. Unpack the package, for example:
-
-   ```SHELL
-   tar -xzvf postgres_fdw-1.0-5.0.0-01-Kylin-x86_64.tar.gz
-   ```
-
-3. Go to the directory where the extension is located and execute the `make install` command.
-
-   ```SHELL
-   cd postgres_fdw/
-   make install
-   ```
-
-4. Connect to the database and execute `create extension postgres_fdw;` to use it.
-
-   ```sql
-   MogDB=# create extension postgres_fdw;
-   CREATE EXTENSION
-   ```
-
-<br/>
-
-## Using postgres_fdw
-
-- Load the postgres_fdw extension using [CREATE EXTENSION postgres_fdw;](../../../reference-guide/sql-syntax/CREATE-EXTENSION.md)
-
-- Create a server object using [CREATE SERVER](../../../reference-guide/sql-syntax/CREATE-SERVER.md)
-
-- Create a user mapping using [CREATE USER MAPPING](../../../reference-guide/sql-syntax/CREATE-USER-MAPPING.md)
-
-- Perform DML operations on remote tables directly by appending the @ symbol and the server name to the table name, that is, `table_name@server_name` (see the sketch after this list).
-
-  > Note: You can also create a foreign table using [CREATE FOREIGN TABLE](../../../reference-guide/sql-syntax/CREATE-FOREIGN-TABLE.md), then perform normal operations on the foreign table, such as [INSERT](../../../reference-guide/sql-syntax/INSERT.md), [UPDATE](../../../reference-guide/sql-syntax/UPDATE.md), [DELETE](../../../reference-guide/sql-syntax/DELETE.md), [SELECT](../../../reference-guide/sql-syntax/SELECT.md), [EXPLAIN](../../../reference-guide/sql-syntax/EXPLAIN.md), [ANALYZE](../../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md) and [COPY](../../../reference-guide/sql-syntax/COPY.md). The structure of the foreign table must be the same as that of the remote MogDB table.
-
-- Drop a foreign table using [DROP FOREIGN TABLE](../../../reference-guide/sql-syntax/DROP-FOREIGN-TABLE.md)
-
-- Drop a user mapping using [DROP USER MAPPING](../../../reference-guide/sql-syntax/DROP-USER-MAPPING.md)
-
-- Drop a server object using [DROP SERVER](../../../reference-guide/sql-syntax/DROP-SERVER.md)
-
-- Drop an extension using [DROP EXTENSION postgres_fdw;](../../../reference-guide/sql-syntax/DROP-EXTENSION.md)
-
-<br/>
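-
-A minimal sketch of the workflow, with placeholder addresses and credentials; the `host`/`port`/`dbname` server options and `user`/`password` mapping options follow the upstream postgres_fdw extension and should be verified against your build.
-
-```sql
--- Hypothetical example; names and addresses are placeholders.
-CREATE EXTENSION postgres_fdw;
-CREATE SERVER mogdb_server FOREIGN DATA WRAPPER postgres_fdw
-    OPTIONS (host '192.168.0.20', port '26000', dbname 'postgres');
-CREATE USER MAPPING FOR CURRENT_USER SERVER mogdb_server
-    OPTIONS (user 'remote_user', password 'remote_pwd');
--- With the server object in place, DML can address a remote table directly:
-SELECT * FROM remote_table@mogdb_server;
-```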
-
-## Common Issues
-
-- When a foreign table is created in MogDB, the table is not created in the remote MogDB database. You need to use the gsql client to connect to the remote MogDB database and create the table there.
-- The MogDB user used for executing **CREATE USER MAPPING** must have the permission to remotely connect to the MogDB database and perform operations on tables. Before using a foreign table, you can use the gsql client on the local machine, together with the corresponding user name and password, to check whether the remote MogDB database can be connected and operated on successfully.
-
-<br/>
-
-## Precautions
-
-- **SELECT JOIN** between two postgres_fdw foreign tables cannot be pushed down to the remote MogDB database for execution. Instead, **SELECT JOIN** is divided into two SQL statements that are sent to the remote MogDB database for execution, and the processing result is then summarized locally.
-- The **IMPORT FOREIGN SCHEMA** syntax is not supported.
-- **CREATE TRIGGER** cannot be executed for foreign tables.
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/dblink.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/dblink.md
deleted file mode 100644
index 2ab60334..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/dblink.md
+++ /dev/null
@@ -1,176 +0,0 @@
----
-title: dblink
-summary: dblink
-author: Guo Huan
-date: 2021-11-22
----
-
-# dblink
-
-dblink executes queries in a remote database. The query is usually a `SELECT` statement, but it can be any SQL statement that returns rows.
-
-When two `text` parameters are given, the first is first looked up as the name of a persistent connection; if one is found, the command is executed on that connection. If none is found, the first parameter is treated as a connection information string in the same format as for `dblink_connect`, and the connection is kept open only for the duration of this command.
-
-<br/>
-
-## Install dependency packages
-
-Versions 5.0.0 and 5.0.1 of dblink require libtool-ltdl-devel.
-
-Check whether the package is already installed:
-
-```shell
-sudo rpm -qa |grep libtool-ltdl
-```
-
-If a conflicting version is present, uninstall it first:
-
-```shell
-sudo rpm -e libtool-ltdl libtool-ltdl-devel
-```
-
-Then install the dependency package:
-
-```shell
-sudo yum install -y libtool-ltdl-devel
-```
-
-<br/>
-
-## Install dblink
-
-dblink is built with MogDB by default; after logging in to the database, you can enable it by running the `CREATE EXTENSION dblink;` statement.
-
-You can also install dblink separately by following these steps.
-
-1. Access the [Download page](https://www.mogdb.io/downloads/mogdb/) of the MogDB official website and download the dblink extension for the version you need.
-
-2. Unpack the package, for example:
-
-   ```SHELL
-   tar -xzvf dblink-1.0-x.x.x-01-CentOS-x86_64.tar.gz
-   ```
-
-3. Go to the directory where the extension is located and execute the `make install` command.
-
-   ```SHELL
-   cd dblink/
-   make install
-   ```
-
-4. Connect to the database and execute `create extension dblink;` to use it.
-
-   ```sql
-   MogDB=# create extension dblink;
-   CREATE EXTENSION
-   ```
-
-<br/>
-
-## dblink Usage
-
-- Create the Extension in the Database
-
-  ```sql
-  create extension dblink;
-  ```
-
-- Check Whether the Extension Was Created Successfully
-
-  ```
-  \dx
-  ```
-
-- Connect to the Remote Database to Perform Query Operations
-
-  ```sql
-  select * from dblink('dbname=postgres host=127.0.0.1 port=12345 user=test password=Test123456'::text, 'select * from dblink_test'::text) t(id int, name varchar);
-  ```
-
-- Create Connections
-
-  ```sql
-  select dblink_connect('dblink_conn','hostaddr=127.0.0.1 port=12345 dbname=postgres user=test password=Test123456');
-  ```
-
-- Operate on Database Tables
-
-  (Views do not support query operations.)
-
-  ```sql
-  select dblink_exec('dblink_conn', 'create table ss(id int, name int)');
-  select dblink_exec('dblink_conn', 'insert into ss values(2,1)');
-  select dblink_exec('dblink_conn', 'update ss set name=2 where id=1');
-  select dblink_exec('dblink_conn', 'delete from ss where id=1');
-  ```
-
-- Close Connections
-
-  ```sql
-  select dblink_disconnect('dblink_conn');
-  ```
-
-<br/>
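-
-Because the first `text` argument is first matched against existing connection names (see the overview above), an established named connection can also be queried directly. A minimal sketch, assuming the `dblink_conn` connection and the `ss` table from the steps above:
-
-```sql
--- Query through the named connection; the alias defines the result row type.
-select * from dblink('dblink_conn', 'select * from ss') as t(id int, name int);
-```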
- -## Common dblink Functions - -- Load the dblink extension. - - ```sql - CREATE EXTENSION dblink; - ``` - -- Open a persistent connection to a remote database. - - ```sql - SELECT dblink_connect(text connstr); - ``` - -- Close a persistent connection to a remote database. - - ```sql - SELECT dblink_disconnect(); - ``` - -- Query data in a remote database. - - ```sql - SELECT * FROM dblink(text connstr, text sql); - ``` - -- Execute commands in a remote database. - - ```sql - SELECT dblink_exec(text connstr, text sql); - ``` - -- Return the names of all opened dblinks. - - ```sql - SELECT dblink_get_connections(); - ``` - -- Send an asynchronous query to a remote database. - - ```sql - SELECT dblink_send_query(text connname, text sql); - ``` - -- Check whether the connection is busy with an asynchronous query. - - ```sql - SELECT dblink_is_busy(text connname); - ``` - -- Delete the extension. - - ```sql - DROP EXTENSION dblink; - ``` - -
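-
-The asynchronous interfaces listed above can be combined so that a session does not block on a long-running remote query. A minimal sketch, assuming an open named connection `dblink_conn`; note that `dblink_get_result` comes from the upstream dblink module, and its availability in your MogDB version should be verified:
-
-```sql
--- Send the query without waiting for it to finish.
-select dblink_send_query('dblink_conn', 'select count(*) from ss');
--- Poll until the remote query completes (returns 1 while still busy).
-select dblink_is_busy('dblink_conn');
--- Fetch the result once the connection is no longer busy (upstream dblink API).
-select * from dblink_get_result('dblink_conn') as t(cnt bigint);
-```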
-
-## Precautions
-
-- Currently, dblink allows only a MogDB database to access another MogDB database; it does not allow a MogDB database to access a PostgreSQL database.
-- Currently, dblink does not support the thread pool mode.
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/fdw-introduction.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/fdw-introduction.md
deleted file mode 100644
index 336dd99c..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/fdw-introduction.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: Foreign Data Wrapper
-summary: Foreign Data Wrapper
-author: Zhang Cuiping
-date: 2021-05-17
----
-
-# Foreign Data Wrapper
-
-The foreign data wrapper (FDW) of MogDB implements cross-database operations between MogDB databases and remote databases. The following are currently supported: Oracle (oracle_fdw), MySQL/MariaDB (mysql_fdw), PostgreSQL/openGauss/MogDB (postgres_fdw), server-side data files (file_fdw), and remote queries through dblink.
-
-+ **[oracle_fdw](1-oracle_fdw.md)**
-+ **[mysql_fdw](2-mysql_fdw.md)**
-+ **[postgres_fdw](3-postgres_fdw.md)**
-+ **[file_fdw](file_fdw.md)**
-+ **[dblink](dblink.md)**
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/file_fdw.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/file_fdw.md
deleted file mode 100644
index 68c7ae79..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/extension/foreign-data-wrapper/file_fdw.md
+++ /dev/null
@@ -1,86 +0,0 @@
----
-title: file_fdw
-summary: file_fdw
-author: Guo Huan
-date: 2021-10-19
----
-
-# file_fdw
-
-The file_fdw module provides the foreign data wrapper file_fdw, which can be used to access data files in the file system of a server. The format of the data files must be readable by the **COPY FROM** command. For details, see [COPY](../../../reference-guide/sql-syntax/COPY.md). file_fdw provides read-only access to data files; it cannot write data to them.
-
-By default, file_fdw is compiled into MogDB. During database initialization, the plug-in is created in the **pg_catalog** schema.
-
-When you create a foreign table using file_fdw, you can add the following options:
-
-- filename
-
-  File to be read. This parameter is mandatory and must be an absolute path.
-
-- format
-
-  File format of the remote server, which is the same as the **FORMAT** option in the **COPY** statement. The value can be **text**, **csv**, **binary**, or **fixed**.
-
-- header
-
-  Whether the specified file has a header, which is the same as the **HEADER** option of the **COPY** statement.
-
-- delimiter
-
-  File delimiter, which is the same as the **DELIMITER** option of the **COPY** statement.
-
-- quote
-
-  Quote character of a file, which is the same as the **QUOTE** option of the **COPY** statement.
-
-- escape
-
-  Escape character of a file, which is the same as the **ESCAPE** option of the **COPY** statement.
-
-- null
-
-  Null string of a file, which is the same as the **NULL** option of the **COPY** statement.
-
-- encoding
-
-  Encoding of a file, which is the same as the **ENCODING** option of the **COPY** statement.
-
-- force_not_null
-
-  Column-level Boolean option. If it is true, values of the declared columns are not matched against the null string, so empty fields are read as empty strings rather than as NULL. This option is the same as the **FORCE_NOT_NULL** option of the **COPY** statement.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - file_fdw does not support the **OIDS** and **FORCE\_QUOTE** options of the **COPY** statement.
-> - These options can only be declared for a foreign table or the columns of the foreign table, not for file_fdw itself, nor for the server or user mapping that uses file_fdw.
-> - To modify table-level options, you must have system administrator permissions. For security reasons, only the system administrator can determine the files to be read.
-> - For a foreign table that uses file_fdw, **EXPLAIN** displays the name and size (in bytes) of the file to be read. If the keyword **COSTS OFF** is specified, the file size is not displayed.
-
-<br/>
-
-## Using file_fdw
-
-- To use file_fdw, prepare the file to be read and grant the database read permission on it. A minimal end-to-end sketch follows this list.
-
-- To create a server object, use [CREATE SERVER](../../../reference-guide/sql-syntax/CREATE-SERVER.md)
-
-- To create a user mapping, use [CREATE USER MAPPING](../../../reference-guide/sql-syntax/CREATE-USER-MAPPING.md)
-
-- To create a foreign table, use [CREATE FOREIGN TABLE](../../../reference-guide/sql-syntax/CREATE-FOREIGN-TABLE.md)
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-  >
-  > - The structure of the foreign table must be consistent with the data in the specified file.
-  > - When a foreign table is queried, no write operation is allowed.
-
-- To drop a foreign table, use [DROP FOREIGN TABLE](../../../reference-guide/sql-syntax/DROP-FOREIGN-TABLE.md)
-
-- To drop a user mapping, use [DROP USER MAPPING](../../../reference-guide/sql-syntax/DROP-USER-MAPPING.md)
-
-- To drop a server object, use [DROP SERVER](../../../reference-guide/sql-syntax/DROP-SERVER.md)
-
-<br/>
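-
-A minimal sketch of the steps above, assuming a CSV file at the hypothetical path `/data/cities.csv`; the options mirror those documented earlier, and the user mapping is omitted because file_fdw reads local files rather than connecting to a remote server.
-
-```sql
--- file_fdw is created in pg_catalog at initialization, so no CREATE EXTENSION is needed.
-CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw;
--- The column list must match the layout of the data file.
-CREATE FOREIGN TABLE ft_cities (id int, name text)
-    SERVER file_server
-    OPTIONS (filename '/data/cities.csv', format 'csv', header 'true', delimiter ',');
-SELECT * FROM ft_cities;
-```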
-
-## Precautions
-
-- **DROP EXTENSION file_fdw** is not supported.
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_bulkload-user-guide.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_bulkload-user-guide.md
deleted file mode 100644
index 803adeab..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_bulkload-user-guide.md
+++ /dev/null
@@ -1,108 +0,0 @@
----
-title: pg_bulkload User Guide
-summary: pg_bulkload User Guide
-author: Guo Huan
-date: 2021-11-29
----
-
-# pg_bulkload
-
-## pg_bulkload Overview
-
-pg_bulkload is a high-speed data loading tool for MogDB. It is faster than the **COPY** command because it can bypass the shared buffer and WAL buffer and write data directly to data files.
-
-<br/>
- -## Install pg_bulkload - -### Install Manually - -1. Access [Download page](https://www.mogdb.io/downloads/mogdb/) of the MogDB official website, download the pg_bulkload extension for the version you need. - -2. Unpack the package, for example: - - ```SHELL - tar -xzvf pg_bulkload-1.0-3.1.0-01-CentOS-x86_64.tar.gz - ``` - -3. Go to the directory where the extension is located and execute the `make install` command. - - ```SHELL - cd pg_bulkload/ - make install - ``` - -### Install By PTK - -Please refer to [Installing Extensions](https://docs.mogdb.io/en/ptk/v1.1/usage-install-plugin). - -
- -## Use pg_bulkload - -```bash -pg_bulkload --help -gsql -p 5432 postgres -r -``` - -```sql -CREATE EXTENSION pg_bulkload; -create table test_bulkload(id int, name varchar(128)); -``` - -Create a TXT file and write 100,000 lines of data. - -```bash -seq 100000| awk '{print $0"|bulkload"}' > bulkload_output.txt -``` - -
-
-### Using Parameters
-
-After the file is successfully created, run the following command:
-
-```bash
-pg_bulkload -i ./bulkload_output.txt -O test_bulkload -l test_bulkload.log -p 5432 -o "TYPE=csv" -o "DELIMITER=|" -d postgres -U hlv
-```
-
-Connect to the database to check whether the data is imported successfully:
-
-```sql
-select count(1) from test_bulkload;
-```
-
-<br/>
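-
-If every generated row was loaded, the check should report a count equal to the 100,000 lines in the input file (illustrative output):
-
-```sql
-select count(1) from test_bulkload;
- count
---------
- 100000
-(1 row)
-```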
-
-### Using the Control File
-
-Before using the control file for data import, clear the data imported into the table in the previous step.
-
-Write a **.ctl** file:
-
-```bash
-INPUT=/vdb/MogDB-server/dest/bulkload_output.txt
-LOGFILE = /vdb/MogDB-server/dest/test_bulkload.log
-LIMIT = INFINITE
-PARSE_ERRORS = 0
-CHECK_CONSTRAINTS = NO
-TYPE = CSV
-SKIP = 5
-DELIMITER = |
-QUOTE = "\""
-ESCAPE = "\""
-OUTPUT = test_bulkload
-MULTI_PROCESS = NO
-WRITER = DIRECT
-DUPLICATE_ERRORS = 0
-ON_DUPLICATE_KEEP = NEW
-TRUNCATE = YES
-```
-
-> Note: **SKIP** sets how many input lines to skip (five in this example); do not put explanatory text on the parameter line itself. The code logic identifies parameters in the .ctl file by line breaks, so the file must end with a trailing line break to avoid incorrect parameter identification.
-
-Run the following command:
-
-```bash
-pg_bulkload ./lottu.ctl -d postgres -U hlv
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_prewarm-user-guide.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_prewarm-user-guide.md
deleted file mode 100644
index eb652ff4..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_prewarm-user-guide.md
+++ /dev/null
@@ -1,109 +0,0 @@
----
-title: pg_prewarm User Guide
-summary: pg_prewarm User Guide
-author: Guo Huan
-date: 2021-11-29
----
-
-# pg_prewarm
-
-## pg_prewarm Overview
-
-The pg_prewarm module provides a convenient way to load relation data into the operating system buffer cache or the MogDB buffer cache.
-
-<br/>
- -## Install pg_prewarm - -### Install Manually - -1. Access [Download page](https://www.mogdb.io/downloads/mogdb/) of the MogDB official website, download the pg_prewarm extension for the version you need. - -2. Unpack the package, for example: - - ```SHELL - tar -xzvf pg_prewarm-1.1-3.1.0-01-CentOS-x86_64.tar.gz - ``` - -3. Go to the directory where the extension is located and execute the `make install` command. - - ```SHELL - cd pg_prewarm/ - make install - ``` - -### Install By PTK - -Please refer to [Installing Extensions](https://docs.mogdb.io/en/ptk/v1.1/usage-install-plugin). - -
-
-## pg_prewarm Loading Modes
-
-- **mode**: the loading mode. The options are **prefetch**, **read**, and **buffer**; the default is **buffer**.
-
-- **prefetch**: asynchronously preloads data into the OS cache.
-
-- **read**: has the same end result as prefetch, but is synchronous and supported on all platforms.
-
-- **buffer**: preloads data into the database cache.
-
-## Create and Use pg_prewarm
-
-1. Create the extension pg_prewarm.
-
-   ```sql
-   create extension pg_prewarm;
-   ```
-
-2. Create a test table.
-
-   ```sql
-   create table test_pre (id int4,name character varying(64),creat_time timestamp(6) without time zone);
-   ```
-
-3. Insert data into the test table.
-
-   ```sql
-   insert into test_pre select generate_series(1,100000),generate_series(1,100000)|| '_pre',clock_timestamp();
-   ```
-
-4. Check the table size.
-
-   ```sql
-   mogdb=# select pg_size_pretty(pg_relation_size('test_pre'));
-   pg_size_pretty
-   ----------------
-   5136 kB
-   (1 row)
-   ```
-
-5. Load the data into the database cache. The result shows that pg_prewarm loaded 642 data blocks.
-
-   ```sql
-   mogdb=# select pg_prewarm('test_pre','buffer');
-   pg_prewarm
-   ------------
-   642
-   (1 row)
-   ```
-
-6. Check the block size.
-
-   ```sql
-   mogdb=# select current_setting('block_size');
-   current_setting
-   ----------------
-   8192
-   (1 row)
-   ```
-
-   The size of each data block is 8 KB in MogDB by default, and 642 blocks × 8 KB match the table size:
-
-   ```sql
-   mogdb=# select 642*8;
-   ?column?
-   ----------
-   5136
-   (1 row)
-   ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_repack-user-guide.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_repack-user-guide.md
deleted file mode 100644
index 825407c9..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_repack-user-guide.md
+++ /dev/null
@@ -1,203 +0,0 @@
----
-title: pg_repack User Guide
-summary: pg_repack User Guide
-author: Guo Huan
-date: 2021-11-29
----
-
-# pg_repack
-
-## pg_repack Overview
-
-The pg_repack extension reclaims table space online, effectively addressing the table bloat caused by updating large amounts of data in a table. pg_repack does not need to hold an exclusive lock during processing and is therefore more lightweight than CLUSTER or VACUUM FULL.
-
-<br/>
- -## Install pg_repack - -### Install Manually - -1. Access [Download page](https://www.mogdb.io/downloads/mogdb/) of the MogDB official website, download the pg_repack extension for the version you need. - -2. Unpack the package, for example: - - ```SHELL - tar -xzvf pg_repack-1.4.6-3.1.0-01-CentOS-x86_64.tar.gz - ``` - -3. Go to the directory where the extension is located and execute the `make install` command. - - ```SHELL - cd pg_repack/ - make install - ``` - -### Install By PTK - -Please refer to [Installing Extensions](https://docs.mogdb.io/en/ptk/v1.1/usage-install-plugin). - -
-
-## Use pg_repack
-
-1. Connect to the database and create a user.
-
-   ```bash
-   gsql postgres -r -p 5001
-   ```
-
-   ```sql
-   create user test password 'Test123456' login sysadmin;
-   ```
-
-2. In the database, create the extension pg_repack.
-
-   ```sql
-   create extension pg_repack;
-   ```
-
-3. Create the table to be repacked; it must contain a primary key.
-
-   ```sql
-   create table repack_test(id int primary key, name name);
-   ```
-
-4. Run **\dx** to check whether pg_repack is successfully created.
-
-   ```
-   \dx
-   ```
-
-5. Insert 2,000,000 rows of data into the table **repack_test**.
-
-   ```sql
-   insert into repack_test select generate_series(1,2000000),'a';
-   ```
-
-6. Check the size of the table **repack_test**.
-
-   ```sql
-   select pg_size_pretty(pg_relation_size('repack_test'));
-   ```
-
-7. Delete some data and then check the table size.
-
-   ```sql
-   Delete from repack_test where id>1000000;
-   ```
-
-   The table size does not change after 1,000,000 rows of data are deleted. This is the case where pg_repack is needed.
-
-8. Use the pg_repack tool.
-
-   ```bash
-   pg_repack -d postgres -t test.repack_test -h 127.0.0.1 -U test -p 5001
-   ```
-
-   Parameters:
-
-   - -d dbname
-
-     Specifies the database name.
-
-   - -t test.repack_test
-
-     Specifies the table to be repacked.
-
-   - -h hostip
-
-     Specifies the server IP address.
-
-   - -U user
-
-     Specifies the username.
-
-   - -p port
-
-     Specifies the port.
-
-   - -a, --all
-
-     Repacks all tables and indexes.
-
-   - -t, --table=TABLE
-
-     Repacks only the specified table.
-
-   - -I, --parent-table=TABLE
-
-     Repacks the specified parent table and all sub-tables that inherit from it.
-
-   - -c, --schema=SCHEMA
-
-     Repacks all tables and indexes in the specified schema.
-
-   - -s, --tablespace=TBLSPC
-
-     Specifies the tablespace to which the repacked table or tables are migrated.
-
-   - -S, --moveidx
-
-     Also migrates the repacked indexes to the tablespace specified by `-s`.
-
-   - -o, --order-by=COLUMNS
-
-     Orders the data by the specified columns instead of the cluster keys.
-
-   - -n, --no-order
-
-     Performs an online vacuum full instead of cluster.
-
-   - -N, --dry-run
-
-     Outputs what pg_repack would do without executing it.
-
-   - -j, --jobs=NUM
-
-     Specifies the number of concurrent jobs. If indexes are processed, pg_repack chooses the smaller of the number of indexes and the number of concurrent jobs specified by `-j` as the actual number of concurrent jobs.
-
-   - -i, --index=INDEX
-
-     Processes the specified index.
-
-   - -x, --only-indexes
-
-     Processes all indexes of the specified table.
-
-   - -T, --wait-timeout=SECS
-
-     Sets how long (in seconds) to wait for conflicting backends; on timeout, the conflicting backends are ended directly.
-
-   - -D, --no-kill-backend
-
-     Specifies that other backend processes and threads are not killed when the timeout expires.
-
-   - -Z, --no-analyze
-
-     Specifies that no `analyze table` operation is performed after repacking.
-
-   - -k, --no-superuser-check
-
-     Skips the superuser check on the client.
-
-   - -C, --exclude-extension
-
-     Skips tables that belong to the specified extension.
-
-   - -e, --echo
-
-     Outputs all SQL statements executed by pg_repack.
-
-   - -E, --elevel=LEVEL
-
-     Sets the output log level, for example, DEBUG or WARNING.
-
-9. Check the size of the table **repack_test**. The table space is released successfully.
-
-   ```sql
-   select pg_size_pretty(pg_relation_size('repack_test'));
-   ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_trgm-user-guide.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_trgm-user-guide.md
deleted file mode 100644
index 9d331d57..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/extension/pg_trgm-user-guide.md
+++ /dev/null
@@ -1,79 +0,0 @@
----
-title: pg_trgm User Guide
-summary: pg_trgm User Guide
-author: Guo Huan
-date: 2021-11-29
----
-
-# pg_trgm
-
-## pg_trgm Overview
-
-The pg_trgm module provides functions and operators for determining the similarity of alphanumeric text based on trigram matching, as well as index operator classes that support fast searches for similar strings.
-
-<br/>
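-
-As an illustration of trigram matching (a minimal sketch, assuming the extension has been created as described in the sections below), the core pg_trgm interfaces expose the trigrams and the similarity score directly:
-
-```sql
--- show_trgm lists the trigrams extracted from a string.
-select show_trgm('word');
--- similarity returns a score between 0 (no shared trigrams) and 1 (identical trigram sets).
-select similarity('word', 'two words');
--- The % operator is true when the similarity exceeds the current threshold.
-select 'word' % 'words';
-```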
- -## Install pg_trgm - -### Install Manually - -1. Access [Download page](https://www.mogdb.io/downloads/mogdb/) of the MogDB official website, download the pg_trgm extension for the version you need. - -2. Unpack the package, for example: - - ```SHELL - tar -xzvf pg_trgm-1.0-3.1.0-01-CentOS-x86_64.tar.gz - ``` - -3. Go to the directory where the extension is located and execute the `make install` command. - - ```SHELL - cd pg_trgm/ - make install - ``` - -### Install By PTK - -Please refer to [Installing Extensions](https://docs.mogdb.io/en/ptk/v1.1/usage-install-plugin). - -
- -## Use pg_trgm - -1. In the database, create the following extension: - - ```sql - -- pg_trgm: - create extension pg_trgm; - ``` - -2. Create a table and insert 1,000,000 rows of data into the table. - - ```sql - create table trgm_test(id int, name varchar); - insert into trgm_test select generate_series(1,1000000),md5(random()::name); - ``` - -3. Query the table without using pg_trgm. - - ```sql - explain analyze select * from trgm_test where name like '%69089%'; - ``` - - ```sql - select * from trgm_test where name like '%69089%'; - ``` - -4. Create an index. - - ```sql - create index idx_trgm_test_1 on trgm_test using gin(name gin_trgm_ops); - ``` - - ```sql - explain analyze select * from trgm_test where name like '%ad44%'; - ``` - - ```sql - select * from trgm_test where name like '%305696%'; - ``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-extension.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-extension.md deleted file mode 100644 index ef904782..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-extension.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: PostGIS Extension -summary: PostGIS Extension -author: Guo Huan -date: 2022-04-29 ---- - -# PostGIS Extension - -+ **[Overview](postgis-overview.md)** -+ **[Using PostGIS](using-postgis.md)** -+ **[PostGIS Support and Constraints](postgis-support-and-constraints.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-overview.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-overview.md deleted file mode 100644 index 60df68ce..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-overview.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2022-04-29 ---- - -# Overview - -MogDB provides PostGIS Extension (PostGIS-2.4.2). PostGIS Extension is a spatial database extender for PostgreSQL. It provides the following spatial information services: spatial objects, spatial indexes, spatial functions, and spatial operators. PostGIS Extension complies with the OpenGIS specifications. 
- -PostGIS Extension depends on the following third-party open-source software: - -- Geos 3.6.2 -- Proj 4.9.2 -- Json 0.12.1 -- Libxml2 2.7.1 -- Gdal 1.11.0 \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-support-and-constraints.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-support-and-constraints.md deleted file mode 100644 index 584454f5..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/postgis-support-and-constraints.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: PostGIS Support and Constraints -summary: PostGIS Support and Constraints -author: Guo Huan -date: 2022-04-29 ---- - -# PostGIS Support and Constraints - -PostGIS extension of MogDB supports the following data types: - -- box2d -- box3d -- geometry_dump -- geometry -- geography - -## Supported Operators and Functions - -**Table 1** Operators and functions supported by PostGIS Extension - -| Category | Function | -| :------------------------------------- | :----------------------------------------------------------- | -| Management Functions | AddGeometryColumn, DropGeometryColumn, DropGeometryTable, PostGIS_Full_Version, PostGIS_GEOS_Version, PostGIS_Liblwgeom_Version, PostGIS_Lib_Build_Date, PostGIS_Lib_Version, PostGIS_PROJ_Version, PostGIS_Scripts_Build_Date, PostGIS_Scripts_Installed, PostGIS_Version, PostGIS_LibXML_Version, PostGIS_Scripts_Released, Populate_Geometry_Columns, UpdateGeometrySRID | -| Geometry Constructors | ST_BdPolyFromText, ST_BdMPolyFromText, ST_Box2dFromGeoHash, ST_GeogFromText, ST_GeographyFromText, ST_GeogFromWKB, ST_GeomCollFromText, ST_GeomFromEWKB, ST_GeomFromEWKT, ST_GeometryFromText, ST_GeomFromGeoHash, ST_GeomFromGML, ST_GeomFromGeoJSON, ST_GeomFromKML, ST_GMLToSQL, ST_GeomFromText, ST_GeomFromWKB, ST_LineFromMultiPoint, ST_LineFromText, ST_LineFromWKB, ST_LinestringFromWKB, ST_MakeBox2D, ST_3DMakeBox, ST_MakeEnvelope, ST_MakePolygon, ST_MakePoint, ST_MakePointM, ST_MLineFromText, ST_MPointFromText, ST_MPolyFromText, ST_Point, ST_PointFromGeoHash, ST_PointFromText, ST_PointFromWKB, ST_Polygon, ST_PolygonFromText, ST_WKBToSQL, ST_WKTToSQL | -| Geometry Accessors | GeometryType, ST_Boundary, ST_CoordDim, ST_Dimension, ST_EndPoint, ST_Envelope, ST_ExteriorRing, ST_GeometryN, ST_GeometryType, ST_InteriorRingN, ST_IsClosed, ST_IsCollection, ST_IsEmpty, ST_IsRing, ST_IsSimple, ST_IsValid, ST_IsValidReason, ST_IsValidDetail, ST_M, ST_NDims, ST_NPoints, ST_NRings, ST_NumGeometries, ST_NumInteriorRings, ST_NumInteriorRing, ST_NumPatches, ST_NumPoints, ST_PatchN, ST_PointN, ST_SRID, ST_StartPoint, ST_Summary, ST_X, ST_XMax, ST_XMin, ST_Y, ST_YMax, ST_YMin, ST_Z, ST_ZMax, ST_Zmflag, ST_ZMin | -| Geometry Editors | ST_AddPoint, ST_Affine, ST_Force2D, ST_Force3D, ST_Force3DZ, ST_Force3DM, ST_Force4D, ST_ForceCollection, ST_ForceSFS, ST_ForceRHR, ST_LineMerge, ST_CollectionExtract, ST_CollectionHomogenize, ST_Multi, ST_RemovePoint, ST_Reverse, ST_Rotate, ST_RotateX, ST_RotateY, ST_RotateZ, ST_Scale, ST_Segmentize, ST_SetPoint, ST_SetSRID, ST_SnapToGrid, ST_Snap, ST_Transform, ST_Translate, ST_TransScale | -| Geometry Outputs | ST_AsBinary, ST_AsEWKB, ST_AsEWKT, ST_AsGeoJSON, ST_AsGML, ST_AsHEXEWKB, ST_AsKML, ST_AsLatLonText, ST_AsSVG, ST_AsText, ST_AsX3D, ST_GeoHash | -| Operators | &&, &&&, &<, &<\|, &>, <<, <<\|, =, >>, @ , \|&> , \|>>, ~, ~=, <->, <#> | -| Spatial Relationships and Measurements | ST_3DClosestPoint, ST_3DDistance, ST_3DDWithin, ST_3DDFullyWithin, 
ST_3DIntersects, ST_3DLongestLine, ST_3DMaxDistance, ST_3DShortestLine, ST_Area, ST_Azimuth, ST_Centroid, ST_ClosestPoint, ST_Contains, ST_ContainsProperly, ST_Covers, ST_CoveredBy, ST_Crosses, ST_LineCrossingDirection, ST_Disjoint, ST_Distance, ST_HausdorffDistance, ST_MaxDistance, ST_DistanceSphere, ST_DistanceSpheroid, ST_DFullyWithin, ST_DWithin, ST_Equals, ST_HasArc, ST_Intersects, ST_Length, ST_Length2D, ST_3DLength, ST_Length_Spheroid, ST_Length2D_Spheroid, ST_3DLength_Spheroid, ST_LongestLine, ST_OrderingEquals, ST_Overlaps, ST_Perimeter, ST_Perimeter2D, ST_3DPerimeter, ST_PointOnSurface, ST_Project, ST_Relate, ST_RelateMatch, ST_ShortestLine, ST_Touches, ST_Within | -| Geometry Processing | ST_Buffer, ST_BuildArea, ST_Collect, ST_ConcaveHull, ST_ConvexHull, ST_CurveToLine, ST_DelaunayTriangles, ST_Difference, ST_Dump, ST_DumpPoints, ST_DumpRings, ST_FlipCoordinates, ST_Intersection, ST_LineToCurve, ST_MakeValid, ST_MemUnion, ST_MinimumBoundingCircle, ST_Polygonize, ST_Node, ST_OffsetCurve, ST_RemoveRepeatedPoints, ST_SharedPaths, ST_Shift_Longitude, ST_Simplify, ST_SimplifyPreserveTopology, ST_Split, ST_SymDifference, ST_Union, ST_UnaryUnion | -| Linear Referencing | ST_LineInterpolatePoint, ST_LineLocatePoint, ST_LineSubstring, ST_LocateAlong, ST_LocateBetween, ST_LocateBetweenElevations, ST_InterpolatePoint, ST_AddMeasure | -| Miscellaneous Functions | ST_Accum, Box2D, Box3D, ST_Expand, ST_Extent, ST_3Dextent, Find_SRID, ST_MemSize | -| Exceptional Functions | PostGIS_AddBBox, PostGIS_DropBBox, PostGIS_HasBBox | - -## Spatial Indexes - -PostGIS extension of MogDB supports Generalized Search Tree (GIST) spatial indexes (except for partitioned tables). Different from B-tree indexes, GiST indexes are adaptable to all kinds of irregular data structures, which can effectively improve the retrieval efficiency for geometry and geographic data. - -Run the following command to create a GiST index: - -```sql -MogDB=# CREATE INDEX indexname ON tablename USING GIST ( geometryfield ); -``` - -## Extension Constraints - -- Only row-store tables are supported. -- Topology (object management module) and Raster (grid data processing module) are not supported. -- BRIN indexes are not supported. -- The **spatial_ref_sys** table can only be queried during scale-out. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/using-postgis.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/using-postgis.md deleted file mode 100644 index b8ebe0b7..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/extension/postgis-extension/using-postgis.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Using PostGIS -summary: Using PostGIS -author: Guo Huan -date: 2022-04-29 ---- - -# Using PostGIS - -## Installing PostGIS Extension - -### Install Manually - -1. Access [Download page](https://www.mogdb.io/downloads/mogdb/) of the MogDB official website, download the PostGIS extension for the version you need. - -2. Unpack the package, for example: - - ```SHELL - tar -xzvf postgis-2.4.2-3.1.0-01-CentOS-x86_64.tar.gz - ``` - -3. Go to the directory where the extension is located and execute the `make install` command. - - ```SHELL - cd postgis/ - make install - ``` - -### Install By PTK - -Please refer to [Installing Extensions](https://docs.mogdb.io/en/ptk/v1.1/usage-install-plugin). - -
- -## Creating PostGIS Extension - -Run the **CREATE EXTENSION** command to create postgis, postgis_raster, and postgis_topology, respectively. - -```sql -MogDB=# CREATE EXTENSION postgis; -``` - -```sql -MogDB=# CREATE EXTENSION postgis_raster; -``` - -```sql -MogDB=# set behavior_compat_options='bind_procedure_searchpath'; -CREATE EXTENSION postgis_topology; -``` - -## Using PostGIS Extension - -Use the following function to invoke PostGIS Extension: - -```sql -MogDB=# SELECT GisFunction (Param1, Param2,......); -``` - -**GisFunction** is the function, and **Param1** and **Param2** are function parameters. The following SQL statements are a simple illustration for PostGIS use. For details about related functions, see [PostGIS 2.4.2 Manual](https://download.osgeo.org/postgis/docs/postgis-2.4.2.pdf). - -Example 1: Create a geometry table. - -```sql -MogDB=# CREATE TABLE cities ( id integer, city_name varchar(50) ); -MogDB=# SELECT AddGeometryColumn('cities', 'position', 4326, 'POINT', 2); -``` - -Example 2: Insert geometry data. - -```sql -MogDB=# INSERT INTO cities (id, position, city_name) VALUES (1,ST_GeomFromText('POINT(-9.5 23)',4326),'CityA'); -MogDB=# INSERT INTO cities (id, position, city_name) VALUES (2,ST_GeomFromText('POINT(-10.6 40.3)',4326),'CityB'); -MogDB=# INSERT INTO cities (id, position, city_name) VALUES (3,ST_GeomFromText('POINT(20.8 30.3)',4326), 'CityC'); -``` - -Example 3: Calculate the distance between any two cities among three cities. - -```sql -MogDB=# SELECT p1.city_name,p2.city_name,ST_Distance(p1.position,p2.position) FROM cities AS p1, cities AS p2 WHERE p1.id > p2.id; -``` - -## Deleting PostGIS Extension - -Run the following command to delete a PostGIS extension from MogDB: - -```sql -MogDB=# DROP EXTENSION postgis [CASCADE]; -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**NOTE:** If PostGIS Extension is the dependee of other objects (for example, geometry tables), you need to add the **CASCADE** keyword to delete all these objects. - -To completely delete PostGIS extension, run **gs_om** as user **omm** to delete PostGIS and the dynamic link libraries it depends on. - -```bash -gs_om -t postgis -m rmlib -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/wal2json-user-guide.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/wal2json-user-guide.md deleted file mode 100644 index d0cb8c89..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/extension/wal2json-user-guide.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: wal2json User Guide -summary: wal2json User Guide -author: Guo Huan -date: 2021-11-29 ---- - -# wal2json - -## wal2json Overview - -wal2json is a logical decoding extension. With this extension, you can access the tuples generated by INSERT and UPDATE and parse the content in WAL. - -The wal2json extension will generate a JSON object in each transaction. All new/old tuples are provided in the JSON object, and additional options can also include such attributes as transaction timestamp, restricted structure, data type, and transaction ID. - -
- -## Install wal2json - -### Install Manually - -1. Access [Download page](https://www.mogdb.io/downloads/mogdb/) of the MogDB official website, download the wal2json extension for the version you need. - -2. Unpack the package, for example: - - ```SHELL - tar -xzvf wal2json-2.3-3.1.0-01-CentOS-x86_64.tar.gz - ``` - -3. Go to the directory where the extension is located and execute the `make install` command. - - ```SHELL - cd wal2json/ - make install - ``` - -### Install By PTK - -Please refer to [Installing Extensions](https://docs.mogdb.io/en/ptk/v1.1/usage-install-plugin). - -
- -## Configure Database Parameters - -1. Modify the **postgresql.conf** file in the data directory of the database (both the primary and standby databases need to be modified). - - ```sql - wal_level = logical - max_replication_slots = 10 - max_wal_senders = 10 - ``` - -2. Modify **pg_hba.conf** (both the primary and standby databases need to be modified). - - ```sql - host replication all 127.0.0.1/32 trust - ``` - -3. Restart the database. - -
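-
-> Step 3 above can be performed with gs_ctl on a locally managed instance — a sketch, assuming a hypothetical data directory (clusters managed by gs_om or PTK should be restarted with their own tooling):
->
-> ```bash
-> gs_ctl restart -D /path/to/data
-> ```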
-
-## Use wal2json
-
-1. Open a new terminal and use wal2json to connect to the database.
-
-   ```bash
-   pg_recvlogical -d postgres --slot test2_slot --create -P wal2json -h 127.0.0.1 -p 5001 -U test
-   pg_recvlogical -d postgres -h 127.0.0.1 -p 5001 -U test --slot test2_slot --start -f -
-   ```
-
-2. Operate on the database.
-
-   ```sql
-   create table ff(id int, name int);
-   insert into ff values(1,2);
-   insert into ff values(2,4);
-   select * from ff;
-   ```
-
-3. The decoded changes are output in the wal2json terminal.
-
-   ```json
-   {"change":[]}
-   {"change":[]}
-   {"change":[{"kind":"insert","schema":"test","table":"ff","columnnames":["id","name"],"columntypes":["integer","integer"],"columnvalues":[1,2]}]}
-   {"change":[{"kind":"insert","schema":"test","table":"ff","columnnames":["id","name"],"columntypes":["integer","integer"],"columnvalues":[2,4]}]}
-   ```
-
-<br/>
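-
-When the slot is no longer needed, it can be removed with the `--drop` action documented in the parameter list below; leaving unused logical replication slots in place prevents WAL from being recycled. A sketch mirroring the connection options used above:
-
-```bash
-pg_recvlogical -d postgres --slot test2_slot --drop -h 127.0.0.1 -p 5001 -U test
-```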
-
-## wal2json Parameters
-
-Run the following command to list the pg_recvlogical options:
-
-```shell
-pg_recvlogical --help
-
-  -f, --file=FILE        receive log into this file. - for stdout
-  -n, --no-loop          do not loop on connection lost
-  -v, --verbose          output verbose messages
-  -V, --version          output version information, then exit
-  -?, --help             show this help, then exit
-
-Connection options:
-  -d, --dbname=DBNAME    database to connect to
-  -h, --host=HOSTNAME    database server host or socket directory
-  -p, --port=PORT        database server port number
-  -U, --username=NAME    connect as specified database user
-  -w, --no-password      never prompt for password
-  -W, --password         force password prompt (should happen automatically)
-
-Replication options:
-  -F  --fsync-interval=INTERVAL
-                         frequency of syncs to the output file (in seconds, defaults to 10)
-  -o, --option=NAME[=VALUE]
-                         Specify option NAME with optional value VAL, to be passed
-                         to the output plugin
-  -P, --plugin=PLUGIN    use output plugin PLUGIN (defaults to mppdb_decoding)
-  -s, --status-interval=INTERVAL
-                         time between status packets sent to server (in seconds, defaults to 10)
-  -S, --slot=SLOT        use existing replication slot SLOT instead of starting a new one
-  -I, --startpos=PTR     Where in an existing slot should the streaming start
-
-Action to be performed:
-  --create               create a new replication slot (for the slotname see --slot)
-  --start                start streaming in a replication slot (for the slotname see --slot)
-  --drop                 drop the replication slot (for the slotname see --slot)
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/extension/whale.md b/product/en/docs-mogdb/v5.2/developer-guide/extension/whale.md
deleted file mode 100644
index 86fd7020..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/extension/whale.md
+++ /dev/null
@@ -1,248 +0,0 @@
----
-title: whale
-summary: whale
-author: Zhang Cuiping
-date: 2022-06-27
----
-
-# whale
-
-## Introduction
-
-whale is an Oracle compatibility extension for MogDB that adds 15 Oracle functions and 7 Oracle packages. The functions include instrb, nls_charset_id, nls_charset_name, nls_lower, and so on. The Oracle packages include dbms_random, dbms_output, dbms_lock, dbms_application_info, dbms_metadata, dbms_job, and dbms_utility.
-
-> **Note**: The whale extension cannot be used together with the orafce extension. This issue will be resolved in a subsequent patch version.
-
-<br/>
- -## whale Installation - -### Install Manually - -1. Access [Download page](https://www.mogdb.io/downloads/mogdb/) of the MogDB official website, download the whale extension for the version you need. - -2. Unpack the package, for example: - - ```SHELL - tar -xzvf whale-x.x.x-01-CentOS-x86_64.tar.gz - ``` - -3. Go to the directory where the extension is located and execute the `make install` command. - - ```SHELL - cd whale/ - make install - ``` - -### Install By PTK - -Please refer to [Installing Extensions](https://docs.mogdb.io/en/ptk/v1.1/usage-install-plugin). - -
-
-## whale Usage
-
-Create the whale extension.
-
-```sql
-CREATE EXTENSION whale;
-```
-
-> Note: After the extension is created and the transaction is committed, existing connections need to reconnect; sessions can then find whale functions without specifying the whale schema.
-
-### Function
-
-The Oracle functions currently supported include instrb, nls_charset_id, nls_charset_name, nls_lower, nls_upper, ora_hash, remainder, replace, show, show_parameter, to_timestamp, to_yminterval, tz_offset, nullif, and ratio_to_report.
-
-### Package
-
-Commonly used functions are grouped into packages; some functions can be called only through the package they belong to.
-
-Packages are typically used in stored procedures. The following sections introduce dbms_random, dbms_output, dbms_lock, dbms_application_info, dbms_metadata, dbms_job, and dbms_utility.
-
-#### DBMS_RANDOM
-
-DBMS_RANDOM provides a built-in random data generator, including the following built-in interfaces:
-
-- DBMS_RANDOM.INITIALIZE(val IN BINARY_INTEGER): initializes the generator with a seed value.
-
-  ```sql
-  select DBMS_RANDOM.INITIALIZE(101);
-  ```
-
-- DBMS_RANDOM.NORMAL: returns a random number from the standard normal distribution.
-
-  ```sql
-  select DBMS_RANDOM.NORMAL();
-  ```
-
-- DBMS_RANDOM.RANDOM: generates and returns a random integer ranging from -2^31 to 2^31.
-
-  ```sql
-  select DBMS_RANDOM.RANDOM();
-  ```
-
-- DBMS_RANDOM.SEED: sets the seed for random data generation.
-
-  ```sql
-  select DBMS_RANDOM.SEED(1);
-  ```
-
-- DBMS_RANDOM.STRING(opt IN CHAR, len IN NUMBER) RETURN VARCHAR2: generates and returns a random string.
-
-  ```sql
-  select DBMS_RANDOM.STRING('A', 10);
-  select DBMS_RANDOM.STRING('x', 10);
-  ```
-
-  This function generates a random string in a specified mode. The optional modes are as follows:
-
-  'u' or 'U': returns only uppercase letters.
-  'l' or 'L': returns only lowercase letters.
-  'a' or 'A': returns a mixed string with uppercase and lowercase letters.
-  'x' or 'X': returns a mixed string of uppercase letters and digits.
-  'p' or 'P': returns any string that can be displayed.
-
-- DBMS_RANDOM.TERMINATE: This function is deprecated. It is still supported but not recommended; it is called after use of the package is finished.
-
-- DBMS_RANDOM.VALUE RETURN NUMBER: returns a random number greater than or equal to 0 and less than 1, with 15 digits to the right of the decimal point.
-
-#### DBMS_OUTPUT
-
-DBMS_OUTPUT allows you to send messages from stored procedures, packages, and triggers. DBMS_OUTPUT includes the following built-in interfaces:
-
-> **Note**:
->
-> - DBMS_OUTPUT applies only to stored procedures and does not apply to gsql.
->
-> - set serveroutput on is not supported.
->
-> - set serveroutput off is not supported.
-
-- DBMS_OUTPUT.ENABLE(buffer_size IN INTEGER DEFAULT 20000): The maximum value of **buffer_size** is 1000000 and the minimum value is 2000. Before using DBMS_OUTPUT, DBMS_OUTPUT.ENABLE must be executed.
-
-- DBMS_OUTPUT.GET_LINE(line INOUT text, status INOUT INTEGER): retrieves one line from the buffer.
-
-- DBMS_OUTPUT.GET_LINES(lines INOUT text[], numlines INOUT INTEGER): retrieves an array of lines from the buffer.
-
-  **Note**: After executing DBMS_OUTPUT.GET_LINE or DBMS_OUTPUT.GET_LINES, clear the buffer.
-
-- DBMS_OUTPUT.NEW_LINE: puts an end-of-line marker in the buffer.
-- DBMS_OUTPUT.PUT(item IN VARCHAR2): puts a partial line in the buffer.
-- DBMS_OUTPUT.PUT_LINE(item IN VARCHAR2): puts one complete line in the buffer.
-
-- DBMS_OUTPUT.DISABLE(): disables the package and releases the requested buffer space.
-
-- DBMS_OUTPUT data type: the DBMS_OUTPUT package includes a built-in CHARARR data type, which can be referenced as DBMS_OUTPUT.CHARARR. The DBMS_OUTPUT package does not support the DBMSOUTPUT_LINESARRAY data type.
-
-#### DBMS_LOCK
-
-DBMS_LOCK provides an interface for the Oracle lock management service.
-
-Currently, dbms_lock.sleep is provided; it is implemented based on pg_sleep().
-
-#### DBMS_APPLICATION_INFO
-
-DBMS_APPLICATION_INFO is used for recording the name of a module or transaction being executed in the database so that module performance can be traced and modules can be used in debugging.
-
-DBMS_APPLICATION_INFO includes the following built-in interfaces:
-
-- DBMS_APPLICATION_INFO.READ_CLIENT_INFO (client_info OUT VARCHAR2): reads the value of client_info of the current session.
-
-- DBMS_APPLICATION_INFO.READ_MODULE (module_name OUT VARCHAR2, action_name OUT VARCHAR2): reads the module and the related action field of the current session.
-- DBMS_APPLICATION_INFO.SET_CLIENT_INFO (client_info IN VARCHAR2): provides additional information about the client application.
-- DBMS_APPLICATION_INFO.SET_MODULE ( module_name IN VARCHAR2, action_name IN VARCHAR2): sets the name of the current application or module.
-
-- DBMS_APPLICATION_INFO.SET_ACTION (action_name IN VARCHAR2): sets the name of the current action under the current module.
-
-#### DBMS_METADATA
-
-DBMS_METADATA provides a way to create and submit an XML document containing metadata retrieved from the database dictionary, or to create the DDL needed to recreate an object.
-
-Currently, dbms_metadata.get_ddl() is provided.
-
-#### DBMS_JOB
-
-DBMS_JOB schedules and manages jobs in a job queue.
-
-DBMS_JOB includes the following built-in interfaces:
-
-- DBMS_JOB.BROKEN( job int8, broken bool, next_date timestamp default sysdate): sets the broken flag. **broken** indicates whether the job is disabled; a disabled job has its status (job_status) set to **d**.
-- DBMS_JOB.CHANGE( job int8, what text, next_date timestamp, "interval" text, instance int4 default null, force bool default false): modifies any of the settable fields of a job.
-
-  At least one of **what**, **next_date**, and **interval** passed to CHANGE must be non-null. A parameter passed as null is left unmodified.
-
-  instance: has no effect in dbms_job.
-
-  force: has no effect in dbms_job.
-
-- DBMS_JOB.INSTANCE( job int8, instance int4, force bool default false): does not perform any operation.
-
-- DBMS_JOB.INTERVAL( job int8, "interval" text): modifies the running frequency of a job.
-
-- DBMS_JOB.NEXT_DATE( job int8, next_date timestamp): modifies the time at which a job runs next.
-
-  **Note**: next_date is usually set to a time later than the current system time. If it is set to the current system time, the task is executed at once and next_date is set to the current system time plus interval.
-
-- DBMS_JOB.REMOVE(job int8): deletes a job from the job queue.
-- DBMS_JOB.RUN( job int8, force boolean default false): runs a job. The force parameter has no effect.
-
-- DBMS_JOB.SUBMIT( job out int8, what in text, next_date in timestamp default sysdate, "interval" in text default null, no_parse in bool default false, instance in int4 default null, force in bool default false): submits a new job. The no_parse, instance, and force parameters are not used; specifying them produces a prompt.
-- DBMS_JOB.USER_EXPORT(job IN int8, mycall OUT text): generates the call text needed to recreate a specified job.
-- DBMS_JOB.WHAT( job int8, what text): modifies the job definition and updates its environment.
If **what** contains an error, an error is reported.
-
-#### DBMS_UTILITY
-
-DBMS_UTILITY provides utility subprograms for processing and converting data. It includes the following built-in interfaces:
-
-- DBMS_UTILITY.CANONICALIZE( name IN text, canon_name OUT text, canon_len IN int4): canonicalizes a given string. It uppercases a single reserved word or keyword and trims the spaces around a single token, so that table becomes TABLE.
-
-- DBMS_UTILITY.COMMA_TO_TABLE( list IN text, tablen OUT int4, tab OUT text[]): converts a comma-separated list of names into a PL/SQL table of names.
-
-- DBMS_UTILITY.TABLE_TO_COMMA(tab IN text[], tablen OUT int4, val OUT text): converts a PL/SQL table of names into a comma-separated list of names.
-- DBMS_UTILITY.DB_VERSION(INOUT version text, INOUT compatibility text): returns the database version.
-- DBMS_UTILITY.EXEC_DDL_STATEMENT(IN parse_string text): executes the DDL statement in parse_string.
-
-The DBMS_UTILITY package supports the INSTANCE_RECORD, DBLINK_ARRAY, INDEX_TABLE_TYPE, INSTANCE_TABLE, LNAME_ARRAY, NAME_ARRAY, NUMBER_ARRAY, and UNCL_ARRAY data types.
-
-## Example
-
-### Function
-
-- INSTRB
-
-  ```sql
-  MogDB=# select INSTRB('123456123', '123', 4);
-   instrb
-  --------
-        7
-  (1 row)
-  ```
-
-- NLS_CHARSET_ID
-
-  ```sql
-  MogDB=# SELECT NLS_CHARSET_ID('gbk');
-   nls_charset_id
-  ----------------
-               6
-  (1 row)
-  ```
-
-- dbms_random provides a built-in random-number generator.
-
-  ```sql
-  MogDB=# select DBMS_RANDOM.VALUE(1, '100');
-        value
-  ------------------
-   92.4730090592057
-  (1 row)
-  ```
-
-### Package
-
-The following uses dbms_random as an example.
-
-```sql
-MogDB=# select DBMS_RANDOM.VALUE();
-       value
--------------------
- 0.482205999083817
-(1 row)
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/1-logical-decoding.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/1-logical-decoding.md
deleted file mode 100644
index 388d2c74..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/1-logical-decoding.md
+++ /dev/null
@@ -1,67 +0,0 @@
----
-title: Overview
-summary: Overview
-author: Zhang Cuiping
-date: 2021-05-10
----
-
-# Overview
-
-## Function
-
-The data replication capabilities supported by MogDB are as follows:
-
-Data is periodically synchronized to heterogeneous databases (such as Oracle databases) using a data migration tool. Real-time data replication is not supported, so requirements for real-time data synchronization to heterogeneous databases cannot be satisfied this way.
-
-MogDB provides the logical decoding function to generate logical logs by decoding Xlogs. A target database parses the logical logs to replicate data in real time. For details, see Figure 1. Logical replication reduces the restrictions on target databases, allowing for data synchronization between heterogeneous databases and between homogeneous databases with different forms. It allows data to be read and written on a target database during data synchronization, reducing the data synchronization latency.
-
-**Figure 1** Logical replication
-
-![image-20210512181021060](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/logical-decoding-2.png)
-
-Logical replication consists of logical decoding and data replication. Logical decoding outputs logical logs by transaction. The database service or middleware parses the logical logs to implement data replication. Currently, MogDB supports only logical decoding.
Therefore, this section covers only logical decoding.
-
-Logical decoding provides basic transaction decoding capabilities for logical replication. MogDB uses SQL functions for logical decoding. This method features easy function calling, requires no tools to obtain logical logs, and provides specific interfaces for interconnecting with external replay tools, saving the need for additional adaptation.
-
-Logical logs are output only after transactions are committed, because they use transactions as the unit and logical decoding is driven by users. Therefore, to prevent Xlogs from being reclaimed by the system when transactions start, and to prevent required transaction information from being reclaimed by **VACUUM**, MogDB uses logical replication slots to block Xlog recycling.
-
-A logical replication slot represents a stream of changes that can be replayed in other databases in the order they were generated in the original database. Each owner of logical logs maintains one logical replication slot.
-
-## Precautions
-
-- DDL statement decoding is not supported. When a specific DDL statement (for example, to truncate an ordinary table or exchange a partitioned table) is executed, decoded data may be lost.
-- Decoding for column-store data and data page replication is not supported.
-- Logical decoding is not supported on the cascaded standby node.
-- After a DDL statement (for example, **ALTER TABLE**) is executed, the physical logs that are not decoded before the DDL statement execution may be lost.
-- The size of a single tuple cannot exceed 1 GB, and decoded data may be larger than inserted data. Therefore, it is recommended that the size of a single tuple be less than or equal to 500 MB.
-- MogDB supports the following data types for decoding: **INTEGER**, **BIGINT**, **SMALLINT**, **TINYINT**, **SERIAL**, **SMALLSERIAL**, **BIGSERIAL**, **FLOAT**, **DOUBLE PRECISION**, **DATE**, **TIME[WITHOUT TIME ZONE]**, **TIMESTAMP[WITHOUT TIME ZONE]**, **CHAR(***n***)**, **VARCHAR(***n***)**, and **TEXT**.
-- If SSL connections are required, make sure that the GUC parameter **ssl** is set to **on**.
-- The logical replication slot name must contain fewer than 64 characters and contain only one or more types of the following characters: lowercase letters, digits, and underscores (_).
-- Currently, logical replication does not support the MOT feature.
-- After the database where a logical replication slot resides is deleted, the replication slot becomes unavailable and needs to be manually deleted.
-- Only the UTF-8 character set is supported.
-- To decode multiple databases, you need to create a stream replication slot in each database and start decoding. Logs need to be scanned for decoding of each database.
-- Forcible startup is not supported. After forcible startup, you need to export all data again.
-- During decoding on the standby node, the decoded data may increase during switchover and failover, which needs to be manually filtered out. When the quorum protocol is used, switchover and failover should be performed on the standby node that is to be promoted to primary, and logs must be synchronized from the primary node to the standby node.
-- The same replication slot for decoding cannot be used between the primary node and standby node or between different standby nodes at the same time. Otherwise, data inconsistency occurs.
-- Replication slots can only be created or deleted on hosts.
-- After the database is restarted due to a fault or the logical replication process is restarted, duplicate decoded data exists. You need to filter out the duplicate data. -- If the computer kernel is faulty, garbled characters are displayed during decoding which need to be manually or automatically filtered out. -- Currently, the logical decoding on the standby node does not support enabling the ultimate RTO. -- Ensure that the long transaction is not started during the creation of the logical replication slot. If the long transaction is started, the creation of the logical replication slot will be blocked. -- Interval partitioned tables cannot be replicated. -- Global temporary tables are not supported. -- After a DDL statement is executed in a transaction, the DDL statement and subsequent statements are not decoded. -- To perform decoding on the standby node, set the GUC parameter **enable_slot_log** to **on** on the corresponding host. -- Do not perform operations on the replication slot on other nodes when the logical replication slot is in use. To delete a replication slot, stop decoding in the replication slot first. - -## Performance - -When pg_logical_slot_get_changes is used in the BenchmarkSQL 5.0 with 100 warehouses: - -- If 4000 lines of data (about 5 MB to 10 MB logs) are decoded at a time, the decoding performance ranges from 0.3 MB/s to 0.5 MB/s. -- If 32000 lines of data (about 40 MB to 80 MB logs) are decoded at a time, the decoding performance ranges from 3 MB/s to 5 MB/s. -- If 256000 lines of data (about 320 MB to 640 MB logs) are decoded at a time, the decoding performance ranges from 3 MB/s to 5 MB/s. -- If the amount of data to be decoded at a time still increases, the decoding performance is not significantly improved. - -Compared with the decoding performance in pg\_logical\_slot\_get\_changes mode, the decoding performance in pg\_logical\_slot\_peek\_changes + pg\_replication\_slot\_advance mode decreases by 30% to 50%. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/2-logical-decoding-by-sql-function-interfaces.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/2-logical-decoding-by-sql-function-interfaces.md deleted file mode 100644 index 5c71cb48..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/2-logical-decoding-by-sql-function-interfaces.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Logical Decoding by SQL Function Interfaces -summary: Logical Decoding by SQL Function Interfaces -author: Zhang Cuiping -date: 2021-05-10 ---- - -# Logical Decoding by SQL Function Interfaces - -In MogDB, you can call SQL functions to create, delete, and push logical replication slots, as well as obtain decoded transaction logs. - -## Prerequisites - -- Currently, logical logs are extracted from host nodes. Since SSL connections are disabled by default, to perform logical replication, set the GUC parameter [ssl](../../../reference-guide/guc-parameters/connection-and-authentication/security-and-authentication.md#ssl) to **on** on host nodes. - - > **NOTE:** For security purposes, ensure that SSL connections are enabled. - -- The GUC parameter [wal_level](../../../reference-guide/guc-parameters/write-ahead-log/settings.md#wal_level) is set to **logical**. 
- -- The GUC parameter [max_replication_slots](../../../reference-guide/guc-parameters/ha-replication/sending-server.md#max_replication_slots) is set to a value greater than the number of physical replication slots and logical replication slots required by each node. - - Physical replication slots provide an automatic method to ensure that Xlogs are not removed from a primary node before they are received by all the standby nodes and secondary nodes. That is, physical replication slots are used to support HA clusters. The number of physical replication slots required by a cluster is equal to the total number of standby and secondary nodes. For example, if an HA cluster has 1 primary node, 1 standby node, and 1 secondary node, the number of required physical replication slots will be 2. If an HA cluster has 1 primary node and 3 standby nodes, the number of required physical replication slots will be 3. - - Plan the number of logical replication slots as follows: - - - A logical replication slot can carry changes of only one database for decoding. If multiple databases are involved, create multiple logical replication slots. - - If logical replication is needed by multiple target databases, create multiple logical replication slots in the source database. Each logical replication slot corresponds to one logical replication link. - -- Only initial users and users with the **REPLICATION** permission can perform this operation. When separation of duties is disabled, database administrators can perform logical replication operations. When separation of duties is enabled, database administrators are not allowed to perform logical replication operations. - -- Currently, primary/standby/secondary deployment is not supported by default. - -## Procedure - -1. Log in to the primary node of the MogDB cluster as the cluster installation user. - -2. Run the following command to connect to the default database **postgres**: - - ```bash - gsql -d postgres -p 16000 -r - ``` - - In this command, **16000** is the database port number. Replace it with the actual port number. - -3. Create a logical replication slot named **slot1**. - - ``` - MogDB=# SELECT * FROM pg_create_logical_replication_slot('slot1', 'mppdb_decoding'); - slotname | xlog_position - ----------+--------------- - slot1 | 0/601C150 - (1 row) - ``` - -4. Create a table **t** in the database and insert data into it. - - ``` - MogDB=# CREATE TABLE t(a int PRIMARY KEY, b int); - MogDB=# INSERT INTO t VALUES(3,3); - ``` - -5. Read the decoding result of **slot1**. A maximum of 4096 decoded records are returned. - - ``` - MogDB=# SELECT * FROM pg_logical_slot_peek_changes('slot1', NULL, 4096); - location | xid | data - -----------+-------+------------------------------------------------------------------------------------------------------------------------------------------------- - ------------------------------------------- - 0/601C188 | 1010023 | BEGIN 1010023 - 0/601ED60 | 1010023 | COMMIT 1010023 CSN 1010022 - 0/601ED60 | 1010024 | BEGIN 1010024 - 0/601ED60 | 1010024 | {"table_name":"public.t","op_type":"INSERT","columns_name":["a","b"],"columns_type":["integer","integer"],"columns_val":["3","3"],"old_keys_name":[],"old_keys_type":[],"old_keys_val":[]} - 0/601EED8 | 1010024 | COMMIT 1010024 CSN 1010023 - (5 rows) - ``` - -6. Delete the logical replication slot **slot1**.
- - ``` - MogDB=# SELECT * FROM pg_drop_replication_slot('slot1'); - pg_drop_replication_slot - -------------------------- - - (1 row) - ``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/logical-decoding.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/logical-decoding.md deleted file mode 100644 index 888e1702..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-decoding/logical-decoding.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Logical Decoding -summary: Logical Decoding -author: Guo Huan -date: 2023-05-19 ---- - -# Logical Decoding - -+ **[Overview](1-logical-decoding.md)** -+ **[Logical Decoding by SQL Function Interfaces](2-logical-decoding-by-sql-function-interfaces.md)** diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-replication.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-replication.md deleted file mode 100644 index 76684ddf..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/logical-replication.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Logical Replication -summary: Logical Replication -author: Guo Huan -date: 2023-05-19 ---- - -# Logical Replication - -- **[Logical Decoding](logical-decoding/logical-decoding.md)** -- **[Publication-Subscription](publication-subscription/publication-subscription.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/architecture.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/architecture.md deleted file mode 100644 index 7aeb8ed1..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/architecture.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Architecture -summary: Architecture -author: Guo Huan -date: 2022-04-29 ---- - -# Architecture - -Changes on publishers are sent to subscribers in real time as they occur. The subscriber applies data in the order in which it is committed on the publisher to ensure transactional consistency of publications in any single subscription. - -Logical replication is built with an architecture similar to physical streaming replication. It is implemented by the walsender and apply processes. The walsender process starts logical decoding of the WAL and loads the standard logical decoding plug-in (pgoutput). The plug-in transforms the changes read from the WAL into the logical replication protocol and filters the data according to the publication specifications. The data is then continuously transferred to the apply worker using the streaming replication protocol, and the apply worker maps the data to the local table and applies the changes it receives in the correct transactional order. - -The apply process in the subscriber database always runs with **session_replication_role** set to **replica**, which produces the usual effects on triggers and constraints. - -The logical replication apply process currently fires only row triggers, not statement triggers. However, the initial table synchronization is implemented through methods similar to **COPY** command execution, and therefore, row and statement triggers for INSERT are fired.
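As a minimal sketch of this trigger behavior — assuming MogDB retains the PostgreSQL-style `ENABLE ALWAYS TRIGGER` clause, and using the hypothetical names `t_sub` and `audit_func` — a subscriber-side row trigger can be made to fire even under the apply process's **replica** setting:

```sql
-- Minimal sketch (hypothetical names). Ordinary triggers do not fire while
-- session_replication_role is set to replica, as it is in the apply process;
-- marking the trigger as ALWAYS makes it fire for replicated rows as well.
CREATE TABLE t_sub(a int PRIMARY KEY, b int);

CREATE OR REPLACE FUNCTION audit_func() RETURNS trigger AS $$
BEGIN
    RAISE NOTICE 'row applied: %', NEW.a;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER t_sub_audit AFTER INSERT ON t_sub
    FOR EACH ROW EXECUTE PROCEDURE audit_func();

-- Fire this trigger regardless of session_replication_role.
ALTER TABLE t_sub ENABLE ALWAYS TRIGGER t_sub_audit;
```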
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/configuration-settings.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/configuration-settings.md deleted file mode 100644 index f9073bab..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/configuration-settings.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Configuration Settings -summary: Configuration Settings -author: Guo Huan -date: 2022-04-29 ---- - -# Configuration Settings - -Publication-subscription requires some configuration options to be set. - -On the publisher side, **wal_level** must be set to **logical**, and the value of **max_replication_slots** must be at least the number of subscriptions expected to be connected plus the number of connections reserved for table synchronization. In addition, the value of **max_wal_senders** must be greater than or equal to the value of **max_replication_slots** plus the number of physical replication slots connected at the same time plus 1. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** If a subscriber is activated and subscribes to a publication, a temporary connection to the publisher needs to be established to check whether the publication subscribed to by the subscriber exists on the publisher. The publisher creates a temporary WAL sender. After the check is complete, the temporary connection is disconnected and released immediately. - -**max_replication_slots** must also be set on the subscriber. It must be set to at least the number of subscriptions that will be added to the subscriber. **max_logical_replication_workers** must be set to at least the number of subscriptions plus the number of connections reserved for table synchronization. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/conflicts.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/conflicts.md deleted file mode 100644 index d06c4d6b..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/conflicts.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Conflicts -summary: Conflicts -author: Guo Huan -date: 2022-04-29 ---- - -# Conflicts - -Logical replication behaves similarly to common DML operations. Even if the data is modified locally on the subscriber node, logical replication updates the data based on the received changes. If the incoming data violates any constraints, the replication will stop. This situation is called a conflict. When UPDATE or DELETE operations are replicated, missing data will not cause a conflict, and such operations will simply be skipped. - -A conflict will cause errors and stop the replication, which must be resolved manually by the user. Details about the conflict can be found in the subscriber's server log. - -The conflict can be resolved either by changing the data on the subscriber (so that the data does not conflict with incoming data) or by skipping the transaction that conflicts with the existing data. The transaction can be skipped by calling the **pg_replication_origin_advance()** function with **node_name** corresponding to the subscription name, and an Xlog LSN. The current position of the replication origin can be seen in the **pg_replication_origin_status** system view.
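As an illustration of the second approach — a minimal sketch, assuming a subscription named `mysub` and an LSN taken from the conflict message in the subscriber's server log (the LSN value below is hypothetical):

```sql
-- Check the current position of the replication origin.
SELECT * FROM pg_replication_origin_status;

-- Skip the conflicting transaction by advancing the origin past it.
-- '0/601EED8' stands in for the LSN reported in the server log.
SELECT pg_replication_origin_advance('mysub', '0/601EED8');
```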
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/monitoring.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/monitoring.md deleted file mode 100644 index 701e70b4..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/monitoring.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Monitoring -summary: Monitoring -author: Guo Huan -date: 2022-04-29 ---- - -# Monitoring - -Because logical replication is based on an architecture similar to physical streaming replication, monitoring on a publication node is similar to monitoring on a primary physical replication node. - -Monitoring information about subscriptions is available in the **pg_stat_subscription** view. This view contains one row for every subscription worker. A subscription can have zero or more active subscription workers depending on its state. - -Normally, a single apply process runs for an enabled subscription. A disabled or crashed subscription does not have rows in this view. If data synchronization of any table is in progress, there will be additional workers for the tables being synchronized. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/publication-subscription.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/publication-subscription.md deleted file mode 100644 index 1243d6a8..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/publication-subscription.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Publication-Subscription -summary: Publication-Subscription -author: Guo Huan -date: 2022-04-29 ---- - -# Publication-Subscription - -+ **[Publications](publications.md)** -+ **[Subscriptions](subscriptions.md)** -+ **[Conflicts](conflicts.md)** -+ **[Restrictions](restrictions.md)** -+ **[Architecture](architecture.md)** -+ **[Monitoring](monitoring.md)** -+ **[Security](security.md)** -+ **[Configuration Settings](configuration-settings.md)** -+ **[Quick Setup](quick-setup.md)** - -Publication-subscription is implemented based on logical replication, with one or more subscribers subscribing to one or more publications on a publisher node. Subscribers pull data from the publications they subscribe to. - -Changes on the publisher are sent to the subscriber as they occur in real time. The subscriber applies the data in the same order as the publisher, so that transactional consistency is guaranteed for publications within a single subscription. This method of data replication is sometimes called transactional replication. - -The typical usage of publication-subscription is as follows: - -- Sending incremental changes in a database or a subset of a database to subscribers as they occur -- Firing triggers when changes reach subscribers -- Consolidating multiple databases into a single one (for example, for analysis purposes) - -The subscriber database behaves in the same way as any other MogDB instance and can be used as a publisher for other databases by defining its own publications. When the subscriber is treated as read-only by an application, there will be no conflicts in a single subscription. On the other hand, conflicts may occur if other write operations are performed by the application or by other subscribers in the same set of tables.
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/publications.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/publications.md deleted file mode 100644 index 62524bc7..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/publications.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Publications -summary: Publications -author: Guo Huan -date: 2022-04-29 ---- - -# Publications - -A publication can be defined on any primary physical replication server. The node where a publication is defined is called the publisher. A publication is a set of changes generated from a table or a group of tables. It can also be described as a change set or replication set. Each publication exists in only one database. - -Publications are different from schemas and do not affect how tables are accessed. Each table can be added to multiple publications if needed. Currently, publications can contain only tables. Objects must be explicitly added to a publication, unless the publication is created using **FOR ALL TABLES**. - -Publications can choose to limit the changes they produce to any combination of **INSERT**, **UPDATE**, and **DELETE**, which is similar to how triggers are fired by particular events. By default, all types of operations are replicated. - -A published table must be configured with a “replication identifier” in order to be able to replicate UPDATE and DELETE operations, so that appropriate rows to be updated or deleted can be identified on the subscriber side. By default, the replication identifier is the primary key (if any). You can also set another unique index (with certain additional requirements) to be the replication identifier. If the table does not have any suitable key, you can set the replication identifier to “full”, which indicates that the entire row becomes the key. However, this is very inefficient and should be used only when there are no other solutions. If a replication identifier other than “full” is set on the publisher side, a replication identifier comprising the same or fewer columns must also be set on the subscriber side. If a table without a replication identifier is added to a publication that replicates UPDATE or DELETE operations, subsequent UPDATE or DELETE operations on the subscriber side will cause an error. INSERT operations can proceed regardless of any replication identifier. - -Each publication can have multiple subscribers. - -A publication is created by running the **CREATE PUBLICATION** command and can be altered or dropped by running the corresponding commands. - -Tables can be added or removed dynamically by running the **ALTER PUBLICATION** command. ADD TABLE and DROP TABLE operations are transactional. Therefore, once the transaction is committed, table replication will start or stop using a proper snapshot.
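The statements below sketch the operations described above, using hypothetical table and index names; the `publish` option and the `REPLICA IDENTITY` clause are assumed to follow the PostgreSQL-style syntax:

```sql
-- Publish only INSERT and UPDATE changes for one table (hypothetical names).
CREATE PUBLICATION mypub FOR TABLE users WITH (publish = 'insert, update');

-- Add and remove tables dynamically; both operations are transactional.
ALTER PUBLICATION mypub ADD TABLE audit_log;
ALTER PUBLICATION mypub DROP TABLE audit_log;

-- Use a unique index as the replication identifier, or fall back to the
-- entire row (inefficient) when no suitable key exists.
ALTER TABLE users REPLICA IDENTITY USING INDEX users_email_idx;
ALTER TABLE users REPLICA IDENTITY FULL;
```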
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/quick-setup.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/quick-setup.md deleted file mode 100644 index 268590ec..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/quick-setup.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Quick Setup -summary: Quick Setup -author: Guo Huan -date: 2022-04-29 ---- - -# Quick Setup - -Set the following configuration items in the **postgresql.conf** file: - -```bash -wal_level = logical -``` - -For a basic setup, retain the default values for the other necessary configuration items. - -You need to adjust the **pg_hba.conf** file to allow replication (the value depends on the actual network configuration and the user used for connection). - -```bash -host all repuser 0.0.0.0/0 sha256 -``` - -In the publisher database: - -```sql -CREATE PUBLICATION mypub FOR TABLE users, departments; -``` - -In the subscriber database: - -```sql -CREATE SUBSCRIPTION mysub CONNECTION 'dbname=foo host=bar user=repuser' PUBLICATION mypub; -``` - -The above statements start the replication process, replicating incremental changes to those tables. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/restrictions.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/restrictions.md deleted file mode 100644 index 4b59dd0a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/restrictions.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Restrictions -summary: Restrictions -author: Guo Huan -date: 2022-04-29 ---- - -# Restrictions - -Publication-subscription is implemented based on logical replication and inherits all restrictions of logical replication. In addition, publication-subscription has the following additional restrictions and unsupported features. - -- Database schemas and DDL commands are not replicated. Initial schemas can be manually copied by using **gs_dump --schema-only**. Subsequent schema changes need to be manually synchronized. -- Sequence data is not replicated. Data in serial or identity columns backed by sequences will be replicated as part of the table, but the sequence itself will still display the start value on the subscriber. If the subscriber is used as a read-only database, this is usually not a problem. However, if some kind of switchover or failover to the subscriber database is intended, the sequence needs to be updated to the latest value, either by copying the current data from the publisher (perhaps using **gs_dump**) or by determining a sufficiently large value from the tables themselves. -- Only tables, including partitioned tables, can be replicated. Attempts to replicate other types of relations, such as views, materialized views, or foreign tables, will result in errors. -- Multiple subscriptions in the same database cannot subscribe to the same publication (that is, the same published table). Otherwise, duplicate data or primary key conflicts may occur. -- If a published table contains data types that do not support B-tree or hash indexes (such as the geography types), the table must have a primary key so that UPDATE and DELETE operations can be successfully replicated to the subscription side.
Otherwise, the replication will fail, and the message “FATAL: could not identify an equality operator for type xx” will be displayed on the subscription side. -- When gs_probackup is used to back up the publisher, the original publication-subscription relationship cannot be established because no logical replication slot exists after the backup and restoration. gs_probackup does not support the backup of the logical replication slot. You are advised to use gs_basebackup to back up the publisher. -- Currently, gs_probackup can be used to back up logical replication slots for publication and subscription. Therefore, you can use gs_probackup or gs_basebackup to back up the publisher. When data is not restored to the latest time point, the value of `remote_lsn` recorded by the replication origin at the subscriber may be greater than the current WAL insertion point at the publisher. Therefore, transactions committed during this period cannot be decoded and replicated, and only transactions committed after `remote_lsn` are decoded. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/security.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/security.md deleted file mode 100644 index 675aea85..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/security.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Security -summary: Security -author: Guo Huan -date: 2022-04-29 ---- - -# Security - -The role used for the replication connection must have the **REPLICATION** attribute (or be a user with the **SYSADMIN** permission). If the role does not have the **SUPERUSER** and **BYPASSRLS** attributes, the publisher's row-level security policies are applied to it. The access permission of the role must be configured in the **pg_hba.conf** file and the role must have the **LOGIN** attribute. - -To create a publication, the user must have the **CREATE** permission on the database. - -To add tables to a publication, the user must have ownership of the table. To create a publication that automatically publishes all tables, the user must be a user with **SYSADMIN** permission. - -To create a subscription, the user must be a user with the **SYSADMIN** permission. - -The subscription apply process will run in the local database with the privileges of a user with the **SYSADMIN** permission. - -Privileges are only checked once at the start of the replication connection. They are not re-checked when each change record is read from the publisher, or when each change is applied. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/subscriptions.md b/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/subscriptions.md deleted file mode 100644 index 299c0d8f..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/logical-replication/publication-subscription/subscriptions.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Subscriptions -summary: Subscriptions -author: Guo Huan -date: 2022-04-29 ---- - -# Subscriptions - -A subscription is the downstream side of logical replication. The node where a subscription is defined is called the subscriber. A subscription defines the connection to another database and the set of publications (one or more) that it wants to subscribe to.
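As an illustrative sketch of the management commands covered later in this section — the names and connection string are hypothetical, and the statements assume the PostgreSQL-style subscription syntax:

```sql
-- Create a subscription to a publication on a remote publisher.
CREATE SUBSCRIPTION mysub
    CONNECTION 'host=pubhost port=5432 user=repuser password=******** dbname=foo'
    PUBLICATION mypub;

-- Change the set of subscribed publications, then disable the subscription.
ALTER SUBSCRIPTION mysub SET PUBLICATION mypub, mypub2;
ALTER SUBSCRIPTION mysub DISABLE;

-- Dropping a subscription discards its synchronized state, so the data
-- must be resynchronized if the subscription is recreated.
DROP SUBSCRIPTION mysub;
```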
- -The subscriber database behaves in the same way as any other MogDB instance and can be used as a publisher for other databases by defining its own publications. - -A subscriber node can have multiple subscriptions if needed. You can define multiple subscriptions between a pair of publishers and subscribers, in which case you need to ensure that the published objects do not overlap. - -Each subscription will receive changes through a replication slot. Currently, initial data in pre-existing tables cannot be synchronized. - -If the current user is a user with the **SYSADMIN** permission, subscriptions are dumped by **gs_dump**. Otherwise, the subscriptions are skipped and a warning is written because users without the **SYSADMIN** permission cannot read all subscription information from the **pg_subscription** catalog. - -You can use **CREATE SUBSCRIPTION** to add a subscription, **ALTER SUBSCRIPTION** to alter a subscription, and **DROP SUBSCRIPTION** to drop a subscription. - -When a subscription is dropped and recreated, the synchronized information is lost. This means that the data must be resynchronized. - -The schema definitions are not replicated, and the published tables must exist on the subscriber. Only regular tables can be replicated. For example, a view cannot be replicated. - -The tables are matched between the publisher and the subscriber using fully qualified table names. Replication to differently-named tables on the subscriber is not supported. - -Columns of a table are also matched by name. The order of columns in the subscribed table does not need to be the same as that in the published table. The data types of the columns do not need to be the same, as long as the text representation of the data can be converted to the target type. For example, you can replicate from a column of the integer type to a column of the bigint type. The target table can also have additional columns that do not exist in the published table. The additional columns will be filled with the default values specified in the definition of the target table. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/1-materialized-view-overview.md b/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/1-materialized-view-overview.md deleted file mode 100644 index 7327f695..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/1-materialized-view-overview.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Materialized View Overview -summary: Materialized View Overview -author: Guo Huan -date: 2021-05-21 ---- - -# Materialized View Overview - -A materialized view is a special physical table, in contrast to a common view. A common view is a virtual table and has many application limitations: any query on a view is actually converted into a query on its defining SQL statement, so performance is not actually improved. A materialized view actually stores the result set of its defining SQL statement and serves as a cache of the results. - -Currently, the Ustore engine does not support creating and using materialized views.
- -- **[Full Materialized Views](2-full-materialized-view/full-materialized-view.md)** -- **[Incremental Materialized Views](3-incremental-materialized-view/incremental-materialized-view.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/1-full-materialized-view-overview.md b/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/1-full-materialized-view-overview.md deleted file mode 100644 index 721d69a4..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/1-full-materialized-view-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Liuxu -date: 2021-05-21 ---- - -# Overview - -Full materialized views can be fully refreshed only. The syntax for creating a full materialized view is similar to the CREATE TABLE AS syntax. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/2-full-materialized-view-usage.md b/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/2-full-materialized-view-usage.md deleted file mode 100644 index b4a0ce51..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/2-full-materialized-view-usage.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Usage -summary: Usage -author: Guo Huan -date: 2021-05-21 ---- - -# Usage - -## Syntax - -- Create a full materialized view. - - ``` - CREATE MATERIALIZED VIEW [ view_name ] AS { query_block }; - ``` - -- Fully refresh a materialized view. - - ``` - REFRESH MATERIALIZED VIEW [ view_name ]; - ``` - -- Delete a materialized view. - - ``` - DROP MATERIALIZED VIEW [ view_name ]; - ``` - -- Query a materialized view. - - ``` - SELECT * FROM [ view_name ]; - ``` - -## Examples - -``` --- Prepare data. -MogDB=# CREATE TABLE t1(c1 int, c2 int); -MogDB=# INSERT INTO t1 VALUES(1, 1); -MogDB=# INSERT INTO t1 VALUES(2, 2); - --- Create a full materialized view. -MogDB=# CREATE MATERIALIZED VIEW mv AS select count(*) from t1; - --- Query the materialized view result. -MogDB=# SELECT * FROM mv; - count -------- - 2 -(1 row) - --- Insert data into the base table in the materialized view. -MogDB=# INSERT INTO t1 VALUES(3, 3); - --- Fully refresh a full materialized view. -MogDB=# REFRESH MATERIALIZED VIEW mv; - --- Query the materialized view result. -MogDB=# SELECT * FROM mv; - count -------- - 3 -(1 row) - --- Delete a materialized view. -MogDB=# DROP MATERIALIZED VIEW mv; -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/3-full-materialized-view-support-and-constraints.md b/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/3-full-materialized-view-support-and-constraints.md deleted file mode 100644 index 2a672cc1..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/3-full-materialized-view-support-and-constraints.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Support and Constraints -summary: Support and Constraints -author: Liuxu -date: 2021-05-21 ---- - -# Support and Constraints - -## Supported Scenarios - -- Supports the same query scope as the CREATE TABLE AS statement does. -- Supports index creation in full materialized views. -- Supports ANALYZE and EXPLAIN. - -## Unsupported Scenarios - -Data in materialized views cannot be inserted, updated, or deleted.
Only query statements are supported. - -## Constraints - -A high-level lock is added to the base table during the process of refreshing or deleting a full materialized view. If a materialized view involves multiple tables, pay attention to the service logic to avoid deadlocks. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/full-materialized-view.md b/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/full-materialized-view.md deleted file mode 100644 index 9fcc3f00..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/2-full-materialized-view/full-materialized-view.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Full Materialized Views -summary: Full Materialized Views -author: Guo Huan -date: 2023-05-19 ---- - -# Full Materialized Views - -+ **[Overview](1-full-materialized-view-overview.md)** -+ **[Usage](2-full-materialized-view-usage.md)** -+ **[Support and Constraints](3-full-materialized-view-support-and-constraints.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/1-incremental-materialized-view-overview.md b/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/1-incremental-materialized-view-overview.md deleted file mode 100644 index 8da9c3df..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/1-incremental-materialized-view-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Overview -summary: Overview -author: Guo Huan -date: 2021-05-21 ---- - -# Overview - -Incremental materialized views can be incrementally refreshed. You need to periodically execute statements to incrementally refresh the materialized view. Unlike full materialized views, incremental materialized views support only a small number of scenarios. Currently, only base table scanning statements or UNION ALL can be used to create materialized views. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/2-incremental-materialized-view-usage.md b/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/2-incremental-materialized-view-usage.md deleted file mode 100644 index eae768d4..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/2-incremental-materialized-view-usage.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Usage -summary: Usage -author: Guo Huan -date: 2021-05-21 ---- - -# Usage - -## Syntax - -- Create an incremental materialized view. - - ``` - CREATE INCREMENTAL MATERIALIZED VIEW [ view_name ] AS { query_block }; - ``` - -- Fully refresh a materialized view. - - ``` - REFRESH MATERIALIZED VIEW [ view_name ]; - ``` - -- Incrementally refresh a materialized view. - - ``` - REFRESH INCREMENTAL MATERIALIZED VIEW [ view_name ]; - ``` - -- Delete a materialized view. - - ``` - DROP MATERIALIZED VIEW [ view_name ]; - ``` - -- Query a materialized view. - - ``` - SELECT * FROM [ view_name ]; - ``` - -## Examples - -``` --- Prepare data. -MogDB=# CREATE TABLE t1(c1 int, c2 int); -MogDB=# INSERT INTO t1 VALUES(1, 1); -MogDB=# INSERT INTO t1 VALUES(2, 2); - --- Create an incremental materialized view.
-MogDB=# CREATE INCREMENTAL MATERIALIZED VIEW mv AS SELECT * FROM t1; -CREATE MATERIALIZED VIEW - --- Insert data. -MogDB=# INSERT INTO t1 VALUES(3, 3); -INSERT 0 1 - --- Incrementally refresh a materialized view. -MogDB=# REFRESH INCREMENTAL MATERIALIZED VIEW mv; -REFRESH MATERIALIZED VIEW - --- Query the materialized view result. -MogDB=# SELECT * FROM mv; - c1 | c2 -----+---- - 1 | 1 - 2 | 2 - 3 | 3 -(3 rows) - --- Insert data. -MogDB=# INSERT INTO t1 VALUES(4, 4); -INSERT 0 1 - --- Fully refresh a materialized view. -MogDB=# REFRESH MATERIALIZED VIEW mv; -REFRESH MATERIALIZED VIEW - --- Query the materialized view result. -MogDB=# select * from mv; - c1 | c2 -----+---- - 1 | 1 - 2 | 2 - 3 | 3 - 4 | 4 -(4 rows) - --- Delete a materialized view. -MogDB=# DROP MATERIALIZED VIEW mv; -DROP MATERIALIZED VIEW -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/3-incremental-materialized-view-support-and-constraints.md b/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/3-incremental-materialized-view-support-and-constraints.md deleted file mode 100644 index f50aa595..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/3-incremental-materialized-view-support-and-constraints.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Support and Constraints -summary: Support and Constraints -author: Guo Huan -date: 2021-05-21 ---- - -# Support and Constraints - -## Supported Scenarios - -- Supports statements for querying a single table. -- Supports UNION ALL for querying multiple single tables. -- Supports index creation in materialized views. -- Supports the Analyze operation in materialized views. - -## Unsupported Scenarios - -- Multi-table join plans and subquery plans are not supported in materialized views. -- Except for a few ALTER operations, most DDL operations cannot be performed on base tables in materialized views. -- Data in materialized views cannot be inserted, updated, or deleted; only query statements are supported. -- Temporary tables, hash bucket tables, unlogged tables, and partitioned tables cannot be used to create materialized views. -- Materialized views cannot be created in nested mode (that is, a materialized view cannot be created in another materialized view). -- The column-store tables are not supported. Only row-store tables are supported. -- Materialized views of the UNLOGGED type are not supported, and the WITH syntax is not supported. - -## Constraints - -If the materialized view definition is UNION ALL, each subquery needs to use a different base table (see the sketch below). - -A high-level lock is added to the base table during the creation, full refresh, and deletion of an incremental materialized view. If the materialized view is defined as a UNION ALL, pay attention to the service logic to avoid deadlocks.
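For instance, a definition that satisfies the UNION ALL constraint scans a different base table in each branch — a minimal sketch using hypothetical tables `t1` and `t2`:

```
-- Each UNION ALL branch scans a different base table.
MogDB=# CREATE TABLE t2(c1 int, c2 int);
MogDB=# CREATE INCREMENTAL MATERIALIZED VIEW mv_union AS
        SELECT c1, c2 FROM t1
        UNION ALL
        SELECT c1, c2 FROM t2;

-- Incremental refresh picks up changes from both base tables.
MogDB=# REFRESH INCREMENTAL MATERIALIZED VIEW mv_union;
```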
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/incremental-materialized-view.md b/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/incremental-materialized-view.md deleted file mode 100644 index 0acf52ef..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/materialized-view/3-incremental-materialized-view/incremental-materialized-view.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Incremental Materialized Views -summary: Incremental Materialized Views -author: Guo Huan -date: 2023-05-19 ---- - -# Incremental Materialized Views - -+ **[Overview](1-incremental-materialized-view-overview.md)** -+ **[Usage](2-incremental-materialized-view-usage.md)** -+ **[Support and Constraints](3-incremental-materialized-view-support-and-constraints.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/assessment-tool.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/assessment-tool.md deleted file mode 100644 index 31f9748d..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/assessment-tool.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: MySQL Syntax Compatibility Assessment Tool -summary: MySQL Syntax Compatibility Assessment Tool -author: zhang cuiping -date: 2022-10-24 ---- - -# MySQL Syntax Compatibility Assessment Tool - -This tool uses an existing MogDB node to assess the compatibility of SQL text with MogDB. The restrictions include but are not limited to the following: - -- Only SQL text files are supported, and SQL statements are separated by semicolons (;). -- If compatible plug-ins such as Dolphin and Whale are not used, the error information about incompatible statements may be inaccurate. If the corresponding plug-in is used, comply with the plug-in usage restrictions. -- The comment tag (#) is not supported currently. Replace the comment tag (#) in the text with `--` or delete it. -- Stored procedures and function statements support only the validity check of the creation body and the syntax compatibility check of the function body. -- Accuracy of assessment results: - - Fully compatible: MogDB fully supports this syntax. The compatibility result may depend on the pre-processing result of the input SQL statement. Therefore, the statements may not be fully compatible when being executed in MogDB. - - Syntax compatible: MogDB supports this syntax. However, in actual use, problems may occur, for example, the column type is not supported or the function does not exist. - - Statement incompatible: MogDB does not support this syntax. - - Assessment not supported: Statements are not considered. Statement assessment (for example, cross-database impact statements such as CREATE DATABASE) will be supported in the future. - - Ignored statements: such as comments.
For an A-compatible database, you are advised to perform the following settings in advance when exporting SQL statements: - -```sql -EXECUTE DBMS_METADATA.SET_TRANSFORM_PARAM(DBMS_METADATA.SESSION_TRANSFORM,'SEGMENT_ATTRIBUTES',false); -EXECUTE DBMS_METADATA.SET_TRANSFORM_PARAM(DBMS_METADATA.SESSION_TRANSFORM,'SQLTERMINATOR',true); -EXECUTE DBMS_METADATA.SET_TRANSFORM_PARAM(DBMS_METADATA.SESSION_TRANSFORM,'STORAGE',false); -EXECUTE DBMS_METADATA.SET_TRANSFORM_PARAM(DBMS_METADATA.SESSION_TRANSFORM,'TABLESPACE',false); -``` - -## Compiling Plug-ins - -- Mandatory Plug-ins - -| Mandatory Plug-in | Description | -| :----------------- | :----------------------------------------------------------- | -| contrib/assessment | Assessment plug-in, including the plug-in SO and executable files | - -- The following plug-in can be used to improve the overall compatibility when the database is running: - -| Optional Plug-in (Compatibility Plug-in) | Description | -| :--------------------------------------- | :---------------------------- | -| contrib/dolphin | B-compatible database plug-in | - -1. Download the MogDB source code and compile the MogDB source code according to the README.md file. -2. Copy the preceding plug-ins to the `contrib` directory in the MogDB source code path. Run the `cd` command to go to the corresponding directory and run the `make install -sj` command. -3. Copy the files required by the plug-in to the corresponding binary path. Generally, the files are `extension.so`, `extension.sql`, and `extension.control`. The `assessment` plug-in contains the executable file `assessment_database`. The following files are involved in this example. If the binary from step 1 is used, skip this step. - -**Dependency files of assessment** - -``` -Binary path -├── bin -│ └── ***assessment_database*** -├── lib -│ └── postgresql -│ └── ***assessment.so*** -└── share - └── postgresql - └── extension - ├── ***assessment--1.0.sql*** - └── ***assessment.control*** -``` - -**Dependency files of Dolphin** - -``` -Binary path -├── lib -│ └── postgresql -│ └── ***dolphin.so*** -└── share - └── postgresql - └── extension - ├── ***dolphin--1.0.sql*** - └── ***dolphin.control*** -``` - -### Running - -1. Ensure that a database is running and can be connected using the gsql command. - -2. Run the `assessment_database [args]` command, where **args** contains the following parameters: - - | Parameter Type | Parameter | Description | Example | - | :----------------------- | :---------- | :----------------------------------------------------------- | ---------------- | - | Connection parameters | p | (Mandatory) Port | `-p 5432` | - | | d | (Optional) Database | `-d evaluation` | - | | U | (Optional) User name. If local connection is supported, leave this parameter blank. | `-U user` | - | | W | (Optional) Password. If local connection is supported, leave this parameter blank. | `-W ******` | - | Compatibility assessment | c | Specifies the compatibility type (A\B\C\PG). If the **d** parameter is specified, this parameter cannot be set. | `-c B` | - | File parameters | f | (Mandatory) Assesses the SQL file. | `-f input.sql` | - | | o | (Mandatory) Output file. Generally, an HTML file is entered. | `-o result.html` | - -## Examples - -**Case 1:** - -Use gs_initdb to initialize the database and start it. Assume that the startup port is 5432. In this case, you can run the `gsql -dpostgres -p5432` command to connect to the database.
Assume that the input file is `test.sql`, the output report path is `result.html`, and the source database to be assessed is B. The command used for assessment is as follows: - -```shell -assessment_database -p5432 -cB -ftest.sql -oresult.html -``` - -The following information is displayed: - -```shell -assessment_database: create database "assessment_197561" automatically. -assessment_database: Create plugin[dolphin] automatically. -assessment_database: Create extension[assessment] automatically. -assessment_database: parse[100.00%]:35/35 -assessment_database: Create database assessment_197561 automatically, clear it manually! -``` - -**Case 2:** - -Assume that a database node already exists remotely. You can connect to the database through **gsql -dpostgres -p5432 -h127.0.0.2 -Utest -W*** on the compatibility assessment node. Assume that the input file is **test.sql**, the output report path is **result.html**, and the source database to be assessed is B. The command used for assessment is as follows: - -```shell -assessment_database -p5432 -cB -h127.0.0.2 -Utest -W***** -ftest.sql -oresult.html -``` - -**Case 3:** - -Assume that a remote database node exists and the **evaluation** database has been created for compatibility assessment. On the compatibility assessment node, you can connect to the database through **gsql -devaluation -p5432 -h127.0.0.2 -Utest -W***. Assume that the input file is **test.sql** and the output report path is **result.html**. The assessment command is as follows: - -```shell -assessment_database -p5432 -devaluation -h127.0.0.2 -Utest -W***** -ftest.sql -oresult.html -``` - -That is, replace **-cB** in Case 2 with **-devaluation** to specify the database. - -### Results - -The assessment tool generates an assessment report in HTML format. The information includes the statement, compatibility type, and failure cause. The compatibility types include fully compatible, syntax compatible, statement incompatible, and assessment not supported. The details are as follows: - -- Fully compatible: MogDB fully supports this syntax. The execution result depends on the existing tables, functions, and stored procedures in the database. -- Syntax compatible: MogDB supports this syntax. However, in actual use, problems may occur, for example, the column type is not supported or the function does not exist. -- Statement incompatible: MogDB does not support this syntax. -- Assessment not supported: Statements are not considered. Statement assessment (for example, cross-database impact statements such as CREATE DATABASE) will be supported in the future. - -### Principle - -1. A database node initialized using gs_initdb is running properly. -2. The connection parameters are configured. The connection parameters are the same as those of the gsql connection mode of MogDB. -3. If the **-c** compatibility type is specified, the tool uses the preceding connection parameters to connect to the database, automatically creates the corresponding compatibility assessment database, and then runs the CREATE EXTENSION command to create the necessary plug-ins (such as `assessment` and `dolphin`). Clear this database manually after the assessment. -4. If **-d database** is specified, the tool creates a plug-in in the corresponding database. -5. The assessment is performed in the corresponding assessment database. The assessment types include syntax tree compatibility assessment and statement compatibility assessment.
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-extension.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-extension.md deleted file mode 100644 index b89f3b5a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-extension.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Dolphin Extension -summary: Dolphin Extension -author: Guo Huan -date: 2023-05-22 ---- - -# Dolphin Extension - -+ **[Dolphin Overview](dolphin-overview.md)** -+ **[Dolphin Installation](dolphin-installation.md)** -+ **[Dolphin Restrictions](dolphin-restrictions.md)** -+ **[Dolphin Syntax](dolphin-syntax/dolphin-syntax.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-installation.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-installation.md deleted file mode 100644 index c29d051c..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-installation.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Dolphin Installation -summary: Dolphin Installation -author: zhang cuiping -date: 2022-10-24 ---- - -# Dolphin Installation - -Dolphin is automatically installed and loaded. You do not need to manually install and load the plug-in. You can use Dolphin by creating a B-compatible database (CREATE DATABASE xxxx DBCOMPATIBILITY 'B';) and connecting to the database with the initial user. - -If you need to install Dolphin manually, please refer to [PTK Installing Extensions](https://docs.mogdb.io/en/ptk/v1.1/usage-install-plugin). \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-overview.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-overview.md deleted file mode 100644 index 7424fb43..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-overview.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Dolphin Overview -summary: Dolphin Overview -author: zhang cuiping -date: 2022-10-24 ---- - -# Dolphin Overview - -MogDB provides the Dolphin extension (version: Dolphin-1.0.0). The Dolphin extension makes MogDB compatible with MySQL databases (dbcompatibility='B') in terms of keywords, data types, constants and macros, functions and operators, expressions, type conversion, DDL/DML/DCL syntax, stored procedures/user-defined functions, and system views. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-restrictions.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-restrictions.md deleted file mode 100644 index 78d10faf..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-restrictions.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Dolphin Restrictions -summary: Dolphin Restrictions -author: zhang cuiping -date: 2022-10-24 ---- - -# Dolphin Restrictions - -- The Dolphin plug-in cannot be deleted. -- The Dolphin plug-in can be created only in a B-compatible database.
- -The Dolphin plug-in needs to create data types and functions in schemas such as pg_catalog. Therefore, loading the Dolphin plug-in requires initial user permissions. MogDB automatically loads the Dolphin plug-in when the initial user or a user with the initial user permissions connects to a B-compatible database for the first time. If a B-compatible database has never been connected by an initial user or a user with initial user permissions, it will not load the Dolphin plug-in. -- Syntaxes added or modified by Dolphin cannot be viewed by running `\h` on the gsql client and are not auto-completed by the gsql client. -- Creating the Dolphin plug-in deletes existing functions and types in the database whose names conflict with those required by the plug-in, together with objects that depend on them. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/dolphin-reset-parameters.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/dolphin-reset-parameters.md deleted file mode 100644 index 5e50ae90..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/dolphin-reset-parameters.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: dolphin Resetting Parameters -summary: dolphin Resetting Parameters -author: Guo Huan -date: 2023-05-15 ---- - -# Resetting Parameters - -## Background - -MogDB provides multiple methods to set GUC parameters for databases, users, or sessions. - -- Parameter names are case-insensitive. -- A parameter value can be an integer, floating point number, string, Boolean value, or enumerated value. - - The Boolean values can be **on**/**off**, **true**/**false**, **yes**/**no**, or **1**/**0**, and are case-insensitive. - - The enumerated value range is specified in the **enumvals** column of the **pg\_settings** system catalog. - -- For parameters using units, specify their units during the setting, or default units are used. - - The default units are specified in the **unit** column of **pg\_settings**. - - The unit of memory can be KB, MB, or GB. - - The unit of time can be ms, s, min, h, or d. - -For details, see [dolphin GUC Parameters](guc-parameters.md). - -## Setting GUC Parameters - -MogDB provides six types of GUC parameters. For details about parameter types and their setting methods, see Table 1. - -**Table 1** GUC parameters - -| Parameter Type | Description | Setting Method | -| :------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| INTERNAL | Fixed parameter. It is set during database creation and cannot be modified. Users can only view the parameter by running the SHOW command or in the pg_settings view. | None | -| POSTMASTER | Database server parameter. It can be set in the configuration file when the database is started. | Method 2 in Table 2 | -| SIGHUP | Global database parameter. It can be set when the database is started or be modified later. | Method 2 in Table 2 | -| BACKEND | Session connection parameter. It is specified during session connection creation and cannot be modified after that. The parameter setting becomes invalid when the session is disconnected. This is an internal parameter and not recommended for users to set. | Method 2 in Table 2
Note: After this parameter is set, the setting takes effect when the next session is created. | -| SUSET | Database administrator parameter. It can be set when or after the database is started. It can also be set by database administrators using SQL statements. | Method 3 by a database administrator in Table 2 | -| USERSET | Common user parameter. It can be configured by any user at any time. | Method 1 in Table 2 | - -MogDB provides four methods to modify GUC parameters. For details, see Table 2. - -**Table 2** Methods for setting GUC parameters - -| No. | Setting Method | -| :------- | :----------------------------------------------------------- | -| Method 1 | Set parameters at database, user, or session levels.
- Set a database-level parameter.
`MogDB=# ALTER DATABASE dbname SET paraname TO value;`
The setting takes effect in the next session.
- Set a user-level parameter.
`MogDB=# ALTER USER username SET paraname TO value;`
The setting takes effect in the next session.
- Set a session-level parameter.
`MogDB=# SET paraname TO value;`
Parameter value in the current session is changed. After you exit the session, the setting becomes invalid.
Note: Session-level parameters set by SET have the highest priority, followed by parameters set by ALTER. Parameter values set by ALTER USER have a higher priority than those set using ALTER DATABASE. Priorities of the first three methods are all higher than those of gs_guc. | -| Method 2 | Use ALTER SYSTEM SET to modify database parameters.
- Set a POSTMASTER-level parameter.
`MogDB=# ALTER SYSTEM SET paraname TO value;`
The setting takes effect after the system is restarted.
- Set a SIGHUP-level parameter.
`MogDB=# ALTER SYSTEM SET paraname TO value;`
The setting takes effect immediately. (Actually, there is a slight delay while the thread reloads the parameter.)
- Set a BACKEND-level parameter.
`MogDB=# ALTER SYSTEM SET paraname TO value;`
The setting takes effect in the next session. | - -## Procedure - -The following example shows how to set **explain\_perf\_mode** using method 1. - -1. Log in as the OS user **omm** to the primary node of the database. - -2. Run the following command to connect to the database: - - ``` - gsql -d postgres -p 8000 - ``` - - **postgres** is the name of the database to be connected, and **8000** is the port number of the database primary node. - - If information similar to the following is displayed, the connection is successful: - - ```sql - gsql((MogDB x.x.x build f521c606) compiled at 2021-09-16 14:55:22 commit 2935 last mr 6385 release) - Non-SSL connection (SSL connection is recommended when requiring high-security) - Type "help" for help. - - MogDB=# - ``` - -3. Check the **explain\_perf\_mode** parameter. - - ```sql - MogDB=# SHOW explain_perf_mode; - explain_perf_mode - ------------------- - normal - (1 row) - ``` - -4. Set the **explain\_perf\_mode** parameter. - - Perform one of the following operations: - - - Set a database-level parameter. - - ```sql - MogDB=# ALTER DATABASE postgres SET explain_perf_mode TO pretty; - ``` - - If the following information is displayed, the setting succeeds: - - ```sql - ALTER DATABASE - ``` - - The setting takes effect in the next session. - - - Set a user-level parameter. - - ```sql - MogDB=# ALTER USER omm SET explain_perf_mode TO pretty; - ``` - - If the following information is displayed, the setting succeeds: - - ```sql - ALTER ROLE - ``` - - The setting takes effect in the next session. - - - Set a session-level parameter. - - ```sql - MogDB=# SET explain_perf_mode TO pretty; - ``` - - If the following information is displayed, the setting succeeds: - - ```sql - SET - ``` - -5. Check whether the parameter is correctly configured. - - ```sql - MogDB=# SHOW explain_perf_mode; - explain_perf_mode - -------------- - pretty - (1 row) - ``` - -## Examples - -```sql --- Create a table named test1. -MogDB=# CREATE TABLE test1 ( a1 smallint not null, a2 int not null, a3 bigint not null, a4 float not null, a5 double not null, a6 numeric not null, a7 varchar(5) not null ); - --- Failed to insert records into the table. -MogDB=# insert into test1(a1,a2) values(123412342342314,3453453453434324); - --- Failed to query the table. -MogDB=# select a1,a2 from test1 group by a1; - --- A record is successfully inserted into the table. -MogDB=# set dolphin.sql_mode = ''; -MogDB=# insert into test1(a1,a2) values(123412342342314,3453453453434324); - --- The table is queried successfully. -MogDB=# select a1,a2 from test1 group by a1; - --- Drop a table. 
-MogDB=# DROP TABLE test1; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/dolphin-syntax.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/dolphin-syntax.md deleted file mode 100644 index 740b5d82..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/dolphin-syntax.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: dolphin Dolphin Syntax -summary: dolphin Dolphin Syntax -author: Guo Huan -date: 2023-05-22 ---- - -# Dolphin Syntax - -- **[SQL Reference](sql-reference/dolphin-sql-reference.md)** -- **[System Views](system-views/dolphin-system-views.md)** -- **[GUC Parameters](guc-parameters.md)** -- **[Resetting Parameters](dolphin-reset-parameters.md)** -- **[Stored Procedures](stored-procedures/dolphin-stored-procedures.md)** -- **[Identifiers](identifiers/dolphin-identifiers.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/guc-parameters.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/guc-parameters.md deleted file mode 100644 index ace48f4a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/guc-parameters.md +++ /dev/null @@ -1,266 +0,0 @@ ---- -title: dolphin GUC Parameters -summary: dolphin GUC Parameters -author: zhang cuiping -date: 2022-10-24 ---- - -# GUC Parameters - -## sql_mode - -**Parameter description:** The parameter value is a character string separated by commas (,). Only valid character strings are allowed. If the parameter value is invalid, a warning is reported after the startup. Similarly, if the new value is invalid, a warning is reported and the old value is not changed. The default string of the current sql_mode is sql_mode_strict,sql_mode_full_group. Currently, sql_mode is used in the following scenarios: - -1. sql_mode_strict: When a value that does not comply with the current column type is inserted, data conversion is performed. There are two scenarios: **insert into table values (…)** and **insert into table select …**. Conversion between various data types is involved. Currently, the following types are involved: tinyint (tinyint is not considered because its data scope is different from that of MySQL), smallint, int, bigint, float, double, numeric, clob, char, and varchar. -2. sql_mode_strict: If the length of the inserted column value exceeds the length limit of the column, the maximum or minimum value is assigned to the column. The involved types are tinyint, smallint, int, bigint, float, double, numeric, clob, char, and varchar. -3. sql_mode_strict: During insert, if a column whose attribute is not empty and does not have a default value is not in the insert list, the default value is added to the column. (The involved types are the same as the preceding types.) -4. sql_mode_strict: supports explicit insertion of default to columns whose attributes are not empty and do not have default values. (The involved types are the same as the preceding types.) -5. sql_mode_full_group: determines whether columns (without aggregate functions) in the SELECT list must be included in the GROUP BY clause. 
In sql_mode_full_group mode (default mode), if a column in the select list does not use an aggregate function or appear in the GROUP BY clause, an error is reported. Otherwise, the execution is successful and the first tuple is selected from all tuples that meet the conditions. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in [Appendices](../../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: a string - -**Default value:** 'sql_mode_strict,sql_mode_full_group' - -**Example**: - -```sql ---Create a table named test1. -MogDB=# CREATE TABLE test1 -( - a1 smallint not null, - a2 int not null, - a3 bigint not null, - a4 float not null, - a5 double not null, - a6 numeric not null, - a7 varchar(5) not null -); - ---Failed to insert records into the table. -MogDB=# insert into test1(a1,a2) values(123412342342314,3453453453434324); ---Failed to query the table. -MogDB=# select a1,a2 from test1 group by a1; - ---A record is successfully inserted into the table. -MogDB=# set sql_mode = ''; -MogDB=# insert into test1(a1,a2) values(123412342342314,3453453453434324); ---The table is queried successfully. -MogDB=# select a1,a2 from test1 group by a1; - ---Delete the table. -MogDB=# DROP TABLE test1; -``` - -## b_db_timestamp - -**Parameter description:** The parameter value is a floating point number. This parameter affects the curdate, current_time, curtime, current_timestamp, localtime, localtimestamp, and now functions in Dolphin. If this parameter is set to **0**, the preceding functions return the current date or time. If the parameter value is within the range [1,2147483647], the preceding functions use the value of this parameter as the second offset and return the date or time corresponding to 1970-01-01 00:00:00 UTC + second offset + current time zone offset. If the value of this parameter is not in the preceding valid range, an error is reported. - -This parameter is a USERSET parameter. Set it based on instructions provided in [Appendices](../../../../reference-guide/guc-parameters/appendix.md). - -**Value range:** **0** or [1.0, 2147483647.0] - -**Default value**: **0** - -**Example** - -```sql -MogDB=# show b_db_timestamp; - b_db_timestamp ----------------- - 0 -(1 row) - -MogDB=# select now(); - now() ---------------------- - 2022-09-18 19:52:23 -(1 row) - -MogDB=# set b_db_timestamp = 1.0; -SET -MogDB=# select now(); - now() ---------------------- - 1970-01-01 08:00:01 -(1 row) -``` - -## default_week_format - -**Parameter description:** The parameter value is an integer. This parameter affects the week function in the Dolphin plug-in. The value range of this parameter is [0,7], which corresponds to eight calculation policies. For details about these policies, see [Time and Date Functions](../../../../reference-guide/functions-and-operators/date-and-time-processing-functions-and-operators.md). If the value of this GUC parameter exceeds the corresponding boundary value, a warning is reported and the GUC parameter is set to the corresponding boundary value. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in [Appendices](../../../../reference-guide/guc-parameters/appendix.md). 
- -**Value range:** [0, 7] - -**Default value**: **0** - -**Example** - -```sql -MogDB=# show default_week_format; -default_week_format ---------------------- -0 -(1 row) - -MogDB=# select week('2000-1-1'); -week ------- - 0 -(1 row) - -MogDB=# alter system set default_week_format = 2; -ALTER SYSTEM SET - -MogDB=# select week('2000-1-1'); -week ------- -52 -(1 row) -``` - -## lc_time_names - -**Parameter description:** Specifies the language in which the dayname and monthname functions of the dolphin plug-in output results. The parameter value is a character string. There are 111 values for this parameter. If the value of a parameter is not within the valid value range, an error is reported. - -This parameter is a SIGHUP parameter. Set it based on instructions provided in [Appendices](../../../../reference-guide/guc-parameters/appendix.md). - -**Value range**: The options of lc_time_names are as follows: - -| Value | Language Set | -| ------ | ------------------------------------------------------------ | -|ar_AE |Arabic - United Arab Emirates| -|ar_BH |Arabic - Bahrain| -|ar_DZ |Arabic - Algeria| -|ar_EG |Arabic - Egypt| -|ar_IN |Arabic - India| -|ar_IQ |Arabic - Iraq| -|ar_JO |Arabic - Jordan| -|ar_KW |Arabic - Kuwait| -|ar_LB |Arabic - Lebanon| -|ar_LY |Arabic - Libya| -|ar_MA |Arabic - Morocco| -|ar_OM |Arabic - Oman| -|ar_QA |Arabic - Qatar| -|ar_SA |Arabic - Saudi Arabia| -|ar_SD |Arabic - Sudan| -|ar_SY |Arabic - Syria| -|ar_TN |Arabic - Tunisia| -|ar_YE |Arabic - Yemen| -|be_BY |Belarusian - Belarus| -|bg_BG |Bulgarian - Bulgaria| -|ca_ES |Catalan - Spain| -|cs_CZ |Czech - Czech Republic| -|da_DK |Danish - Denmark| -|de_AT |German - Austria| -|de_BE |German - Belgium| -|de_CH |German - Switzerland| -|de_DE |German - Germany| -|de_LU |German - Luxembourg| -|el_GR |Greek - Greece| -|en_AU |English - Australia| -|en_CA |English - Canada| -|en_GB |English - United Kingdom| -|en_IN |English - India| -|en_NZ |English - New Zealand| -|en_PH |English - Philippines| -|en_US |English - United States| -|en_ZA |English - South Africa| -|en_ZW |English - Zimbabwe| -|es_AR |Spanish - Argentina| -|es_BO |Spanish - Bolivia| -|es_CL |Spanish - Chile| -|es_CO |Spanish - Colombia| -|es_CR |Spanish - Costa Rica| -|es_DO |Spanish - Dominican Republic| -|es_EC |Spanish - Ecuador| -|es_ES |Spanish - Spain| -|es_GT |Spanish - Guatemala| -|es_HN |Spanish - Honduras| -|es_MX |Spanish - Mexico| -|es_NI |Spanish - Nicaragua| -|es_PA |Spanish - Panama| -|es_PE |Spanish - Peru| -|es_PR |Spanish - Puerto Rico| -|es_PY |Spanish - Paraguay| -|es_SV |Spanish - El Salvador| -|es_US |Spanish - United States| -|es_UY |Spanish - Uruguay| -|es_VE |Spanish - Venezuela| -|et_EE |Estonian - Estonia| -|eu_ES |Basque - Spain| -|fi_FI |Finnish - Finland| -|fo_FO |Faroese - Faroe Islands| -|fr_BE |French - Belgium| -|fr_CA |French - Canada| -|fr_CH |French - Switzerland| -|fr_FR |French - France| -|fr_LU |French - Luxembourg| -|gl_ES |Galician - Spain| -|gu_IN |Gujarati - India| -|he_IL |Hebrew - Israel| -|hi_IN |Hindi - India| -|hr_HR |Croatian - Croatia| -|hu_HU |Hungarian - Hungary| -|id_ID |Indonesian - Indonesia| -|is_IS |Icelandic - Iceland| -|it_CH |Italian - Switzerland| -|it_IT |Italian - Italy| -|ja_JP |Japanese - Japan| -|ko_KR |Korean - Republic of Korea| -|lt_LT |Lithuanian - Lithuania| -|lv_LV |Latvian - Latvia| -|mk_MK |Macedonian - North Macedonia| -|mn_MN |Mongolia - Mongolian| -|ms_MY |Malay - Malaysia| -|nb_NO |Norwegian(Bokmål) - Norway| -|nl_BE |Dutch - Belgium| -|nl_NL |Dutch - The Netherlands| -|no_NO 
|Norwegian - Norway| -|pl_PL |Polish - Poland| -|pt_BR |Portuguese - Brazil| -|pt_PT |Portuguese - Portugal| -|rm_CH |Romansh - Switzerland| -|ro_RO |Romanian - Romania| -|ru_RU |Russian - Russia| -|ru_UA |Russian - Ukraine| -|sk_SK |Slovak - Slovakia| -|sl_SI |Slovenian - Slovenia| -|sq_AL |Albanian - Albania| -|sr_RS |Serbian - Serbia| -|sv_FI |Swedish - Finland| -|sv_SE |Swedish - Sweden| -|ta_IN |Tamil - India| -|te_IN |Telugu - India| -|th_TH |Thai - Thailand| -|tr_TR |Turkish - Turkey| -|uk_UA |Ukrainian - Ukraine| -|ur_PK |Urdu - Pakistan| -|vi_VN |Vietnamese - Vietnam| -|zh_CN |Chinese - China| -|zh_HK |Chinese - Hong Kong| -|zh_TW |Chinese - Taiwan| - -**Default value:** **'en_US'** - -**Example** - -```sql -MogDB=# select dayname('2000-1-1'); -dayname ----------- -Saturday -(1 row) - -MogDB=# alter system set lc_time_names = 'zh_CN'; -ALTER SYSTEM SET - -MogDB=# select dayname('2000-1-1'); -dayname ---------- -星期六 -(1 row) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/identifiers/dolphin-column-name-identifiers.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/identifiers/dolphin-column-name-identifiers.md deleted file mode 100644 index 3a06ca1e..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/identifiers/dolphin-column-name-identifiers.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: dolphin Column Name Identifiers -summary: dolphin Column Name Identifiers -author: Guo Huan -date: 2023-05-15 ---- - -# Column Name Identifiers - -## Precautions - -Compared with the original MogDB, Dolphin modifies the column name identifiers as follows: - -- Column names and aliases are case-sensitive for storage and display, regardless of whether they are enclosed in double quotation marks. -- Column names and aliases are case-insensitive for comparison. That is, column names **'aAa'** and **'AAa'** identify the same column.
- -Example: - -```sql -MogDB=# create database col_name dbcompatibility 'B'; -CREATE DATABASE - -MogDB=# \c col_name - -col_name=# create table t1(aAa int); -CREATE TABLE - -col_name=# insert into t1 values(1); -INSERT 0 1 - -col_name=# select * from t1; - aAa ------ - 1 -(1 row) - -col_name=# select "AAa" from t1; - AAa ------ - 1 -(1 row) - -col_name=# select aaa AS AaA from t1; - AaA ------ - 1 -(1 row) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/identifiers/dolphin-identifiers.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/identifiers/dolphin-identifiers.md deleted file mode 100644 index b2adafd1..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/identifiers/dolphin-identifiers.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: dolphin Identifiers -summary: dolphin Identifiers -author: Guo Huan -date: 2023-05-22 ---- - -# Identifiers - -+ **[Column Name Identifiers](dolphin-column-name-identifiers.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-binary-types.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-binary-types.md deleted file mode 100644 index c78d85e9..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-binary-types.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: dolphin Binary Types -summary: dolphin Binary Types -author: zhang cuiping -date: 2022-10-24 ---- - -# Binary Types - -Table 1 lists the binary data types supported by MogDB. - -Compared with the original MogDB, Dolphin modifies the binary types as follows: - -1. The BINARY, VARBINARY, TINYBLOB, MEDIUMBLOB, and LONGBLOB types are added. -2. The input function of the BLOB type is modified. When **dolphin.b_compatibility_mode** is set to **on**, the input function is compatible with the common character string input of the MySQL database. The corresponding character string can be output only when **bytea_output** is set to **escape**; otherwise, the value will be converted into a hexadecimal character string for output. -3. For the TINYBLOB, MEDIUMBLOB, and LONGBLOB types, if **dolphin.b_compatibility_mode** is set to **off**, the input function is still compatible with the common character string input of the MySQL database. The corresponding character string can be output only when **bytea_output** is set to **escape**; otherwise, the character string will be converted into a hexadecimal character string for output. -4. The input function of the BINARY type is modified to support the identification of escape characters in the MySQL database. -5. BINARY EXPR is added. The BINARY keyword before any expression indicates that the expression is converted to the binary type. - -**Table 1** Binary data types - -| Name | Description | Storage Space | -| :------------------------------ | :----------------------------------------------------------- | :----------------------------------------------------------- | -| BLOB | Binary large object (BLOB).
NOTE:
Column store does not support the BLOB type. The input function of the BLOB type is compatible with the MySQL database's function of receiving common character strings only when **dolphin.b\_compatibility\_mode** is set to **on**. | The maximum size is 1 GB – 8203 bytes (that is, 1073733621 bytes). | -| TINYBLOB | Binary large object (BLOB).
NOTE:
Column store does not support the TINYBLOB type. The input function of the TINYBLOB type is still compatible with the MySQL database's function of receiving common character strings even if **dolphin.b\_compatibility\_mode** is set to **off**. | The maximum size is 255 bytes. | -| MEDIUMBLOB | Binary large object (BLOB).
NOTE:
Column store does not support the MEDIUMBLOB type. The input function of the MEDIUMBLOB type is still compatible with the MySQL database's function of receiving common character strings even if **dolphin.b\_compatibility\_mode** is set to **off**. | The maximum size is 16 MB – 1 byte. | -| LONGBLOB | Binary large object (BLOB).
NOTE:
Column store does not support the LONGBLOB type. The input function of the LONGBLOB type is still compatible with the MySQL database's function of receiving common character strings even if **dolphin.b\_compatibility\_mode** is set to **off**. | The maximum size is 4 GB – 1 byte. | -| RAW | Variable-length hexadecimal string.
NOTE:
Column store does not support the raw type. | 4 bytes plus the actual hexadecimal string. Its maximum length is 1 GB – 8203 bytes (that is, 1073733621 bytes). | -| BYTEA | Variable-length binary string. | 4 bytes plus the actual binary string. Its maximum length is 1 GB – 8203 bytes (that is, 1073733621 bytes). | -| BINARY | Fixed-length binary string. | 4 bytes plus the actual binary string (255 bytes). The maximum length is 259 bytes. | -| VARBINARY | Variable-length binary string. | 4 bytes plus the actual binary string (65535 bytes). The maximum length is 65539 bytes. | -| BYTEAWITHOUTORDERWITHEQUALCOL | Variable-length binary character string (new type for the encryption feature. If the encryption type of the encrypted column is specified as deterministic encryption, the column type is BYTEAWITHOUTORDERWITHEQUALCOL). The original data type is displayed when the encrypted table is printed by running the meta command. | 4 bytes plus the actual binary string. The maximum value is 1 GB – 53 bytes (that is, 1073741771 bytes). | -| BYTEAWITHOUTORDERCOL | Variable-length binary character string (new type for the encryption feature. If the encryption type of the encrypted column is specified as random encryption, the column type is BYTEAWITHOUTORDERCOL). The original data type is displayed when the encrypted table is printed by running the meta command. | 4 bytes plus the actual binary string. The maximum value is 1 GB – 53 bytes (that is, 1073741771 bytes). | -| \_BYTEAWITHOUTORDERWITHEQUALCOL | Variable-length binary character string, which is a new type for the encryption feature. | 4 bytes plus the actual binary string. The maximum value is 1 GB – 53 bytes (that is, 1073741771 bytes). | -| \_BYTEAWITHOUTORDERCOL | Variable-length binary character string, which is a new type for the encryption feature. | 4 bytes plus the actual binary string. The maximum value is 1 GB – 53 bytes (that is, 1073741771 bytes). | - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - In addition to the size limit of each column, the total size of each tuple cannot exceed 1 GB – 8203 bytes (that is, 1073733621 bytes). -> - `BYTEAWITHOUTORDERWITHEQUALCOL`, `BYTEAWITHOUTORDERCOL`, `_BYTEAWITHOUTORDERWITHEQUALCOL`, and `_BYTEAWITHOUTORDERCOL` cannot be directly used to create a table. - -Example: - -```sql ---Create a table. -MogDB=# CREATE TABLE blob_type_t1 -( - BT_COL1 INTEGER, - BT_COL2 BLOB, - BT_COL3 RAW, - BT_COL4 BYTEA -) ; - ---Insert data. -MogDB=# INSERT INTO blob_type_t1 VALUES(10,empty_blob(), -HEXTORAW('DEADBEEF'),E'\\xDEADBEEF'); - ---Query data in the table. -MogDB=# SELECT * FROM blob_type_t1; - bt_col1 | bt_col2 | bt_col3 | bt_col4 ----------+---------+----------+------------ - 10 | | DEADBEEF | \xdeadbeef -(1 row) - ---Delete the table. -MogDB=# DROP TABLE blob_type_t1; - ---Use BINARY to convert data. 
-MogDB=# select 'a\t'::binary; - binary --------- - \x6109 -(1 row) - -MogDB=# select binary 'a\b'; - binary --------- - \x6108 -(1 row) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-bit-string-types.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-bit-string-types.md deleted file mode 100644 index a6542247..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-bit-string-types.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: dolphin Bit String Types -summary: dolphin Bit String Types -author: zhang cuiping -date: 2022-10-24 ---- - -# Bit String Types - -Compared with the original MogDB, Dolphin modifies the bit string types as follows: - -1. The data of the bit type is of variable length up to the maximum length *n*. Longer strings will be rejected. The data of the **bit varying** type is of variable length up to the maximum length *n*. Longer strings will be rejected. -2. If one explicitly casts a bit-string value to **bit(n)**, it will be truncated or zero-padded on the left to be exactly *n* bits, without raising an error. - -```sql ---Create a table. -MogDB=# CREATE TABLE bit_type_t1 -( - BT_COL1 INTEGER, - BT_COL2 BIT(3), - BT_COL3 BIT VARYING(5) -) ; - ---Data is converted if it exceeds the length of this data type. -MogDB=# INSERT INTO bit_type_t1 VALUES(2, B'1000'::bit(3), B'101'); - ---View data. -MogDB=# SELECT * FROM bit_type_t1; - bt_col1 | bt_col2 | bt_col3 ----------+---------+--------- - 2 | 100 | 101 -(1 row) - ---If a bit string is shorter than the target length, it is converted to bit(n) with zeros padded on the left. -MogDB=# SELECT B'10'::bit(4); - bit --------- - 0010 -(1 row) - ---Delete the table. -MogDB=# DROP TABLE bit_type_t1; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-bool-types.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-bool-types.md deleted file mode 100644 index b13bd724..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-bool-types.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: dolphin Boolean Type -summary: dolphin Boolean Type -author: zhang cuiping -date: 2022-10-24 ---- - -# Boolean Type - -Compared with the original MogDB, Dolphin modifies the Boolean type as follows: - -- The output representation of Boolean values is changed from 't' and 'f' to '1' and '0'. This change applies only to tools other than gs_dump, gs_dumpall, gsql, gs_probackup, gs_rewind, and gs_clean, such as JDBC. - -For more information on the original MogDB Boolean type, see [Boolean Data Types](../../../../../../reference-guide/supported-data-types/boolean-data-types.md). - -## Example - -```sql --- Boolean type returns are still 't' and 'f' in gsql. 
-MogDB=# SELECT true; - bool ------- - t -(1 row) - -MogDB=# SELECT false; - bool ------- - f -(1 row) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-character-types.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-character-types.md deleted file mode 100644 index f13b8ed9..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-character-types.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: dolphin Character Types -summary: dolphin Character Types -author: zhang cuiping -date: 2022-10-24 ---- - -# Character Types - -Compared with the original MogDB, Dolphin modifies the character types as follows: - -1. The meaning of **n** of the CHARACTER/NCHAR type is modified. **n** indicates the character length instead of the byte length. -2. During comparison of all character data types, trailing spaces are ignored, for example, in the WHERE and JOIN scenarios. For example, **'a '::text = 'a'::text** is true. For the VARCHAR, VARCHAR2, NVARCHAR2, NVARCHAR, TEXT, and CLOB types, HASH JOIN and HASH AGG ignore trailing spaces only when **string_hash_compatible** is set to **on**. -3. The optional modifier (n) is added for TEXT. That is, the usage of TEXT(n) is supported. **n** is meaningless and does not affect any performance. -4. The TINYTEXT(n)/MEDIUMTEXT(n)/LONGTEXT(n) data type is added, which is the alias of TEXT. **n** is meaningless and does not affect any performance. - -**Table 1** Character types - -| Name | Description | Storage Space | -| :-------------------------------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| CHAR(n)
CHARACTER(n)
NCHAR(n) | Character string with fixed length. Empty characters are filled in with blank spaces. **n** indicates the string length. If it is not specified, the default precision **1** is used. | The maximum size is 10 MB. | -| TEXT(n)
TINYTEXT(n)
MEDIUMTEXT(n)
LONGTEXT(n) | Character string with variable length. **n** has no actual meaning and does not affect any performance. | The maximum size is 1 GB - 1 byte. However, the size of the column description header and the size of the tuple (less than 1 GB - 1 byte) where the column is located must also be considered. Therefore, the maximum size of the TEXT type may be less than 1 GB - 1 byte. | - -Example: - -```sql ---Create a table. -MogDB=# CREATE TABLE char_type_t1 -( - CT_COL1 CHARACTER(4), - CT_COL2 TEXT(10), - CT_COL3 TINYTEXT(11), - CT_COL4 MEDIUMTEXT(12), - CT_COL5 LONGTEXT(13) -); - ---View a table structure. -MogDB=# \d char_type_t1 - Table "public.char_type_t1" - Column | Type | Modifiers ---------+--------------+----------- - ct_col1 | character(4) | - ct_col2 | text | - ct_col3 | text | - ct_col4 | text | - ct_col5 | text | - ---Insert data. -MogDB=# INSERT INTO char_type_t1 VALUES ('four'); -MogDB=# INSERT INTO char_type_t1 VALUES('e '); - ---View data. -MogDB=# SELECT CT_COL1,length(CT_COL1) FROM char_type_t1; - ct_col1 | length -----------+-------- - four | 4 - e | 1 -(2 rows) - ---Filter data. -MogDB=# SELECT CT_COL1 FROM char_type_t1 WHERE CT_COL1 = 'e'; - ct_col1 ---------- - e -(1 row) - -MogDB=# SELECT CT_COL1 FROM char_type_t1 WHERE CT_COL1 = 'e '; - ct_col1 ---------- - e -(1 row) - ---Delete the table. -MogDB=# DROP TABLE char_type_t1; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-data-types.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-data-types.md deleted file mode 100644 index 71dbe616..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-data-types.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: dolphin Data Types -summary: dolphin Data Types -author: Guo Huan -date: 2023-05-19 ---- - -# Data Types - -+ **[Numeric Types](dolphin-numeric-types.md)** -+ **[Character Types](dolphin-character-types.md)** -+ **[Boolean Type](dolphin-bool-types.md)** -+ **[M*-Compatible Time Types](dolphin-date-time-types.md)** -+ **[Bit String Types](dolphin-bit-string-types.md)** -+ **[ENUM Type](dolphin-enumeration-types.md)** -+ **[Binary Types](dolphin-binary-types.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-date-time-types.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-date-time-types.md deleted file mode 100644 index 533f99be..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-date-time-types.md +++ /dev/null @@ -1,250 +0,0 @@ ---- -title: dolphin M*-Compatible Time Types -summary: dolphin M*-Compatible Time Types -author: zhang cuiping -date: 2022-10-24 ---- - -# M*-Compatible Time Types - -Compared with the original MogDB, Dolphin modifies the date/time types as follows: - -1. The behavior of the date, time, datetime, and timestamp types is modified. -2. The year data type is added.
- > Note: Due to inherent characteristics of MogDB, it cannot be fully compatible with all features of the M* time data types. Therefore, use these features as described in this document and do not rely on behavior that is not described here. The compatible features cover the requirements of most scenarios. - -The following table lists the basic attributes after the time data type is compatible with the M* database. - -| Type | Description | Storage Space | Value Range (for Users) | Precision Range | Remarks | -| :----------- | :----------------------------------------------------------- | :------------ | :----------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| date | Indicates a date. | 4 bytes | 4713 BC ~ 5874897 AD | - | (1) The input must be a valid date. The month or number of days cannot be 0. (2) If the year is greater than or equal to 10000, the value must be in the YYYY-MM-DD format. (3) If BC or AD is not specified for the input data, AD is used by default. | -| time(p) | Indicates the time or a period of time (hours, minutes, and seconds) of a day. **p** indicates the precision. | 8 bytes | -838:59:59[.frac] ~ 838:59:59[.frac] | **p** indicates the precision after the decimal point. The value ranges from 0 to 6. If this parameter is not specified, the default value 0 is used. | - | -| datetime(p) | Indicates date and time without time zone information. **p** indicates the precision. | 8 bytes | 0 AD ~ 294276 AD | **p** indicates the precision after the decimal point. The value ranges from 0 to 6. If this parameter is not specified, the default value 0 is used. | (1) The input must be a valid date. The month or number of days cannot be 0. (2) If the entered year is greater than or equal to 10000, the year must be in the YYYY-MM-DD format. | -| timestamp(p) | Date and time with time zone information. **p** indicates the precision. | 8 bytes | 0 AD ~ 294276 AD | **p** indicates the precision after the decimal point. The value ranges from 0 to 6. If this parameter is not specified, the default value 0 is used. | (1) The input must be a valid date. The month or number of days cannot be 0. (2) Note that the timestamp type in the original MogDB database indicates a timestamp without a time zone. After compatibility, the timestamp type is close to the M* database, indicating a timestamp with a time zone. Therefore, compatibility issues exist. (3) If the entered year is greater than or equal to 10000, the year must be in the YYYY-MM-DD format. | -| year(w) | Indicates a year. **w** indicates the display width. The value of **year(4)** or **year** is in the YYYY format, and the value of **year(2)** is in the YY format. | 2 bytes | 1901 ~ 2155 | - | - | - -Remarks - -- Note that, for M*, in the CREATE TABLE or ALTER TABLE statement, if the precision is not specified when the time type (such as timestamp, datetime, and time) column attribute is defined, the default value 0 is used. When the cast(expr as typename) syntax is used for type conversion, if no precision is specified for the target type, the default precision is 0. Therefore, if you want to retain the input precision of the data, you need to explicitly use typmod. - - The same applies when **::** is used to convert to a compatible time type: if no precision is specified for the target type, the default precision 0 is used (see the sketch below). 
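The following minimal sketch illustrates the remarks above. It assumes a database created with **dbcompatibility 'B'**, as in the examples in this section; the result comments are illustrative rather than verbatim server output.

```sql
--Without an explicit precision, cast(expr as typename) uses the default precision 0,
--so the fractional seconds are rounded away.
MogDB=# SELECT cast('11:22:33.456' as time);
--Expected result: 11:22:33

--The :: conversion behaves the same way when no precision is specified.
MogDB=# SELECT '11:22:33.456'::time;
--Expected result: 11:22:33

--Explicitly specifying the typmod retains the input precision.
MogDB=# SELECT '11:22:33.456'::time(3);
--Expected result: 11:22:33.456
```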
- -### Date Type Inputs - -The following formats are supported: - -| Format | Description | -| :------------------------ | :-------------------- | -| 'YYYY-MM-DD' , 'YY-MM-DD' | Year, month, and date | -| 'YYYYMMDD', 'YYMMDD' | Year, month, and date | -| YYYYMMDD, YYMMDD | Year, month, and date | - -Remarks: - -- The input must be a valid date. The month or date cannot be 0. -- In M*, the year is originally limited to values below 10000. Therefore, to enter a date whose year is greater than or equal to 10000, use the YYYY-MM-DD format, for example, '10100-12-12'. -- Year 0000 is allowed. In addition, in MogDB, year 0000 is considered a leap year, so you can enter 0000-2-29 (which M* does not allow). - -Examples (Note that MogDB is compatible with B database.) - -```sql ---Create a table. -MogDB=# CREATE TABLE test_date( -MogDB(# dt date); -CREATE TABLE - ---Insert data. -MogDB=# INSERT INTO test_date VALUES ('2020-12-21'); -INSERT 0 1 -MogDB=# INSERT INTO test_date VALUES ('141221'); -INSERT 0 1 -MogDB=# INSERT INTO test_date VALUES (20151022); -INSERT 0 1 - ---View data. -MogDB=# SELECT * FROM test_date; - dt ------------- - 2020-12-21 - 2014-12-21 - 2015-10-22 -(3 rows) -``` - -### Time Type Inputs - -The following formats are supported: - -| Format | Description | -| :----------------------- | :----------------------------------------------------------- | -| '[-][D] hh:mm:ss[.frac]' | Indicates the hour, minute, and second. The value can be a negative number. **D** indicates the number of days. The value ranges from 0 to 34. | -| '[-]hhmmss[.frac]' | Hour, minute, and second | -| [-]hhmmss[.frac] | Hour, minute, and second | - -Remarks: - -- For the format 'hh:mm:ss', the loose input formats 'hh:mm' and 'ss' are also supported. -- When the integer 0 is entered, the value is **00:00:00**, which is also a zero value of the time type. -- After the time type is compatible, the value range can be greater than 24 hours. Do not convert the time type to the timetz type. - -Examples (Note that MogDB is compatible with B database.) - -```sql ---Create a table. -MogDB=# CREATE TABLE test_time( -MogDB(# ti time(2)); -CREATE TABLE - ---Insert data. -MogDB=# INSERT INTO test_time VALUES ('2 9:12:24.1234'); -INSERT 0 1 -MogDB=# INSERT INTO test_time VALUES ('-34:56:59.1234'); -INSERT 0 1 -MogDB=# INSERT INTO test_time VALUES (561234); -INSERT 0 1 - ---View data. -MogDB=# SELECT * FROM test_time; - ti --------------- - 57:12:24.12 - -34:56:59.12 - 56:12:34 -(3 rows) -``` - -### Datetime Type Inputs - -The following formats are supported: - -| Format | Description | -| :------------------------------------------------------- | :---------- | -| 'YYYY-MM-DD hh:mm:ss[.frac]', 'YY-MM-DD hh:mm:ss[.frac]' | Timestamp | -| 'YYYYMMDDhhmmss', 'YYMMDDhhmmss' | Timestamp | -| YYYYMMDDhhmmss, YYMMDDhhmmss | Timestamp | - -Remarks: - -- The input must be a valid date. The month or date cannot be 0. -- For the YYYYMMDDhhmmss and YYMMDDhhmmss formats, the first four digits of the character string are identified as the year only when the length of the character string is 8 or 14. In other cases, only the first two digits are identified as the year. -- If the input format is YYYYMMDDhhmmss or YYMMDDhhmmss, the length of the input integer must be 6, 8, 12, or 14; if it is not, zeros are padded before the integer until it reaches one of these lengths. If the length is 6, the value is in the YYMMDD format. If the length is 8, the value is in the YYYYMMDD format. If the length is 12, the value is in the YYMMDDhhmmss format. 
If the length is 14, the value is in the YYYYMMDDhhmmss format. -- If you want to enter a timestamp whose year is greater than or equal to 10000, use the 'YYYY-MM-DD hh:mm:ss[.frac]' format. - -Examples (Note that MogDB is compatible with B database.) - -```sql ---Create a table. -MogDB=# CREATE TABLE test_datetime( -MogDB(# dt datetime(2)); -CREATE TABLE - ---Insert data. -MogDB=# INSERT INTO test_datetime VALUES ('2020-11-08 02:31:25.961'); -INSERT 0 1 -MogDB=# INSERT INTO test_datetime VALUES (201112234512); -INSERT 0 1 - ---View data. -MogDB=# SELECT * FROM test_datetime; - dt ------------------------- - 2020-11-08 02:31:25.96 - 2020-11-12 23:45:12 -(2 rows) -``` - -### Timestamp Type Inputs - -The following formats are supported: - -| Format | Description | -| :----------------------------------------------------------- | :----------------------- | -| 'YYYY-MM-DD hh:mm:ss[.frac][+/-hh:mm:ss]', 'YY-MM-DD hh:mm:ss[.frac][+/-hh:mm:ss]' | Timestamp with time zone | -| 'YYYYMMDDhhmmss[.frac]', 'YYMMDDhhmmss[.frac]' | Timestamp with time zone | -| YYYYMMDDhhmmss[.frac], YYMMDDhhmmss[.frac] | Timestamp with time zone | - -Remarks: - -- The input must be a valid date. The month or date cannot be 0. -- The compatible timestamp type allows the time zone information [+/-hh:mm:ss] to be added after the format 'YYYY-MM-DD hh:mm:ss[.frac]'. -- If you want to enter a timestamp whose year is greater than or equal to 10000, use the 'YYYY-MM-DD hh:mm:ss[.frac]' format. -- Note that the timestamp type in the M* database is a timestamp without the time zone, and that in MogDB is a timestamp with the time zone. After compatibility, the timestamp type is stored as the timestamptz type internally. Pay attention to this difference before using the timestamp type. If you want to use a timestamp without a time zone, use the datetime type. -- Note: M* does not have the timestamp with[out] time zone syntax, but this syntax is still retained in MogDB. The timestamp with time zone is equivalent to the original MogDB timestamptz type, and timestamp without time zone is equivalent to the timestamp type in the original MogDB (not the compatible timestamp type, that is, the original timestamp without time zone). - -Examples (Note that MogDB is compatible with B database.) - -```sql ---Create a table. -MogDB=# CREATE TABLE test_timestamp( -MogDB(# ts timestamp(2)); -CREATE TABLE - ---Insert data. -MogDB=# INSERT INTO test_timestamp VALUES ('2012-10-21 23:55:23-12:12'); -INSERT 0 1 -MogDB=# INSERT INTO test_timestamp VALUES (201112234512); -INSERT 0 1 - ---View data. -MogDB=# SELECT * FROM test_timestamp; - ts ------------------------- - 2012-10-22 20:07:23 - 2020-11-12 23:45:12 -(2 rows) - ---Change the time zone. -MogDB=# SET TIME ZONE UTC; -SET ---View data. -MogDB=# SELECT * FROM test_timestamp; - ts ------------------------- - 2012-10-22 12:07:23 - 2020-11-12 15:45:12 -(2 rows) -``` - -### Inputs of the year/year(4) or year(2) Type - -The following formats are supported: - -| Format | Description | -| :----------- | :----------------------------------------------------------- | -| 'YYYY', 'YY' | Indicates a year. When you enter two digits, if the value is less than 70, add 2000 to the value. For example, 69 indicates 2069. If the value is greater than or equal to 70, add 1900 to the value. For example, 70 indicates 1970. | -| YYYY, YY | Year | - -Remarks - -- The three types accept the same input format and range. 
The only difference is that the output format of the year(2) type is only two digits. -- If **'0'** is entered, MogDB parses the value as the year 2000. However, if an integer **0** is entered, MogDB parses the value as 0, indicating the zero value of the year type. - -Examples (Note that MogDB is compatible with B database.) - -```sql ---Create a table. -MogDB=# CREATE TABLE test_year( -MogDB(# y year, -MogDB(# y2 year(2)); -CREATE TABLE - ---Insert data. -MogDB=# INSERT INTO test_year VALUES ('70', '70'); -INSERT 0 1 -MogDB=# INSERT INTO test_year VALUES ('69', '69'); -INSERT 0 1 -MogDB=# INSERT INTO test_year VALUES ('2069', '2069'); -INSERT 0 1 -MogDB=# INSERT INTO test_year VALUES ('1970', '1970'); -INSERT 0 1 - ---View data. -MogDB=# SELECT * FROM test_year; - y | y2 -------+---- - 1970 | 70 - 2069 | 69 - 2069 | 69 - 1970 | 70 -(4 rows) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-enumeration-types.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-enumeration-types.md deleted file mode 100644 index a135050f..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-enumeration-types.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: dolphin ENUM Type -summary: dolphin ENUM Type -author: zhang cuiping -date: 2022-10-24 ---- - -# ENUM Type - -The ENUM type is a string object whose value is selected from a list of values specified when a column is defined during table creation. To use the MySQL-compatible ENUM type, ensure that the database is created in B-compatibility mode, for example, **CREATE DATABASE test_db WITH DBCOMPATIBILITY 'B'**. - -## Creating and Using ENUM Columns - -- The enumerated value must be a character string. For example, to create a table that contains a column of ENUM type, run the following command: - - ```sql - CREATE TABLE shirts ( - name VARCHAR(40), - size ENUM('small', 'medium', 'large') - ); - INSERT INTO shirts (name, size) VALUES ('dress shirt','large'), ('t-shirt','medium'), - ('polo shirt','small'); - SELECT name, size FROM shirts WHERE size = 'medium'; - name | size - ---------+-------- - t-shirt | medium - (1 row) - ``` - -- The enumerated value string cannot contain 'anonymous_enum'. In addition, an existing type cannot be renamed to a name containing 'anonymous_enum'; otherwise, an error similar to the following is reported: - - ```sql - CREATE TYPE country_anonymous_enum_1 AS enum('CHINA','USA'); - ERROR: enum type name "country_anonymous_enum_1" can't contain "anonymous_enum" - ``` - -## Index of an Enumerated Value - -- Each enumerated value is assigned an index value starting from 1 based on the sequence of enumerated values in the column definition. - -- The index of the NULL value is 0. - -- The index refers to the position of the enumerated value in the list when the enumerated value is created, which is irrelevant to the position in the table. 
For example, a column specified as ENUM('male', 'female') has the following enumerated values and indexes: - - | Value | Index | - | :------- | :---- | - | NULL | 0 | - | 'male' | 1 | - | 'female' | 2 | - -- You can use the index number to insert enumerated values into an ENUM column or to filter enumerated values in the WHERE clause as follows: - - ```sql - INSERT INTO staff (name, gender) VALUES ('Jone',1); - SELECT name, gender FROM staff WHERE gender = 1; - name | gender - ------------+------- - Tom | male - Jone | male - (2 rows) - ``` - -- If the index value used exceeds the number of enumerated values or is a negative value, an error occurs. - - ```sql - INSERT INTO staff (name, gender) VALUES ('Lara',4); - ERROR: enum order 4 out of the enum value size: 2 - LINE 1: INSERT INTO staff (name, gender) VALUES ('Lara',4); - ^ - CONTEXT: referenced column: gender - ``` - -## Null Values and Empty Strings - -- The enumerated value can be NULL, and the empty string **''** is also considered as NULL. -- If you insert an invalid value (that is, a string that does not exist in the enumerated value list) into an ENUM column, an error occurs. - -## Enumeration Restrictions - -- Numbers cannot be used as enumerated values. If you want to use a number as an enumerated value, enclose it in quotation marks to convert it into a string. If there is no quotation mark, the number is used as an index. -- The ENUM definition cannot contain duplicate enumerated values. -- The ENUM value can contain a maximum of 63 characters. -- There is no restriction on the maximum number of elements in the enumerated values of ENUM. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-numeric-types.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-numeric-types.md deleted file mode 100644 index bac4a54b..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/data-types/dolphin-numeric-types.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -title: dolphin Numeric Types -summary: dolphin Numeric Types -author: zhang cuiping -date: 2022-10-24 ---- - -# Numeric Types - -For details about arithmetic operators and related built-in functions, see [Arithmetic Functions and Operators](../../../../../../reference-guide/functions-and-operators/mathematical-functions-and-operators.md). - -Compared with the original MogDB, Dolphin modifies the arithmetic types as follows: - -1. The INT, TINYINT, SMALLINT, and BIGINT types support the optional modifier (n), that is, the usage of INT(n), TINYINT(n), SMALLINT(n), and BIGINT(n) is supported. **n** is meaningless and does not affect any performance. -2. The MEDIUMINT(n) data type is added, which is the alias of INT4. **n** is meaningless and does not affect any performance. The storage space is 4 bytes, and the data ranges from -2,147,483,648 to +2,147,483,647. -3. The FIXED[(p[,s])] data type is added, which is the alias of the NUMERIC type. The precision is specified by users. Two bytes are occupied for every four decimals of precision. An extra eight-byte overhead is added for numbers of this type. When no precision is specified, up to 131,072 digits before the decimal point and up to 16,383 digits after the decimal point are supported. -4. 
The float4(p[,s]) mode is added, which is equivalent to dec(p[,s]). -5. The double data type is added, which is the alias of float8. -6. The new float4 and float support the modifier (n). That is, float4(n) and float(n) are supported. When **n** is in the range [1,24], float4(n) and float(n) indicate a single-precision floating point number. When **n** is in the range [25,53], float4(n) and float(n) indicate a double-precision floating point number. -7. For the decimal data type, if the precision is not specified, the default precision is (10,0). That is, the total number of digits is 10 and the number of decimal places is 0. -8. The UNSIGNED INT, TINYINT, SMALLINT, and BIGINT types are added. Compared with a common integer, the most significant bit of the UNSIGNED INT, TINYINT, SMALLINT, BIGINT type is a value bit instead of a sign bit. -9. The zerofill attribute is added, which is supported only in syntax and does not have the effect of filling zeros. It is equivalent to UNSIGNED. - -**Table 1** Integer types - -| Name | Description | Storage Space | Value Range | -| :-------------------- | :----------------------------------------------------------- | :------------ | :------------------------------------------------------ | -| TINYINT(n) | Tiny integer, also called INT1. **n** has no actual meaning and does not affect any performance. | 1 byte | 0 ~ 255 | -| SMALLINT(n) | Small integer, also called INT2. **n** has no actual meaning and does not affect any performance. | 2 bytes | -32,768 ~ +32,767 | -| INTEGER(n) | Typical choice for integers, also called INT4. **n** has no actual meaning and does not affect any performance. | 4 bytes | -2,147,483,648 ~ +2,147,483,647 | -| MEDIUMINT(n) | Alias of INT4. **n** is meaningless and does not affect any performance. | 4 bytes | -2,147,483,648 ~ +2,147,483,647 | -| BIGINT(n) | Big integer, also called INT8. **n** has no actual meaning and does not affect any performance. | 8 bytes | -9,223,372,036,854,775,808 ~ +9,223,372,036,854,775,807 | -| TINYINT(n) UNSIGNED | Unsigned tiny integer, also called UINT1. **n** has no actual meaning and does not affect any performance. | 1 byte | 0 ~ 255 | -| SMALLINT(n) UNSIGNED | Unsigned small integer, also called UINT2. **n** has no actual meaning and does not affect any performance. | 2 bytes | 0 ~ +65,535 | -| INTEGER(n) UNSIGNED | Unsigned integer, also called UINT4. **n** has no actual meaning and does not affect any performance. | 4 bytes | 0 ~ +4,294,967,295 | -| MEDIUMINT(n) UNSIGNED | Alias of UINT4. **n** is meaningless and does not affect any performance. | 4 bytes | 0 ~ +4,294,967,295 | -| BIGINT(n) UNSIGNED | Unsigned large integer, also called UINT8. **n** has no actual meaning and does not affect any performance. | 8 bytes | 0 ~ +18,446,744,073,709,551,615 | - -Example: - -```sql ---Create a table that contains data of the TINYINT(n), SMALLINT(n), MEDIUMINT(n), and BIGINT(n) types. -MogDB=# CREATE TABLE int_type_t1 - ( - IT_COL1 TINYINT(10), - IT_COL2 SMALLINT(20), - IT_COL3 MEDIUMINT(30), - IT_COL4 BIGINT(40), - IT_COL5 INTEGER(50) - ); - ---View the table structure. -MogDB=# \d int_type_t1 - Table "public.int_type_t1" - Column | Type | Modifiers ----------+----------+----------- - it_col1 | tinyint | - it_col2 | smallint | - it_col3 | integer | - it_col4 | bigint | - it_col5 | integer | - ---Create a table with the zerofill attribute column. 
-MogDB=# CREATE TABLE int_type_t2 - ( - IT_COL1 TINYINT(10) zerofill, - IT_COL2 SMALLINT(20) unsigned zerofill, - IT_COL3 MEDIUMINT(30) unsigned, - IT_COL4 BIGINT(40) zerofill, - IT_COL5 INTEGER(50) zerofill - ); - ---View the table structure. -MogDB=# \d int_type_t2 - Table "public.int_type_t2" - Column | Type | Modifiers ----------+-------+----------- - it_col1 | uint1 | - it_col2 | uint2 | - it_col3 | uint4 | - it_col4 | uint8 | - it_col5 | uint4 | - ---Delete the tables. -MogDB=# DROP TABLE int_type_t1, int_type_t2; -``` - -**Table 2** Arbitrary precision types - -| Name | Description | Storage Space | Value Range | -| :---------------------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| NUMERIC[(p[,s])], DECIMAL[(p[,s])], FIXED[(p[,s])] | The value range of p (precision) is [1,1000], and the value range of s (scale) is [0,p].
Note:
p indicates the total digits, and s indicates the decimal digits. | The precision is specified by users. Two bytes are occupied for every four decimals of precision. An extra eight-byte overhead is added for numbers of this type. | If the precision is not specified, the value is equivalent to (10,0), that is, a maximum of 10 digits before the decimal point and 0 digits after the decimal point. | - -Example: - -```sql ---Create a table with FIXED(p,s), FIXED, and decimal data. -MogDB=# CREATE TABLE dec_type_t1 - ( - DEC_COL1 FIXED, - DEC_COL2 FIXED(20,5), - DEC_COL3 DECIMAL - ); - ---View the table structure. -MogDB=# \d dec_type_t1 - Table "public.dec_type_t1" - Column | Type | Modifiers -----------+---------------+----------- - dec_col1 | numeric(10,0) | - dec_col2 | numeric(20,5) | - dec_col3 | numeric(10,0) | - ---Delete a table. -MogDB=# DROP TABLE dec_type_t1; -``` - -**Table 3** Floating-point types - -| Name | Description | Storage Space | Value Range | -| :----------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| FLOAT[(p)],FLOAT4[(p)] | Floating point, which is not very precise. The value range of **p** (precision) is [1,53]. | 4 bytes or 8 bytes | When the precision p is between [1,24], the option REAL is used as the internal identifier. When the precision p is between [25,53], the option DOUBLE PRECISION is used as the internal identifier. If no precision is specified, REAL is used as the internal identifier. | -| DOUBLE PRECISION,FLOAT8,DOUBLE | Double-precision floating point, which is not very precise. | 8 bytes | –1.79E+308 to 1.79E+308, 15-bit decimal digits. | -| FLOAT4(p,s) | The value range of p (precision) is [1,1000], and the value range of s (scale) is [0,p].
Note:
p indicates the total digits, and s indicates the decimal digits. It is equivalent to dec(p,s). | The precision is specified by users. Two bytes are occupied for every four decimals of precision. An extra eight-byte overhead is added for numbers of this type. | | - -Example: - -```sql ---Create a table that contains data of the float4(p,s), double, float4(n), and float(n) types. -MogDB=# CREATE TABLE float_type_t1 - ( - F_COL1 FLOAT4(10,4), - F_COL2 DOUBLE, - F_COL3 float4(10), - F_COL4 float4(30), - F_COL5 float(10), - F_COL6 float(30) - ); - ---View the table structure. -MogDB=# \d float_type_t1 - Table "public.float_type_t1" - Column | Type | Modifiers ---------+------------------+----------- - f_col1 | numeric(10,4) | - f_col2 | double precision | - f_col3 | real | - f_col4 | double precision | - f_col5 | real | - f_col6 | double precision | - ---Delete the table. -MogDB=# DROP TABLE float_type_t1; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-dcl-syntax.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-dcl-syntax.md deleted file mode 100644 index 4efb20a2..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-dcl-syntax.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: dolphin DCL Syntax Overview -summary: dolphin DCL Syntax Overview -author: zhang cuiping -date: 2022-10-24 ---- - -# DCL Syntax Overview - -Data control language (DCL) is used to create users and roles and set or modify database user or role rights. - -## SHOW - -MogDB can use the SHOW command to display various object information. For details about related SQL statements, see Table 1. - -**Table 1** SQL statements related to SHOW - -| Function | SQL Statement | -| :----------------------------------------------------------- | :----------------------------------------------------------- | -| Display index information. | [SHOW INDEX](sql-syntax/dolphin-show-index.md) | -| Query the information about the current external connection (or internal thread). | [SHOW PROCESSLIST](sql-syntax/dolphin-show-processlist.md) | -| Display the statement for creating a table. | [SHOW CREATE TABLE](sql-syntax/dolphin-create-table.md) | -| Display the statement for creating a function. | SHOW CREATE FUNCTION | -| Display the statement for creating a stored procedure. | SHOW CREATE PROCEDURE | -| Display the statement for creating a database. | SHOW CREATE DATABASE | -| Display the statement for creating a trigger. | SHOW CREATE TRIGGER | -| Display the statement for creating a view. | SHOW CREATE VIEW | -| Query GUC parameters. | SHOW VARIABLES | -| Display the user permission information in MogDB. | [SHOW GRANTS](sql-syntax/dolphin-show-grants.md) | -| Display information about stored functions. | [SHOW FUNCTION STATUS](sql-syntax/dolphin-show-function-status.md) | -| Display the information about the stored procedure. | [SHOW PROCEDURE STATUS](sql-syntax/dolphin-show-procedure-status.md) | -| Display information about the trigger. | [SHOW TRIGGERS](sql-syntax/dolphin-show-triggers.md) | -| Display the information about the supported character sets. | SHOW CHARACTER SET, SHOW CHARSET | -| Display information about the supported collations. 
| SHOW COLLATION | - -## GRANT - -MogDB allows you to run the GRANT command to grant various permissions. For details about related SQL statements, see Table 2. - -**Table 2** SQL statements related to GRANT - -| Function | SQL Statement | -| :--------------------------------------------------------- | :----------------------------------------------------------- | -| Grant the permission to create indexes. | [GRANT INDEX](../../../../../reference-guide/sql-syntax/GRANT.md) | -| Grant the permission to modify functions and procedures. | [GRANT ALTER ROUTINE](../../../../../reference-guide/sql-syntax/GRANT.md) | -| Grant the permission to create functions and procedures. | [GRANT CREATE ROUTINE](../../../../../reference-guide/sql-syntax/GRANT.md) | -| Grant the permission to create temporary tables. | [GRANT CREATE TEMPORARY TABLES](../../../../../reference-guide/sql-syntax/GRANT.md) | -| Grant the permission to the current user to create a user. | [GRANT CREATE USER](../../../../../reference-guide/sql-syntax/GRANT.md) | -| Grant the permission to create tablespaces. | [GRANT CREATE TABLESPACE](../../../../../reference-guide/sql-syntax/GRANT.md) | -| Grant the proxy permission. | [GRANT PROXY](../../../../../reference-guide/sql-syntax/GRANT.md) | - -## REVOKE - -MogDB allows you to run the REVOKE command to revoke various permissions. For details about related SQL statements, see Table 3. - -**Table 3** SQL statements related to REVOKE - -| Function | SQL Statement | -| :--------------------------------------------------------- | :----------------------------------------------------------- | -| Revoke the permission to create indexes. | [REVOKE INDEX](../../../../../reference-guide/sql-syntax/REVOKE.md) | -| Revoke the permission to modify functions and procedures. | [REVOKE ALTER ROUTINE](../../../../../reference-guide/sql-syntax/REVOKE.md) | -| Revoke the permission to create functions and procedures. | [REVOKE CREATE ROUTINE](../../../../../reference-guide/sql-syntax/REVOKE.md) | -| Revoke the permission to create temporary tables. | [REVOKE CREATE TEMPORARY TABLES](../../../../../reference-guide/sql-syntax/REVOKE.md) | -| Revoke the current user's permission to create a user. | [REVOKE CREATE USER](../../../../../reference-guide/sql-syntax/REVOKE.md) | -| Revoke the permission to create tablespaces. | [REVOKE CREATE TABLESPACE](../../../../../reference-guide/sql-syntax/REVOKE.md) | -| Revoke the proxy permission. | [REVOKE PROXY](../../../../../reference-guide/sql-syntax/REVOKE.md) | - -## KILL - -MogDB allows you to run the KILL command to terminate a specified connection or SQL statements executed under the connection. The following table lists the related SQL statements. - -**Table 4** SQL statements related to KILL - -| Function | SQL Statement | -| :----------------------------------------------------------- | :--------------------------------- | -| Terminate a specified connection or an SQL statement executed under the connection. | [KILL](sql-syntax/dolphin-kill.md) | - -## SET PASSWORD - -MogDB allows you to run the SET PASSWORD command to change the user password. The following table lists the related SQL statements. - -**Table 5** SQL statements related to SET PASSWORD - -| Function | SQL Statement | -| :------------------------ | :------------------------------------------------- | -| Change the user password. 
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-ddl-syntax.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-ddl-syntax.md
deleted file mode 100644
index c49c9bf8..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-ddl-syntax.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: dolphin DDL Syntax Overview
-summary: dolphin DDL Syntax Overview
-author: zhang cuiping
-date: 2022-10-24
----
-
-# DDL Syntax Overview
-
-Data definition language (DDL) is used to define or modify an object in a database, such as a table, index, or view.
-
-## Defining a Tablespace
-
-A tablespace is used to manage data objects and corresponds to a directory on a disk. For details about the involved SQL statements, see Table 5.
-
-**Table 5** SQL statements for defining a tablespace
-
-| Function | Related SQL Statements |
-| :--- | :--- |
-| Create a tablespace. | [CREATE TABLESPACE](../../../../../reference-guide/sql-syntax/CREATE-TABLESPACE.md) |
-
-## Defining a Table
-
-A table is a special data structure in a database and is used to store data objects and the relationships between data objects. For details about the involved SQL statements, see Table 1.
-
-**Table 1** SQL statements for defining a table
-
-| Function | Related SQL Statements |
-| :--- | :--- |
-| Modify table attributes. | [ALTER TABLE](../../../../../reference-guide/sql-syntax/ALTER-TABLE.md) |
-
-## Defining a Partitioned Table
-
-A partitioned table is a special data structure in a database and is used to store data objects and the relationships between data objects. For details about the involved SQL statements, see Table 7.
-
-**Table 7** SQL statements for defining a partitioned table
-
-| Function | Related SQL Statements |
-| :--- | :--- |
-| Create a partitioned table. | [CREATE TABLE PARTITION](../../../../../reference-guide/sql-syntax/CREATE-TABLE-PARTITION.md) |
-| Create a partition. | [ALTER TABLE PARTITION](../../../../../reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md) |
-| Modify the attributes of a partitioned table. | [ALTER TABLE PARTITION](../../../../../reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md) |
-| Delete a partition. | [ALTER TABLE PARTITION](../../../../../reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md) |
-
-## Defining an Index
-
-An index is an ordered structure built on the values of one or more columns of a database table; it speeds up access to specific data in the table. For details about the involved SQL statements, see Table 2.
-
-**Table 2** SQL statements for defining an index
-
-| Function | Related SQL Statements |
-| :--- | :--- |
-| Define an index. 
| [CREATE INDEX](../../../../../reference-guide/sql-syntax/CREATE-INDEX.md) | diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-dml-syntax.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-dml-syntax.md deleted file mode 100644 index b63d73f0..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-dml-syntax.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: dolphin DML Syntax Overview -summary: dolphin DML Syntax Overview -author: zhang cuiping -date: 2022-10-24 ---- - -# DML Syntax Overview - -Data manipulation language (DML) is used to perform operations on data in database tables, such as inserting, updating, querying, or deleting data. - -## Inserting Data - -Inserting data refers to adding one or multiple records to a database table. For details, see [INSERT](../../../../../reference-guide/sql-syntax/INSERT.md). - -## Modifying Data - -Updating data refers to modifying one or multiple records in a database table. For details, see [UPDATE](../../../../../reference-guide/sql-syntax/UPDATE.md). - -## Querying Data - -The database query statement SELECT is used to search for required information in a database. For details, see [SELECT](../../../../../reference-guide/sql-syntax/SELECT.md). \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-keywords.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-keywords.md deleted file mode 100644 index 7a741c0e..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-keywords.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: dolphin Keywords -summary: dolphin Keywords -author: zhang cuiping -date: 2022-10-24 ---- - -# Keywords - -The SQL contains reserved and non-reserved words. Standards require that reserved keywords not be used as other identifiers. Non-reserved keywords have special meanings only in a specific environment and can be used as identifiers in other environments. - -The naming rules for identifiers are as follows: - -- An identifier name can only contain letters, underscores, digits (0-9), and dollar signs ($). - -- An identifier name must start with a letter (a to z) or an underscore (_). - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - The naming rules are recommended but not mandatory. - > - In special cases, double quotation marks (") can be used to avoid special character errors. - -Compared with the original MogDB, Dolphin modifies keywords as follows: - -1. MEDIUMINT is added as a non-reserved keyword. -2. The keyword `DATE` can be used as a function. -3. `LAST_DAY` is added as a reserved keyword to distinguish the original LAST_DAY function of MogDB from the LAST_DAY function of Dolphin at the syntax level. 
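-
-   As a hedged illustration of item 3 (the table name `t1` is an assumption): once `LAST_DAY` is reserved, a bare `last_day` identifier is rejected, while a double-quoted identifier still works, as the note above suggests.
-
-   ```sql
-   -- Fails under Dolphin because LAST_DAY is a reserved keyword:
-   -- CREATE TABLE t1 (last_day date);
-   -- Quoting the identifier avoids the keyword conflict:
-   CREATE TABLE t1 ("last_day" date);
-   DROP TABLE t1;
-   ```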
- -**Table 1** SQL keywords - -| Keyword | MogDB | SQL:1999 | SQL-92 | -| :-------- | :------------------------------------------- | :------- | :----- | -| FORMAT | Non-reserved (excluding functions and types) | - | - | -| IF | Non-reserved (excluding functions and types) | - | - | -| KEYS | Non-reserved | - | - | -| MEDIUMINT | Non-reserved (excluding functions and types) | - | - | -| SIGNED | Non-reserved (excluding functions and types) | - | - | -| UNSIGNED | Non-reserved (excluding functions and types) | - | - | -| ZEROFILL | Non-reserved | - | - | -| DATE | Non-reserved (a function or type) | - | - | -| LAST_DAY | Reserved | - | - | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-sql-reference.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-sql-reference.md deleted file mode 100644 index 391c79e2..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/dolphin-sql-reference.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: dolphin SQL Reference -summary: dolphin SQL Reference -author: Guo Huan -date: 2023-05-22 ---- - -# SQL Reference - -- **[Keywords](dolphin-keywords.md)** -- **[Data Types](data-types/dolphin-data-types.md)** -- **[Functions and Operators](functions-and-operators/dolphin-functions-and-operators.md)** -- **[Expressions](expressions/dolphin-expressions.md)** -- **[DDL Syntax Overview](dolphin-ddl-syntax.md)** -- **[DML Syntax Overview](dolphin-dml-syntax.md)** -- **[DCL Syntax Overview](dolphin-dcl-syntax.md)** -- **[SQL Syntax](sql-syntax/dolphin-sql-syntax.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/expressions/dolphin-conditional-expressions.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/expressions/dolphin-conditional-expressions.md deleted file mode 100644 index b4b6b906..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/expressions/dolphin-conditional-expressions.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: dolphin Conditional Expressions -summary: dolphin Conditional Expressions -author: zhang cuiping -date: 2022-10-24 ---- - -# Conditional Expressions - -Compared with the original openGauss, Dolphin modifies the condition expressions as follows: - -1. The IFNULL and IF expressions are added. - -- IFNULL - - It is equivalent to NVL. For the NVL syntax, see the following figure - - **Figure 1** nvl::= - ![nvl](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/dolphin-nvl.png) - - If the value of **value1** is **NULL**, the value of **value2** is returned. Otherwise, the value of **value1** is returned. - - Example: - - ```sql - MogDB=# SELECT ifnull(null,1); - ifnull - ------- - 1 - (1 row) - ``` - - ```sql - MogDB=# SELECT ifnull ('Hello World' ,1); - ifnull - ------------- - Hello World - (1 row) - ``` - -- IF - - Only IF(expr1,expr2,expr3) is supported, which is equivalent to CASE WHEN expr1 THEN expr2 ELSE expr3 END. 
- - For the CASE syntax, see the following figure - - **Figure 2** case::= - ![case](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/dolphin-case.png) - - A **CASE** clause can be used in a valid expression. **condition** is an expression that returns a value of Boolean type. - - - If the result is **true**, the result of the **CASE** expression is the required result. - - If the result is **false**, the following **WHEN** or **ELSE** clauses are processed in the same way. - - If every **WHEN condition** is **false**, the result of the expression is the result of the **ELSE** clause. If the **ELSE** clause is omitted and has no match condition, the result is **NULL**. - - Example: - - ```sql - MogDB=# CREATE TABLE case_when_t1(CW_COL1 INT); - - MogDB=# INSERT INTO case_when_t1 VALUES (1), (2), (3); - - MogDB=# SELECT * FROM case_when_t1; - cw_col1 - --------- - 1 - 2 - 3 - (3 rows) - - MogDB=# SELECT CW_COL1, IF(CW_COL1=1, 'one', 'other') FROM case_when_t1 ORDER BY 1; - cw_col1 | case - ---------+------- - 1 | one - 2 | other - 3 | other - (3 rows) - - MogDB=# DROP TABLE case_when_t1; - ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/expressions/dolphin-expressions.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/expressions/dolphin-expressions.md deleted file mode 100644 index 37c5df67..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/expressions/dolphin-expressions.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: dolphin Expressions -summary: dolphin Expressions -author: Guo Huan -date: 2023-05-19 ---- - -# Expressions - -- **[Conditional Expressions](dolphin-conditional-expressions.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-advisory-lock-functions.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-advisory-lock-functions.md deleted file mode 100644 index 3d0b86ba..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-advisory-lock-functions.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -title: dolphin Advisory Lock Functions -summary: dolphin Advisory Lock Functions -author: zhang cuiping -date: 2022-10-24 ---- - -# Advisory Lock Functions - -Advisory lock functions manage advisory locks. - -- pg_advisory_lock(key bigint) - - Description: Obtains an exclusive session-level advisory lock. - - Return type: void - - Note: **pg_advisory_lock** locks resources defined by an application. The resources can be identified using a 64-bit or two unoverlapped 32-bit key values. If another session locks the resources, the function blocks the resources until they can be used. The lock is exclusive. Multiple locking requests are pushed into the stack. Therefore, if the same resource is locked three times, it must be unlocked three times so that it is released to another session. - -- pg_advisory_lock(key1 int, key2 int) - - Description: Obtains an exclusive session-level advisory lock. 
- - Return type: void - - Note: Only the sysadmin user is allowed to add a session-level exclusive advisory lock to the key-value pair (65535, 65535). Common users do not have the permission. - -- pg_advisory_lock(int4, int4, Name) - - Description: Obtains the exclusive advisory lock of a specified database. - - Return type: void - -- pg_advisory_lock_shared(key bigint) - - Description: Obtains a shared session-level advisory lock. - - Return type: void - -- pg_advisory_lock_shared(key1 int, key2 int) - - Description: Obtains a shared session-level advisory lock. - - Return type: void - - Note: pg_advisory_lock_shared is similar to pg_advisory_lock. The only difference is that a shared lock session can share resources with other sessions that request a shared lock, except for exclusive locks. - -- pg_advisory_unlock(key bigint) - - Description: Releases an exclusive session-level advisory lock. - - Return type: Boolean - -- pg_advisory_unlock(key1 int, key2 int) - - Description: Releases an exclusive session-level advisory lock. - - Return type: Boolean - - Note: pg_advisory_unlock releases the obtained exclusive advisory lock. If the release is successful, the function returns **true**. If the lock was not held, it will return **false**. In addition, a SQL warning will be reported by the server. - -- pg_advisory_unlock(int4, int4, Name) - - Description: Releases the exclusive advisory lock of a specified database. - - Return type: Boolean - - Note: If the release is successful, **true** is returned. If no lock is held, **false** is returned. - -- pg_advisory_unlock_shared(key bigint) - - Description: Releases a shared session level advisory lock. - - Return type: Boolean - -- pg_advisory_unlock_shared(key1 int, key2 int) - - Description: Releases a shared session level advisory lock. - - Return type: Boolean - - Note: pg_advisory_unlock_shared is similar to pg_advisory_unlock. The difference is that this function releases a shared advisory lock. - -- pg_advisory_unlock_all() - - Description: Releases all advisory locks owned by the current session. - - Return type: void - - Note: **pg_advisory_unlock_all** releases all advisory locks owned by the current session. The function is implicitly invoked when the session ends even if the client is abnormally disconnected. - -- pg_advisory_xact_lock(key bigint) - - Description: Obtains an exclusive transaction-level advisory lock. - - Return type: void - -- pg_advisory_xact_lock(key1 int, key2 int) - - Description: Obtains an exclusive transaction-level advisory lock. - - Return type: void - - Note: pg_advisory_xact_lock is similar to pg_advisory_lock. The difference is that locks are automatically released at the end of the current transaction and cannot be explicitly released. Only the sysadmin user is allowed to add a transaction-level exclusive advisory lock to the key-value pair (65535, 65535). Common users do not have the permission. - -- pg_advisory_xact_lock_shared(key bigint) - - Description: Obtains a shared transaction-level advisory lock. - - Return type: void - -- pg_advisory_xact_lock_shared(key1 int, key2 int) - - Description: Obtains a shared transaction-level advisory lock. - - Return type: void - - Note: pg_advisory_xact_lock_shared is similar to pg_advisory_lock_shared. The difference is that locks are automatically released at the end of the current transaction and cannot be explicitly released. - -- pg_try_advisory_lock(key bigint) - - Description: Obtains exclusive session level advisory lock if available. 
- - Return type: Boolean - - Note: pg_try_advisory_lock is similar to pg_advisory_lock. The difference is that this function is not blocked to wait for resource release. It either immediately obtains the lock and returns **true** or returns **false**, which indicates the lock cannot be performed currently. - -- pg_try_advisory_lock(key1 int, key2 int) - - Description: Obtains exclusive session level advisory lock if available. - - Return type: Boolean - - Note: Only the sysadmin user is allowed to add a session-level exclusive advisory lock to the key-value pair (65535, 65535). Common users do not have the permission. - -- pg_try_advisory_lock_shared(key bigint) - - Description: Obtains a shared session-level advisory lock if available. - - Return type: Boolean - -- pg_try_advisory_lock_shared(key1 int, key2 int) - - Description: Obtains a shared session-level advisory lock if available. - - Return type: Boolean - - Note: pg_try_advisory_lock_shared is similar to pg_try_advisory_lock. The difference is that pg_try_advisory_lock_shared attempts to obtain a shared lock instead of an exclusive lock. - -- pg_try_advisory_xact_lock(key bigint) - - Description: Obtains an exclusive transaction-level advisory lock if available. - - Return type: Boolean - -- pg_try_advisory_xact_lock(key1 int, key2 int) - - Description: Obtains exclusive transaction level advisory lock if available. - - Return type: Boolean - - Note: pg_try_advisory_xact_lock is similar to pg_try_advisory_lock. The difference is that if a lock is obtained, it is automatically released at the end of the current transaction and cannot be explicitly released. Only the sysadmin user is allowed to add a transaction-level exclusive advisory lock to the key-value pair (65535, 65535). Common users do not have the permission. - -- pg_try_advisory_xact_lock_shared(key bigint) - - Description: Obtains a shared transaction-level advisory lock if available. - - Return type: Boolean - -- pg_try_advisory_xact_lock_shared(key1 int, key2 int) - - Description: Obtains a shared transaction-level advisory lock if available. - - Return type: Boolean - - Note: pg_try_advisory_xact_lock_shared is similar to pg_try_advisory_lock_shared. The difference is that if a lock is obtained, it is automatically released at the end of the current transaction and cannot be explicitly released. - -- lock_cluster_ddl() - - Description: Attempts to obtain a session-level exclusive advisory lock for all active primary database nodes in openGauss. - - Return type: Boolean - - Note: Only the sysadmin user can call this function. Common users do not have the permission. - -- unlock_cluster_ddl() - - Description: Attempts to add a session-level exclusive advisory lock on the primary database node. - - Return type: Boolean - -- pg_catalog.get_lock(text,text) - - Description: Adds a user lock to the database with a specified character string. The second parameter is the lock waiting time. - - Return type: Int - -- pg_catalog.get_lock(text,double) - - Description: Adds a user lock to the database with a specified character string. The second parameter is the lock waiting time. - - Return type: Int - -- pg_catalog.get_lock(text) - - Description: Adds a user lock to the database with a specified character string. - - Return type: Int - -- pg_catalog.release_lock(text) - - Description: Releases a specified lock. If the lock is successfully released, **1** is returned. If the current session does not hold the specified lock, **0** is returned. 
If the current lock does not exist (the lock must be held), **NULL** is returned. - - Return type: Int - -- pg_catalog.is_free_lock(text) - - Description: Checks whether a string is idle. If the string is not locked, **1** is returned. Otherwise, **0** is returned. If other errors occur during the check, **NULL** is returned. - - Return type: Int - -- pg_catalog.is_used_lock(text) - - Description: Checks who holds the lock of a string and returns the session ID of the corresponding user. If the specified lock is not held, **NULL** is returned. - - Return type: Bigint - -- pg_catalog.clear_all_invalid_locks() - - Description: Clears information about invalid locks in the lockname hash table and returns the number of cleared locks. - - Return type: Bigint - -- pg_catalog.release_all_locks() - - Description: Releases all locks held by the current session and returns the number of release times. If a single string holds multiple locks, the number of release times is calculated based on the corresponding number instead of only once. - - Return type: Bigint - -- pg_catalog.get_all_locks() - - Description: Queries all user locks in the current database and returns the names and holders of all user locks in the form of records. - - Return type: Record \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-aggregate-functions.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-aggregate-functions.md deleted file mode 100644 index ce5aab29..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-aggregate-functions.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: dolphin Aggregate Functions -summary: dolphin Aggregate Functions -author: Guo Huan -date: 2023-05-12 ---- - -# Aggregate Functions - -- any_value(expression) - - Description: Any expression in all input lines (the first expression by default) - - Argument types: any set, numeric, string, or date/time type - - Return type: same as the argument data type - - Example: - - ```sql - MogDB=# create table test_any_value(a int, b int); - CREATE TABLE - MogDB=# insert into test_any_value values(1,1),(2,1),(3,2),(4,2); - INSERT 0 4 - MogDB=# select any_value(a), b from test_any_value group by b; - any_value | b - -----------+--- - 1 | 1 - 3 | 2 - (2 rows) - ``` - -- default(column_name) - - Description: Gets the default value output for a table field. - - Return type: text - - Example: - - ```sql - MogDB=# create database test dbcompatibility 'B'; - CREATE DATABASE - MogDB=# \c test - Non-SSL connection (SSL connection is recommended when requiring high-security) - You are now connected to database "test" as user "test". 
-    test=# CREATE TABLE TEST(id int default 100, stime timestamp default now());
-    CREATE TABLE
-    test=# insert into test values(1, now());
-    INSERT 0 1
-    test=# select default(id) from test;
-     mode_b_default
-    ----------------
-                100
-    (1 row)
-
-    test=# select default(stime) from test;
-     mode_b_default
-    ----------------
-
-    (1 row)
-
-    test=# insert into test values(default(id) + 10);
-    INSERT 0 1
-    test=# update test set id = default(id) - 10;
-    UPDATE 2
-    test=# delete from test where id = default(id) - 10;
-    DELETE 2
-    ```
-
-- When the default value of a field is a function, the default function returns null.
-
-- The default function can be used only in DML statements.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-arithmetic-functions-and-operators.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-arithmetic-functions-and-operators.md
deleted file mode 100644
index cfcd5a98..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-arithmetic-functions-and-operators.md
+++ /dev/null
@@ -1,125 +0,0 @@
----
-title: dolphin Arithmetic Functions and Operators
-summary: dolphin Arithmetic Functions and Operators
-author: zhang cuiping
-date: 2022-10-24
----
-
-# Arithmetic Functions and Operators
-
-Compared with the original MogDB, Dolphin modifies the arithmetic functions and operators as follows:
-
-1. The DIV, MOD, and XOR operators are added.
-2. The truncate, rand, crc32, and conv functions are added.
-
-- DIV
-
-  Description: Integer division (the fractional part of the result is discarded)
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT 8 DIV 3 AS RESULT;
-   result
-  --------
-        2
-  (1 row)
-  ```
-
-- MOD
-
-  Description: Modulo (obtains the remainder)
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT 4 MOD 3 AS RESULT;
-   result
-  --------
-        1
-  (1 row)
-  ```
-
-- XOR
-
-  Description: Logical XOR (both operands are evaluated as Boolean values; 4 XOR 3 is true XOR true, which yields 0)
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT 4 XOR 3 AS RESULT;
-   result
-  --------
-        0
-  (1 row)
-  ```
-
-- truncate(v numeric, s int)
-
-  Description: Truncates a number to **s** digits after the decimal point. It is equivalent to trunc.
-
-  Return type: numeric
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT truncate(42.4382, 2);
-   truncate
-  ----------
-      42.43
-  (1 row)
-  ```
-
-- rand()
-
-  Description: Returns a random number between 0.0 and 1.0. It is equivalent to random.
-
-  Return type: double precision
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT rand();
-         rand
-  -------------------
-   0.254671605769545
-  (1 row)
-  ```
-
-- crc32(string)
-
-  Description: Calculates the crc32 value of string.
-
-  Return type: int
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT crc32('abc');
-     crc32
-  -----------
-   891568578
-  (1 row)
-  ```
-
-- conv(input in, current_base int, new_base int)
-
-  Description: Converts a number or string from one number base system to another. The value of **in** can be a number or a character string.
- - Return type: text - - Example: - - ```sql - MogDB=# SELECT conv(20, 10, 2); - conv - ------- - 10100 - (1 row) - - MogDB=# SELECT conv('8D', 16, 10); - conv - ------ - 141 - (1 row) - ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-assignment-operators.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-assignment-operators.md deleted file mode 100644 index 216a66cf..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-assignment-operators.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: dolphin Assignment Operators -summary: dolphin Assignment Operators -author: zhang cuiping -date: 2022-10-24 ---- - -# Assignment Operators - -Compared with the original MogDB, Dolphin modifies the assignment operators as follows: - -1. Values can be assigned using `:=`. For example, `UPDATE table_name SET col_name := new_val;`. \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-b-compatible-database-lock.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-b-compatible-database-lock.md deleted file mode 100644 index 6c276cd3..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-b-compatible-database-lock.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: dolphin B-Compatible Database Lock -summary: dolphin B-Compatible Database Lock -author: GUO HUAN -date: 2023-05-12 ---- - -# B-Compatible Database Lock - -To ensure database data consistency, you can execute the LOCK TABLES statement to prevent other users from modifying tables. - -For example, an application needs to ensure that data in a table is not modified during transaction running. For this purpose, table usage can be locked. This prevents data from being concurrently modified. - -After LOCK TABLES is used, the subsequent SQL statements are in the transaction state. Therefore, you need to run UNLOCK TABLES to manually release the lock and end the transaction. - -In addition, if you want to make the current session read-only, you can use FLUSH TABLES WITH READ LOCK to implement this function. Then, you need to use UNLOCK TABLES to manually disable this function. - -## Syntax - -- Lock. - - ``` - LOCK TABLES namelist READ/WRITE - ``` - -- Make the current session read-only. - - ``` - FLUSH TABLES WITH READ LOCK - ``` - -- Unlock. - - ``` - UNLOCK TABLES - ``` - -## Parameter Description - -- **namelist** - - Name of the table to be locked. Multiple tables are allowed. - -- **READ/WRITE** - - Lock mode. Values: - - - **READ** - - Tables can be read only. - - - **WRITE** - - The holder is the only transaction accessing the table in any way. - -## Examples - -Obtains a **WRITE** lock on a table when going to perform a delete operation. - -```sql --- Create an example table. -MogDB=# CREATE TABLE graderecord - ( - number INTEGER, - name CHAR(20), - class CHAR(20), - grade INTEGER - ); --- Insert data. 
-MogDB=# insert into graderecord values('210101','Alan','21.01',92);
-
--- Lock the example table.
-MogDB=# LOCK TABLES graderecord WRITE;
-
--- Delete a row from the example table.
-MogDB=# DELETE FROM graderecord WHERE name ='Alan';
-
-MogDB=# UNLOCK TABLES;
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-bit-string-functions-and-operators.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-bit-string-functions-and-operators.md
deleted file mode 100644
index f76f157f..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-bit-string-functions-and-operators.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: dolphin Bit String Functions and Operators
-summary: dolphin Bit String Functions and Operators
-author: zhang cuiping
-date: 2022-10-24
----
-
-# Bit String Functions and Operators
-
-Compared with the original MogDB, Dolphin modifies the bit string functions as follows:
-
-1. The bit_bool function is added.
-
-- bit_bool(bit)
-
-  Description: Returns a Boolean value based on the data in the bit string. If the value is **0**, **false** is returned. Otherwise, **true** is returned.
-
-  Return type: Boolean
-
-  Example:
-
-```sql
- MogDB=# select bit_bool('11111');
-  bit_bool
- ----------
-  t
- (1 row)
- MogDB=# select bit_bool('00001');
-  bit_bool
- ----------
-  t
- (1 row)
- MogDB=# select bit_bool('00000');
-  bit_bool
- ----------
-  f
- (1 row)
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-character-processing-functions-and-operators.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-character-processing-functions-and-operators.md
deleted file mode 100644
index dbda70be..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-character-processing-functions-and-operators.md
+++ /dev/null
@@ -1,541 +0,0 @@
----
-title: dolphin Character Processing Functions and Operators
-summary: dolphin Character Processing Functions and Operators
-author: zhang cuiping
-date: 2022-10-24
----
-
-# Character Processing Functions and Operators
-
-Compared with the original MogDB, Dolphin modifies character processing functions and operators as follows:
-
-1. The regexp, not regexp, and rlike operators are added.
-2. The locate, lcase, ucase, insert, bin, chara, elt, field, find_in_set, hex, space, and soundex functions are added.
-3. The behavior of the length, bit_length, octet_length, convert, and format functions is modified.
-4. The XOR function of the `^` operator is added, and the `LIKE BINARY/NOT LIKE BINARY` operator is added.
-5. The `LIKE/NOT LIKE` operator is modified.
-
-- bit_length(string)
-
-  Description: Specifies the number of bits in a string. For bit string input, the result is padded up to a multiple of 8.
- - Return type: int - - Example: - - ```sql - MogDB=# SELECT bit_length('world'); - bit_length - ------------ - 40 - (1 row) - - MogDB=# SELECT bit_length(b'010'); - bit_length - ------------ - 8 - (1 row) - ``` - -- insert(des text, start int, length int, src text) - - Description: Inserts a new string at a specified position of the original string and replaces a certain number of characters in the original string from the specified position. - - Return type: text - - Example: - - ```sql - MogDB=# select insert('abcdefg', 2, 4, 'yyy'); - insert - -------- - ayyyfg - (1 row) - ``` - -- lcase(string) - - Description: Converts a string to lowercase, equivalent to **lower**. - - Return type: varchar - - Example: - - ```sql - MogDB=# SELECT lcase('TOM'); - lcase - ------- - tom - (1 row) - ``` - -- length(string) - - Description: Obtains the number of characters in a string. For multi-character encoding (such as Chinese), the number of bytes is returned. - - Return type: integer - - Example: - - ```sql - MogDB=# SELECT length('abcd'); - length - -------- - 4 - (1 row) - - MogDB=# SELECT length('中文'); - length - -------- - 6 - (1 row) - ``` - -- format(val number, dec_num int [,locale string]) - - Description: Returns **val** in the format of x,xxx,xxx.xx. The **val** will retain *dec_num* decimal places. A maximum of 32 decimal places can be reserved. If **dec_num** is greater than 32, 32 decimal places are reserved. If **dec_num** is set to 0, the returned content does not contain the decimal point or decimal part. The third parameter is optional. You can specify the format of the decimal point and thousands separator in the returned content based on locale. If the third parameter is not specified or the value of the third parameter is invalid, the default value **en_US** is used. - - Note: This format function is used for B-compatible databases and has different semantics from the original format function of MogDB. To use this semantics, create a B-compatible database, enable the B-compatible SQL engine plug-in, and set **B_COMPATIBILITY_MODE** to **TRUE**. - - Return type: text - - Example: - - ```sql - MogDB=# CREATE DATABASE B_COMPATIBILITY_DATABASE DBCOMPATIBILITY 'B'; - CREATE DATABASE - MogDB=# \c B_COMPATIBILITY_DATABASE - b_compatibility_database=# CREATE EXTENSION dolphin; - CREATE EXTENSION - b_compatibility_database=# SET B_COMPATIBILITY_MODE = TRUE; - SET - b_compatibility_database=# select format(1234.4567,2); - format - ----------- - 1,234.46 - (1 row) - - b_compatibility_database=# select format(1234.5,4); - format - ----------- - 1,234.5000 - (1 row) - - b_compatibility_database=# select format(1234.5,0); - format - ----------- - 1,235 - (1 row) - - b_compatibility_database=# select format(1234.5,2,'de_DE'); - format - ----------- - 1.234,50 - (1 row) - ``` - -- hex(number or string or bytea or bit) - - Description: Converts a number, character, binary character type, or bit string type to a hexadecimal format. - - Note: The MogDB considers the backslash () as a character. Therefore, the length of the character string **\n** is 2. 
- - Return type: text - - Example: - - ```sql - MogDB=# SELECT hex(256); - hex - ----- - 100 - (1 row) - - MogDB=# select hex('abc'); - hex - -------- - 616263 - (1 row) - - MogDB=# select hex('abc'::bytea); - hex - -------- - 616263 - (1 row) - - MogDB=# select hex(b'1111'); - hex - ----- - 0f - (1 row) - - MogDB=# select hex('\n'); - hex - ------- - 5c6e - (1 row) - ``` - -- locate(substring, string [,position]) - - Description: From the specified **position** (**1** by default) in the string on, queries and returns the value of **position** where the substring occurs for the first time. Parameters are case-sensitive. - - - If the value of **position** is **0**, 0 is returned. - - If the value of **position** is negative, the search is performed backwards from the last *n*th character in the string, in which *n* indicates the absolute value of **position**. - - Return type: integer. If the character string does not exist, **0** is returned. - - Example: - - ```sql - MogDB=# SELECT locate('ing', 'string'); - locate - -------- - 4 - (1 row) - - MogDB=# SELECT locate('ing', 'string', 0); - locate - -------- - 0 - (1 row) - - MogDB=# SELECT locate('ing', 'string', 5); - locate - -------- - 0 - (1 row) - ``` - -- octet_length(string) - - Description: It is equivalent to **length**. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT octet_length('中文'); - octet_length - -------------- - 6 - (1 row) - ``` - -- source_string regexp pattern - - Description: Indicates the pattern matching operator of a regular expression. - - **source_string** indicates the source string and **pattern** indicates the matching pattern of the regular expression. - - Return type: integer (0 or 1) - - Example: - - ```sql - MogDB=# SELECT 'str' regexp '[ac]' AS RESULT; - result - -------- - 0 - (1 row) - ``` - -- source_string not regexp pattern - - Description: Reverses the result of regexp. - - **source_string** indicates the source string and **pattern** indicates the matching pattern of the regular expression. - - Return type: integer (0 or 1) - - Example: - - ```sql - MogDB=# SELECT 'str' not regexp '[ac]' AS RESULT; - result - -------- - 1 - (1 row) - ``` - -- source_string rlike pattern - - Description: It is equivalent to **regexp**. - - **source_string** indicates the source string and **pattern** indicates the matching pattern of the regular expression. - - Return type: integer (0 or 1) - - Example: - - ```sql - MogDB=# SELECT 'str' rlike '[ac]' AS RESULT; - result - -------- - 0 - (1 row) - ``` - -- ucase(string) - - Description: Converts the string into the uppercase. It is equivalent to **upper**. - - Return type: varchar - - Example: - - ```sql - MogDB=# SELECT ucase('tom'); - ucase - ------- - TOM - (1 row) - ``` - -- bin(number or string) - - Description: Returns a binary string of N integers or numeric characters. For Chinese characters, 0 is returned. - - Return type: text - - Example: - - ```sql - b_compatibility_database=# SELECT bin('309'); - bin - ------------ - 100110101 - (1 row) - - b_compatibility_database=# SELECT bin('你好'); - bin - --- - 0 - (1 row) - ``` - -- chara(any) - - Description: Converts multiple digits into multiple characters based on ASCII codes. - - Return type: text - - Example: - - ```sql - b_compatibility_database=# select chara(77,77.3,'77.3','78.8',78.8); - chara - ------------ - MMMNO - (1 row) - ``` - -- char_length(string)或character_leng(string) - - Description: Specifies the number of characters in a character string. The length of a Chinese character is 1. 
The binary type is supported. - - Return type: int - - Example: - - ```sql - MogDB=# SELECT char_length('hello'); - char_length - ------------- - 5 - (1 row) - b_compatibility_database=# SELECT char_length(B'101'); - char_length - ------------- - 1 - (1 row) - ``` - -- convert(expr using transcoding_name) - - Description: Converts **expr** using **transcoding_name**. - - Return type: text - - Example: - - ```sql - b_compatibility_database=# select convert('a' using 'utf8'); - convert - --------- - a - (1 row) - ``` - -- elt(number, str1,str2,str3,…) - - Description: Returns the *N*th string. - - Return type: text - - Example: - - ```sql - b_compatibility_database=# select elt(3,'wo','ceshi','disange'); - elt - --------- - disange - (1 row) - ``` - -- field(str, str1,str2,str3,…) - - Description: Obtains the position of str in strn. The position is case insensitive. - - Return type: int - - Example: - - ```sql - b_compatibility_database=# select field('ceshi','wo','ceshi','disange'); - field - ------- - 2 - (1 row) - ``` - -- find_in_set(str, strlist) - - Description: Obtains the position of str in strlist. The position is case insensitive and is separated by commas (,). - - Return type: int - - Example: - - ```sql - b_compatibility_database=# select find_in_set('ceshi','wo','ceshi,ni,wo,ta'); - find_in_set - ------------- - 3 - (1 row) - ``` - -- space(number) - - Description: Returns *N* spaces. - - Return type: text - - Example: - - ```sql - b_compatibility_database=# select space('5'); - space - ------- - - (1 row) - ``` - -- soundex(str) - - Description: Returns the algorithm that describes the alphanumeric pattern of the speech representation of the specified string. - - Return type: text - - Example: - - ```sql - b_compatibility_database=# select soundex('abcqwcaa'); - soundex - --------- - A120 - (1 row) - ``` - -- make_set(number, string1, string2, …) - - Description: Returns a set value (a string containing substrings separated by commas) consisting of a string with the corresponding bit set in number. string1 corresponds to bit 0, string2 corresponds to bit 1, and so on. NULL values in string1, string2, … are not added to the result. - - Return type: text - - ```sql - select make_set(1|4, 'one', 'two', NULL, 'four'); - make_set - ---------- - one - (1 row) - ``` - -- \^ - - Description: Implements the XOR function of two character strings. The content before the first non-numeric symbol is truncated for XOR. - - Return type: INT - - Example: - - ```sql - MogDB=# SELECT '123a'^'123'; - ?column? - --------- - 0 - (1 row) - ``` - -- like/not like - - Description: Specifies whether the string matches the pattern string following LIKE. In the source version, LIKE of MogDB is case sensitive. In this version, when `b_compatibility_mode` is set to `TRUE`, LIKE is case insensitive. When `b_compatibility_mode` is set to `FALSE`, LIKE is case sensitive. If the string matches the provided pattern, the LIKE expression returns true (the ILIKE expression returns false). - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT 'a' like 'A' as result; - result - ------------ - t - (1 row) - - MogDB=# SELECT 'abc' like 'a' as result; - result - ------------ - f - (1 row) - - MogDB=# SELECT 'abc' like 'A%' as result; - result - ------------ - t - (1 row) - ``` - -- like binary/not like binary - - Description: Determines whether a string can match the pattern string after LIKE BINARY. LIKE BINARY uses case-sensitive pattern matching. 
If the pattern is matched, true is returned (NOT LIKE BINARY returns false). If the pattern is not matched, false is returned (NOT LIKE BINARY returns true). - - Return type: Boolean - - Example: - - ```sql - MogDB=# SELECT 'a' like binary 'A' as result; - result - ------------ - f - (1 row) - - MogDB=# SELECT 'a' like binary 'a' as result; - result - ------------ - t - (1 row) - - MogDB=# SELECT 'abc' like binary 'a' as result; - result - ------------ - f - (1 row) - - MogDB=# SELECT 'abc' like binary 'a%' as result; - result - ------------ - t - (1 row) - ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-comment-operators.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-comment-operators.md deleted file mode 100644 index 000bfc6a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-comment-operators.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: dolphin Comment Operators -summary: dolphin Comment Operators -author: Guo Huan -date: 2023-05-15 ---- - -# Comment Operators - -Compared to the original MogDB, dolphin's changes to the comment operators are mainly: - -1. Support has been added for indicating the start of a single line comment via `#`. For example, `# select a from t1` indicates a single comment. - -- Single line comment at the beginning of # - - Description: Treats comments starting with # and ending with a newline as comments. - - Note: This operator is intended for use with B-compatible databases and has different semantics than MogDB's original # operator. To use this semantics, create a B-compatible database, enable the MySQL Compatibility SQL Engine plugin, and set dolphin.b_compatibility_mode to TRUE. - - Example: - - ```sql - MogDB=# SELECT lower('TOM') #1234; - MogDB=# ; - lower - ------- - tom - ``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-compatible-operators-and-operations.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-compatible-operators-and-operations.md deleted file mode 100644 index 1e3c1451..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-compatible-operators-and-operations.md +++ /dev/null @@ -1,327 +0,0 @@ ---- -title: dolphin Compatible Operators and Operations -summary: dolphin Compatible Operators and Operations -author: zhang cuiping -date: 2022-10-24 ---- - -# Compatible Operators and Operations - -If **dolphin.b_compatibility_mode** is set to **on**, MySQL-compatible four arithmetic operations are enabled. Compared with the original MogDB, Dolphin modifies the four arithmetic operations as follows: - -1. The following types of arithmetic operations are supported: - - - Numeric types: tinyint (**unsigned**), smallint (**unsigned**), integer (**unsigned**), bigint (**unsigned**), float4, float8, decimal/numeric, and bit. 
- - Character string types: char, varchar, binary, varbinary, tinyblob, blob, mediumblob, longblob, enum, set, json, and text (Currently, MogDB does not have tinytext, mediumtext, and longtext. Therefore, they are not considered.) - - Date and time types: date, datetime, timestamp, time, and year. - -2. The return values of some original operators are compatible with MySQL. The compatibility rules are as follows: - - - Integer x integer: For the +, -, and * operators, if both operators are signed integers, the returned result is also signed integers. Otherwise, the returned result is unsigned integers. For the / operator, the returned value is of the fixed-point type (numeric type). - - Integer x fixed-point type: For the +, -, *, and / arithmetic operations, the fixed-point type is returned. Note: The MogDB fixed-point numbers are not unsigned. Therefore, the returned results are signed. - - Integer x floating-point type: For the +, -, *, and / arithmetic operations, the floating-point type is returned. Note: The MogDB floating-point numbers are not unsigned. Therefore, the returned results are signed. - - Fixed-point type x fixed-point type: For the +, -, *, and / arithmetic operations, the fixed-point type is returned. - - Fixed-point type x floating-point type: For the +, -, *, and / arithmetic operations, the floating-point type is returned. - - Floating-point type x floating-point type: For the +, -, *, and / arithmetic operations, the floating-point type is returned. - - Based on the preceding rules, you only need to use the type conversion rules of the character string type and time type to calculate the return values of these types during hybrid calculation. The type conversion rules are as follows: - - - The character string type is converted to the floating-point type in four arithmetic operations. - - Date and time types: The date type is converted to a signed integer, and the year type is converted to an unsigned integer. If typmod (indicating millisecond and microsecond) is not specified, the datetime, timestamp, and time types are converted to signed integers; otherwise, they are converted to fixed-point numbers with the same number of decimal places as the specified typmod. 
- -## Example - -Test case: - -```sql -create database test_db dbcompatibility 'B'; -\c test_db -set dolphin.b_compatibility_mode to on; --- Integer x integer -select 1::int4 + 1::int4; -select 1::int4 - 1::int4; -select 1::int4 * 1::int4; -select 1::int4 / 1::int4; - --- Integer x unsigned integer -select 1::int4 + 1::uint4; -select 1::int4 - 1::uint4; -select 1::int4 * 1::uint4; -select 1::int4 / 1::uint4; - --- Integer x fixed-point type -select 1::int4 + 1::numeric; -select 1::int4 - 1::numeric; -select 1::int4 * 1::numeric; -select 1::int4 / 1::numeric; - --- Integer x floating-point type -select 1::int4 + 1::float8; -select 1::int4 - 1::float8; -select 1::int4 * 1::float8; -select 1::int4 / 1::float8; - --- Fixed-point type x floating-point type -select 1::numeric + 1::float8; -select 1::numeric - 1::float8; -select 1::numeric * 1::float8; -select 1::numeric / 1::float8; - --- Integer x character string -select 1::int4 + '1.23'::text; -select 1::int4 - '1.23'::text; -select 1::int4 * '1.23'::text; -select 1::int4 / '1.23'::text; - --- Integer x date -select 1::int4 + '2022-01-01'::date; -select 1::int4 - '2022-01-01'::date; -select 1::int4 * '2022-01-01'::date; -select 1::int4 / '2022-01-01'::date; - --- Integer x time (without microseconds) -select 1::int4 + '12:12:12'::time; -select 1::int4 - '12:12:12'::time; -select 1::int4 * '12:12:12'::time; -select 1::int4 / '12:12:12'::time; - --- Integer x time (with microseconds) -select 1::int4 + '12:12:12.36'::time(3); -select 1::int4 - '12:12:12.36'::time(3); -select 1::int4 * '12:12:12.36'::time(3); -select 1::int4 / '12:12:12.36'::time(3); -``` - -Results: - -```sql -MogDB=# create database test_db dbcompatibility 'B'; -CREATE DATABASE -MogDB=# \c test_db -test_db=# set dolphin.b_compatibility_mode to on; -SET -test_db=# -- Integer x integer -test_db=# select 1::int4 + 1::int4; - ?column? ----------- - 2 -(1 row) - -test_db=# select 1::int4 - 1::int4; - ?column? ----------- - 0 -(1 row) - -test_db=# select 1::int4 * 1::int4; - ?column? ----------- - 1 -(1 row) - -test_db=# select 1::int4 / 1::int4; - ?column? ------------------------- - 1.00000000000000000000 -(1 row) - -test_db=# -- Integer x unsigned integer -test_db=# select 1::int4 + 1::uint4; - ?column? ----------- - 2 -(1 row) - -test_db=# select 1::int4 - 1::uint4; - ?column? ----------- - 0 -(1 row) - -test_db=# select 1::int4 * 1::uint4; - ?column? ----------- - 1 -(1 row) - -test_db=# select 1::int4 / 1::uint4; - ?column? ------------------------- - 1.00000000000000000000 -(1 row) - -test_db=# -- Integer x fixed-point type -test_db=# select 1::int4 + 1::numeric; - ?column? ----------- - 2 -(1 row) - -test_db=# select 1::int4 - 1::numeric; - ?column? ----------- - 0 -(1 row) - -test_db=# select 1::int4 * 1::numeric; - ?column? ----------- - 1 -(1 row) - -test_db=# select 1::int4 / 1::numeric; - ?column? ------------------------- - 1.00000000000000000000 -(1 row) - -test_db=# -- Integer x floating-point type -test_db=# select 1::int4 + 1::float8; - ?column? ----------- - 2 -(1 row) - -test_db=# select 1::int4 - 1::float8; - ?column? ----------- - 0 -(1 row) - -test_db=# select 1::int4 * 1::float8; - ?column? ----------- - 1 -(1 row) - -test_db=# select 1::int4 / 1::float8; - ?column? ----------- - 1 -(1 row) - -test_db=# -- Fixed-point type x floating-point type -test_db=# select 1::numeric + 1::float8; - ?column? ----------- - 2 -(1 row) - -test_db=# select 1::numeric - 1::float8; - ?column? ----------- - 0 -(1 row) - -test_db=# select 1::numeric * 1::float8; - ?column? 
----------- - 1 -(1 row) - -test_db=# select 1::numeric / 1::float8; - ?column? ----------- - 1 -(1 row) - -test_db=# -- Integer x character string -test_db=# select 1::int4 + '1.23'::text; - ?column? ----------- - 2.23 -(1 row) - -test_db=# select 1::int4 - '1.23'::text; - ?column? ----------- - -0.23 -(1 row) - -test_db=# select 1::int4 * '1.23'::text; - ?column? ----------- - 1.23 -(1 row) - -test_db=# select 1::int4 / '1.23'::text; - ?column? -------------------- - 0.813008130081301 -(1 row) - -test_db=# -- Integer x date -test_db=# select 1::int4 + '2022-01-01'::date; - ?column? ----------- - 20220102 -(1 row) - -test_db=# select 1::int4 - '2022-01-01'::date; - ?column? ------------ - -20220100 -(1 row) - -test_db=# select 1::int4 * '2022-01-01'::date; - ?column? ----------- - 20220101 -(1 row) - -test_db=# select 1::int4 / '2022-01-01'::date; - ?column? ----------------------------- - 0.000000049455737139987580 -(1 row) - -test_db=# -- Integer x time (without microseconds) -test_db=# select 1::int4 + '12:12:12'::time; - ?column? ----------- - 121213 -(1 row) - -test_db=# select 1::int4 - '12:12:12'::time; - ?column? ----------- - -121211 -(1 row) - -test_db=# select 1::int4 * '12:12:12'::time; - ?column? ----------- - 121212 -(1 row) - -test_db=# select 1::int4 / '12:12:12'::time; - ?column? ----------------------------- - 0.000008250008250008250008 -(1 row) - -test_db=# -- Integer x time (with microseconds) -test_db=# select 1::int4 + '12:12:12.36'::time(3); - ?column? ---------------- - 121213.360000 -(1 row) - -test_db=# select 1::int4 - '12:12:12.36'::time(3); - ?column? ----------------- - -121211.360000 -(1 row) - -test_db=# select 1::int4 * '12:12:12.36'::time(3); - ?column? ---------------- - 121212.360000 -(1 row) - -test_db=# select 1::int4 / '12:12:12.36'::time(3); - ?column? ----------------------------- - 0.000008249983747532017362 -(1 row) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-conditional-expression-functions.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-conditional-expression-functions.md deleted file mode 100644 index 75a86ed0..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-conditional-expression-functions.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: dolphin Conditional Expression Functions -summary: dolphin Conditional Expression Functions -author: zhang cuiping -date: 2022-10-24 ---- - -# Conditional Expression Functions - -## Precautions - -- This section describes only the new conditional expression functions of Dolphin. For details about the conditional expression functions of the original MogDB, see [Conditional Expression Functions](../../../../../../reference-guide/functions-and-operators/conditional-expressions-functions.md). - -## Conditional Expression Functions - -- if(bool, expr1, expr2) - - Description: Condition judgment function. If **bool** is **true**, **expr1** is returned. If **bool** is **false**, **expr2** is returned. 
-
-  Example:
-
-  ```sql
-  MogDB=# select if(true, 1, 2);
-   case
-  ------
-      1
-  (1 row)
-  ```
-
-  ```sql
-  MogDB=# select if(false, 1, 2);
-   case
-  ------
-      2
-  (1 row)
-  ```
-
-- ifnull(expr1, expr2)
-
-  Description:
-
-  - If the value of **expr1** is **NULL**, the value of **expr2** is returned.
-  - If the value of **expr1** is not **NULL**, the value of **expr1** is returned.
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT ifnull('hello','world');
-    nvl
-  -------
-   hello
-  (1 row)
-  ```
-
-  Remarks: The parameter conversion logic is the same as that of NVL.
-
-- isnull(expr)
-
-  - Returns **true** if **expr** is **NULL**.
-  - Returns **false** if **expr** is not **NULL**.
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT isnull('hello');
-   ?column?
-  --------
-   f
-  (1 row)
-  ```
-
-  Remarks: The null check logic is the same as that of **expr is null**.
-
-- gs_interval(base_expr, expr1, expr2, …, exprn)
-
-  Description:
-
-  - Compares base_expr with each expr(n) one by one until an expr(n) that is greater than base_expr is found, and then returns n-1. If every expr(n) is less than or equal to base_expr, n (the number of expressions) is returned.
-  - If base_expr or expr(n) is non-numeric data:
-    - BOOL: TRUE is converted to 1, and FALSE is converted to 0.
-    - If it can be converted to a floating-point number in float8 format, it is converted to float8.
-    - If it cannot be converted to a float8 floating-point number, it is treated as 0.
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT gs_interval(5,2,3,4,6,7);
-   gs_interval
-  -------------
-             3
-  (1 row)
-  MogDB=# SELECT gs_interval(false,-1,0,true,2);
-   gs_interval
-  -------------
-             2
-  (1 row)
-  MogDB=# SELECT gs_interval('2022-12-12'::timestamp,'asdf','2020-12-12'::date,2023);
-   gs_interval
-  -------------
-             2
-  (1 row)
-  ```
-
-- strcmp(str1, str2)
-
-  Description: Compares str1 with str2 from left to right. If str1 is equal to str2, 0 is returned. If str1 is greater than str2, 1 is returned. If str1 is less than str2, -1 is returned.
-
-  Example:
-
-  ```sql
-  MogDB=# SELECT strcmp('asd','asd');
-   strcmp
-  --------
-        0
-  (1 row)
-  MogDB=# SELECT strcmp(312,311);
-   strcmp
-  --------
-        1
-  (1 row)
-  MogDB=# SELECT strcmp('2021-12-12'::timestamp,20210::float8);
-   strcmp
-  --------
-       -1
-  (1 row)
-  ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-date-and-time-processing-functions-and-operators.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-date-and-time-processing-functions-and-operators.md
deleted file mode 100644
index 82471915..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-date-and-time-processing-functions-and-operators.md
+++ /dev/null
@@ -1,1772 +0,0 @@
----
-title: dolphin Date and Time Processing Functions and Operators
-summary: dolphin Date and Time Processing Functions and Operators
-author: zhang cuiping
-date: 2022-10-24
----
-
-# Date and Time Processing Functions and Operators
-
-## Time/Date Functions
-
-Compared with the original MogDB, Dolphin modifies the time/date functions as follows:
-
-1. The dayofmonth, dayofweek, dayofyear, hour, microsecond, minute, quarter, second, weekday, weekofyear, year, and current_date functions are added.
-2.
The curdate, current_time, curtime, current_timestamp, localtime, localtimestamp, now, and sysdate functions are added. -3. The makedate, maketime, period_add, period_diff, sec_to_time, and subdate functions are added. -4. The subtime, timediff, time, time_format, timestamp, and timestampadd functions are added. -5. The to_days, to_seconds, unix_timestamp, utc_date, utc_time, and utc_timestamp functions are added. -6. The date_bool and time_bool functions are added. -7. The dayname, monthname, time_to_sec, month, day, date, week, yearweek functions are added and the last_day function is modified. -8. The datediff, from_days, convert_tz, date_add, date_sub, adddate, addtime functions are added and the timestampdiff function is modified. - -- curdate() - - Description: Returns the date when the statement started to be executed. - - Return type: date - - Example: - - ```sql - MogDB=# select curdate(); - curdate - ------------ - 2022-07-21 - (1 row) - ``` - -- current_time - - Description: Returns the time when a statement starts to be executed. - - Return type: time - - Example: - - ```sql - MogDB=# select current_time; - current_time - -------------- - 16:56:02 - (1 row) - ``` - -- current_time(n) - - Description: Returns the time when a statement starts to be executed. **n** indicates the precision. The maximum value is 6. - - Return type: time - - Example: - - ```sql - MogDB=# select current_time(3); - current_time(3) - ----------------- - 16:57:23.255 - (1 row) - - MogDB=# select current_time(); - current_time() - ---------------- - 17:05:01 - (1 row) - ``` - -- curtime(n) - - Description: Returns the time when a statement starts to be executed. **n** indicates the precision. The maximum value is 6. - - Return type: time - - Example: - - ```sql - MogDB=# select curtime(3); - curtime(3) - -------------- - 17:45:33.844 - (1 row) - - MogDB=# select curtime(); - curtime() - ----------- - 17:45:54 - (1 row) - ``` - -- current_timestamp - - Description: Returns the timestamp when a statement starts to be executed. - - Return type: datetime - - Example: - - ```sql - MogDB=# select current_timestamp; - current_timestamp - --------------------- - 2022-07-21 16:59:38 - (1 row) - ``` - -- current_timestamp(n) - - Description: Returns the timestamp when the statement starts to be executed. **n** indicates the precision. The maximum value is 6. - - Return type: datetime - - Example: - - ```sql - MogDB=# select current_timestamp(3); - current_timestamp(3) - ------------------------- - 2022-07-21 17:00:41.251 - (1 row) - - MogDB=# select current_timestamp(); - current_timestamp() - --------------------- - 2022-07-21 17:06:06 - (1 row) - ``` - -- dayofmonth(timestamp) - - Description: Obtains the value of date in the date or time value. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT dayofmonth(timestamp '2001-02-16 20:38:40'); - date_part - ----------- - 16 - (1 row) - ``` - -- dayofweek(timestamp) - - Description: Obtains the week number in the date/time value. The value **1** indicates Sunday, the value **2** indicates Monday, and the value **7** indicates Saturday. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT dayofweek(timestamp '2001-02-16 20:38:40'); - ?column? - ---------- - 6 - (1 row) - ``` - -- dayofyear(timestamp) - - Description: Obtains the day of a year in a date/time value. 
- - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT dayofyear(timestamp '2001-02-16 20:38:40'); - date_part - ----------- - 47 - (1 row) - ``` - -- hour(timestamp) - - Description: Obtains the value of hour in the date or time value. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT hour(timestamp '2001-02-16 20:38:40'); - date_part - ----------- - 20 - (1 row) - ``` - -- localtime - - Description: Returns the timestamp when a statement starts to be executed. - - Return type: datetime - - Example: - - ```sql - MogDB=# select localtime; - localtime - --------------------- - 2022-07-21 17:02:04 - (1 row) - ``` - -- localtime(n) - - Description: Returns the timestamp when the statement starts to be executed. **n** indicates the precision. The maximum value is 6. - - Return type: datetime - - Example: - - ```sql - MogDB=# select localtime(3); - localtime - --------------------- - 2022-07-21 17:02:04 - (1 row) - - MogDB=# select localtime(); - localtime() - --------------------- - 2022-07-21 17:14:22 - (1 row) - ``` - -- localtimestamp - - Description: Returns the timestamp when a statement starts to be executed. - - Return type: datetime - - Example: - - ```sql - MogDB=# select localtimestamp; - localtimestamp - --------------------- - 2022-07-21 17:17:20 - (1 row) - ``` - -- localtimestamp(n) - - Description: Returns the timestamp when the statement starts to be executed. **n** indicates the precision. The maximum value is 6. - - Return type: datetime - - Example: - - ```sql - MogDB=# select localtimestamp(3); - localtimestamp(3) - ------------------------- - 2022-07-21 17:28:02.013 - (1 row) - - MogDB=# select localtimestamp(); - localtimestamp() - --------------------- - 2022-07-21 17:28:49 - (1 row) - ``` - -- MAKEDATE() - - Function prototype: - - `DATE MAKEDATE(int64 year, int64 dayofyear)` - - Function description: - - Returns the date value of a year when the year and the number of days are given. - - Remarks: - - - If any parameter is NULL, the function returns NULL. - - The value of **dayofyear** must be greater than 0. Otherwise, NULL is returned. - - 0 <= year < 70: **year** is regarded as 20XX. 70 <= year < 100: **year** is regarded as 19XX. - - The return value ranges from 0 to 9999-12-31. If the return value is out of the range, NULL is returned. - - Example: - - ```sql - MogDB=# SELECT MAKEDATE(2022,31), MAKEDATE(2022,32); - makedate | makedate - ------------+------------ - 2022-01-31 | 2022-02-01 - (1 row) - - -- 0 <= year < 70 and 70 <= year < 100 - MogDB=# SELECT MAKEDATE(0,31), MAKEDATE(70,32); - makedate | makedate - ------------+------------ - 2000-01-31 | 1970-02-01 - (1 row) - - -- dayofyear <= 0 and out-of-range cases - MogDB=# SELECT MAKEDATE(2022,0), MAKEDATE(9999,366); - makedate | makedate - ----------+---------- - | - (1 row) - ``` - -- MAKETIME() - - Function prototype: - - ``` - TIME MAKETIME(int64 hour, int64 minute, Numeric second) - ``` - - Function description: - - Returns a TIME type value when the hour, minute, and second parameters are given. - - Remarks: - - - The function returns NULL if any of the following conditions is met: - 1. minute < 0 or minute >= 60 - 2. second < 0 or second >= 60 - 3. Any parameter is NULL. - - The returned value of the TIME type contains six decimal places. If the value of **second** contains more than six decimal places, the value is rounded off. - - The returned value of the TIME type must be in the range [-838:59:59, 838:59:59]. 
If the value is out of range, the boundary value is returned according to the sign of hour. - - Example: - - ```sql - MogDB=# SELECT MAKETIME(15, 15, 15.5); - maketime - ------------ - 15:15:15.5 - (1 row) - - -- Rounded up to six decimal places - MogDB=# SELECT MAKETIME(10, 15, 20.5000005); - maketime - ----------------- - 10:15:20.500001 - (1 row) - - -- Out-of-range result clamped to the boundary value - MogDB=# SELECT MAKETIME(839,0,0); - maketime - ----------- - 838:59:59 - (1 row) - ``` - -- microsecond(timestamp) - - Description: Obtains the value of microsecond in the date or time value. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT microsecond(timestamp '2001-02-16 20:38:40.123'); - date_part - ----------- - 123000 - (1 row) - ``` - -- minute(timestamp) - - Description: Obtains the value of minute in the date or time value. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT minute(timestamp '2001-02-16 20:38:40.123'); - date_part - ----------- - 38 - (1 row) - ``` - -- now(n) - - Description: Returns the timestamp when the statement starts to be executed. **n** indicates the precision. The maximum value is 6. - - Return type: datetime - - Example: - - ```sql - MogDB=# select now(3); - now(3) - ------------------------- - 2022-07-21 17:30:18.037 - (1 row) - - MogDB=# select now(); - now() - --------------------- - 2022-07-21 17:30:51 - (1 row) - ``` - -- PERIOD_ADD() - - Function prototype: - - ``` - int64 PERIOD_ADD(int64 P, int64 N) - ``` - - Function description: - - Returns the period **P** (in YYYYMM or YYMM format) plus **N** months. The format is YYYYMM. - - Remarks: - - - If any parameter is NULL, the function returns NULL. - - If **P = 0**, 0 is returned. - - If the value of **P** and the year in the returned result is less than 100, 70 is used as the boundary to convert the year to 20XX or 19XX. - - Example: - - ```sql - MogDB=# SELECT PERIOD_ADD(202201, 2); - period_add - ------------ - 202203 - (1 row) - - -- p = 0 - MogDB=# SELECT PERIOD_ADD(0, 2); - period_add - ------------ - 0 - (1 row) - - -- The year of the period is within [0, 70) or [70, 100) - MogDB=# SELECT PERIOD_ADD(0101, 2), PERIOD_ADD(7001, 2); - period_add | period_add - ------------+------------ - 200103 | 197003 - (1 row) - ``` - -- PERIOD_DIFF() - - Function prototype: - - ``` - int64 PERIOD_DIFF(int64 P1, int64 P2) - ``` - - Function description: - - Returns the month difference between P1 and P2. - - Remarks: - - - If any parameter is NULL, the function returns NULL. - - If the year in P1 and P2 is less than 100, 70 is used as the boundary to convert the year to 20XX or 19XX. - - Example: - - ```sql - MogDB=# SELECT PERIOD_DIFF(202201,202003); - period_diff - ------------- - 22 - (1 row) - - MogDB=# SELECT PERIOD_DIFF(0101,7001); - period_diff - ------------- - 372 - (1 row) - ``` - -- quarter(timestamp) - - Description: Gets the number of the quarter in a date/time value, from 1 to 4. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT quarter(timestamp '2001-02-16 20:38:40.123'); - date_part - ----------- - 1 - (1 row) - ``` - -- second(timestamp) - - Description: Obtains the value of second in the date or time value. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT second(timestamp '2001-02-16 20:38:40.123'); - date_part - ----------- - 40 - (1 row) - ``` - -- SEC_TO_TIME() - - Function prototype: - - ``` - TIME SEC_TO_TIME(Numeric second) - ``` - - Function description: - - Converts a given number of seconds to hours, minutes, and seconds. 
This function returns a value of the TIME type. - - Remarks: - - - If any parameter is NULL, the function returns NULL. - - The returned value of the TIME type contains only six decimal places. The excess part is rounded off. - - The returned value of the TIME type must be in the range [-838:59:59, 838:59:59]. If the value is out of the range, the specified boundary value is returned based on the positive and negative types of second. - - Example: - - ```sql - MogDB=# SELECT SEC_TO_TIME(4396); - sec_to_time - ------------- - 01:13:16 - (1 row) - - -- Round off. - MogDB=# SELECT SEC_TO_TIME(2378.2222225); - sec_to_time - ----------------- - 00:39:38.222223 - (1 row) - - -- The returned result is out of range. - MogDB=# SELECT SEC_TO_TIME(3888888); - sec_to_time - ------------- - 838:59:59 - (1 row) - ``` - -- SUBDATE(expr, interval) - - Function prototype: - - ``` - CString SUBDATE(text date, INTERVAL expr unit) - CString SUBDATE(text date, int64 days) - ``` - - Function description: - - Performs date calculation. The **date** parameter specifies the start DATE or DATETIME type value. Specifies the INTERVAL value to be subtracted from the start date. The result date value after subtraction is returned. If the second parameter is an integer, it is considered as a subtracted day value. - - Remarks: - - - The return format of the function is DATE or DATETIME. Generally, the return type is the same as the type of the first parameter. When the type of the first parameter is DATE and the unit of INTERVAL contains HOUR, MINUTE, and SECOND, the return result is DATETIME. - - The function returns NULL if any of the following conditions is met: - 1. The value of date is out of range [0, 9999-12-31]. - 2. Any parameter is NULL. - - The date of the returned result must be within the range [0001-1-1, 9999-12-31]. If the value is out of range, NULL is returned. - - Example: - - ```sql - MogDB=# SELECT SUBDATE('2022-01-01', INTERVAL 31 DAY), SUBDATE('2022-01-01', 31); - subdate | subdate - ------------+------------ - 2021-12-01 | 2021-12-01 - (1 row) - - -- The first parameter is DATE. - MogDB=# SELECT SUBDATE('2022-01-01 01:01:01', INTERVAL 1 YEAR); - subdate - --------------------- - 2021-01-01 01:01:01 - (1 row) - - -- The first parameter is DATETIME. - MogDB=# SELECT SUBDATE('2022-01-01 01:01:01', INTERVAL 1 YEAR); - subdate - --------------------- - 2021-01-01 01:01:01 - (1 row) - - -- The first parameter is DATE, but the unit of INTERVAL contains TIME. - MogDB=# SELECT SUBDATE('2022-01-01', INTERVAL 1 SECOND); - subdate - --------------------- - 2021-12-31 23:59:59 - (1 row) - ``` - -- SUBDATE(TIME, interval) - - Function prototype: - - ``` - TIME SUBDATE(TIME time, INTERVAL expr unit) - TIME SUBDATE(TIME time, int64 days) - ``` - - Function description: - - This function is used to be compatible with the scenario where the first parameter type of the subdate function in MySQL can be TIME. In this case, the input of the first parameter must be the original TIME data, not the implicit conversion of the character string. The **time** parameter specifies the start time of the TIME type. The **second** parameter specifies the INTERVAL value to be subtracted from the start time. The result date after subtraction is returned. If the second parameter is an integer, it is considered as a subtracted day value. - - Remarks: - - - The first parameter must be of the original TIME type, not implicitly converted from a string. For example, SUBDATE('1:1:1', 1) does not enter this function. 
Change it to SUBDATE(time'1:1:1', 1). - - The INTERVAL unit of the second parameter cannot contain the year or month part. Otherwise, NULL is returned. - - The return value must be within [-838:59:59, 838:59:59]. Otherwise, NULL is returned. - - Example: - - ```sql - MogDB=# SELECT SUBDATE(time'10:15:20', INTERVAL '1' DAY), SUBDATE(time'10:15:20', 1); - subdate | subdate - -----------+----------- - -13:44:40 | -13:44:40 - (1 row) - - -- The INTERVAL unit of the second parameter cannot contain the year or month part. - MogDB=# SELECT SUBDATE(time'838:00:00', INTERVAL '1' MONTH); - subdate - --------- - - (1 row) - - -- The result is out of range. - MogDB=# SELECT SUBDATE(time'838:59:59', INTERVAL '-1' SECOND); - subdate - --------- - - (1 row) - ``` - -- SUBTIME() - - Function prototype: - - ``` - TEXT SUBTIME(TIME time1, TIME time2) - TEXT SUBTIME(DATETIME time1, TIME time2) - ``` - - Function description: - - This function performs date calculation and returns the result of DATETIME or TIME expression time1 minus TIME expression time2. The return parameter type is the same as the input type of time1. - - Remarks: - - - The value of time1 must be in TIME or DATETIME format. Otherwise, an error is reported. - - The value of time2 must be in the correct and valid TIME format. Otherwise, an error is reported. - - If the return value is outside the range [-838:59:59, 838:59:59], the boundary value is returned based on the sign. - - Example: - - ```sql - MogDB=# select subtime('11:22:33','10:20:30'); - subtime - ---------- - 01:02:03 - (1 row) - - MogDB=# select SUBTIME('2020-03-04 11:22:33', '-10:20:30'); - subtime - --------------------- - 2020-03-04 21:43:03 - (1 row) - ``` - -- sysdate(n) - - Description: Returns the real-time timestamp of the system. **n** indicates the precision. The maximum value is 6. - - Return type: datetime - - Example: - - ```sql - MogDB=# select sysdate(3); - sysdate(3) - ------------------------- - 2022-07-21 17:38:23.442 - (1 row) - - MogDB=# select sysdate(); - sysdate() - --------------------- - 2022-07-21 17:39:02 - (1 row) - ``` - -- time() - - Function prototype: - - ``` - Text TIME(TEXT expr) - ``` - - Function description: - - Compatible with the time() function of MySQL. The parameter specifies a TIME or DATETIME expression from which the time expression is extracted and returned as a string. - - Remarks: - - - The returned time expression can contain a maximum of six decimal places. The excess part is rounded off. - - For an abnormal date or time format or a date or time with domain overflow (for example, 1:60:60 and 2022-12-32), this function is compatible with the insert statement in MySQL, that is, an error is reported. - - An error is reported for a character string in the date format, and 00:00:00 is returned for a parameter of the date type. - - Example: - - ```sql - MogDB=# select time('2022-1-1 1:1:1.1111116'), time('25:25:25'); - time | time - -----------------+---------- - 01:01:01.111112 | 25:25:25 - (1 row) - - MogDB=# select time(date'2022-1-1'); - time - ---------- - 00:00:00 - (1 row) - ``` - -- TIMEDIFF() - - Function prototype: - - ``` - TIME TIMEDIFF(TIME time1, TIME time2) - DATETIME TIMEDIFF(DATETIME time1, DATETIME time2) - ``` - - Function description: - - This function performs date calculation and returns the result of subtracting time2 from time1. The type of the returned parameter is the same as the input type. - - Remarks: - - - The types of time1 and time2 must be the same and valid. Otherwise, NULL is returned. 
- - If time1 and time2 are of the TIME type and the result is beyond [-838:59:59, 838:59:59], the boundary value is returned based on the sign. - - Example: - - ```sql - MogDB=# select TIMEDIFF(time'23:59:59',time'01:01:01'), TIMEDIFF(datetime'2008-12-31 23:59:59',datetime'2008-12-30 01:01:01'); - timediff | timediff - ----------+---------- - 22:58:58 | 46:58:58 - (1 row) - - -- If the value is out of range, the extreme value is returned. - MogDB=# SELECT TIMEDIFF(time'-830:00:00', time'10:20:30'), TIMEDIFF(time'830:00:00', time'-10:20:30'); - timediff | timediff - ------------+----------- - -838:59:59 | 838:59:59 - (1 row) - ``` - -- TIMESTAMP() - - Function prototype: - - ``` - DATETIME TIMESTAMP(TEXT expr) - DATETIME TIMESTAMP(TEXT expr, TIME time) - ``` - - Function description: - - If there is only one parameter, the function converts the DATE or DATETIME expression expr to the DATETIME value and returns the value. - - If there are two parameters, the function calculates the result of the DATE or DATETIME expression expr plus time of the TIME type and returns the result. - - Remarks: - - - If expr is an invalid date or datetime expression (for example, '2000-12-32' or '2000-1-1 24:00:00'), the function reports an error. - - When two parameters are provided and the value of the second parameter time is not a character string in TIME format, the function reports an error. - - Example: - - ```sql - MogDB=# select TIMESTAMP('2022-01-01'), TIMESTAMP('20220101'); - timestamp | timestamp - ---------------------+--------------------- - 2022-01-01 00:00:00 | 2022-01-01 00:00:00 - (1 row) - - MogDB=# select TIMESTAMP('2022-01-31 12:00:00.123456'), TIMESTAMP('20000229120000.1234567'); - timestamp | timestamp - ----------------------------+---------------------------- - 2022-01-31 12:00:00.123456 | 2000-02-29 12:00:00.123457 - (1 row) - - MogDB=# select TIMESTAMP('2022-01-31','12:00:00.123456'), TIMESTAMP('2022-01-31 12:00:00','-32:00:00'); - timestamp | timestamp - ----------------------------+--------------------- - 2022-01-31 12:00:00.123456 | 2022-01-30 04:00:00 - (1 row) - - MogDB=# select TIMESTAMP('20000229','100:00:00'), TIMESTAMP('20000229120000.123','100:00:00'); - timestamp | timestamp - ---------------------+------------------------- - 2000-03-04 04:00:00 | 2000-03-04 16:00:00.123 - (1 row) - ``` - -- timestamp_add() - - Function prototype: - - ``` - TEXT TIMESTAMP_ADD(text unit, interval span, text datetime) - ``` - - Function description: - - Adds a period of time to a known time point. The first parameter **unit** indicates the time unit, the second parameter **span** indicates a specific value, and the third parameter **datetime** indicates a known time point. - - Remarks: - - - The supported units are as follows: - - | Unit | Input | - | ----------- | ----------- | - | Year | year | - | Quarter | qtr | - | Month | month | - | Week | week | - | Date | day | - | Hour | hour | - | Minute | minute | - | Second | second | - | Microsecond | microsecond | - - - The span supports decimals. If the unit is second, the span is rounded off to six decimal places based on the seventh decimal place. Otherwise, the span is rounded off to an integer. - - - The input type of datetime can be character string, date, datetime, or time. - - - The input range of datetime and the calculation result of the function must be within the range [0001-01-01 00:00:00.000000, 9999-12-31 23:59:59.999999]. Otherwise, an error is reported. 
(This function is compatible with the insert statement in MySQL.) - - - For an abnormal date or time format or a date or time with domain overflow (for example, 1:60:60 and 2022-12-32), this function is compatible with the insert statement in MySQL, that is, an error is reported. - - Example: - - ```sql - MogDB=# select timestampadd(day, 1, '2022-09-01'); - timestampadd - -------------- - 2022-09-02 - (1 row) - - MogDB=# select timestampadd(hour, 1, '2022-09-01 08:00:00'); - timestampadd - --------------------- - 2022-09-01 09:00:00 - (1 row) - ``` - -- time_format() - - Function prototype: - - ``` - TEXT TIME_FORMAT(text time, text format) - ``` - - Function description: - - The first parameter **time** is a time or datetime expression. The function formats the time value based on the second parameter **format** and returns the value as a string. - - Remarks: - - - The following formats are supported: - - | Format | Description | - | ------ | -------------------------------------------------------- | - | %f | Microsecond (000000 to 999999) | - | %H | Hour (00 to 23) | - | %h | Hour (00 to 12) | - | %I | Hour (00 to 12) | - | %i | Minute (00 to 59) | - | %p | AM or PM | - | %r | The time is in 12-hour AM or PM format (hh:mm:ss AM/PM). | - | %S | Second (00 to 59) | - | %s | Second (00 to 59) | - | %T | Time in 24-hour format (hh:mm:ss) | - | %k | Hour, without a leading zero | - - - For formats that are not related to hour, minute, and second, 0 or NULL is returned, including: - - | Format | Return Result | - | :------------------------------------------------- | :------------ | - | %a, %b, %D, %j, %M, %U, %u, %V, %v, %W, %w, %X, %x | NULL | - | %c , %e | 0 | - | %d, %m, %y | 00 | - | %Y | 0000 | - - The extracted time value can contain a maximum of six decimal places. The excess part is rounded off. - - Example: - - ```sql - MogDB=# select TIME_FORMAT('83:59:59.0000009', '%T|%r|%H|%h|%I|%i|%S|%f|%p|%k'); - time_format - -------------------------------------------------- - 83:59:59|11:59:59 AM|83|11|11|59|59|000001|AM|83 - (1 row) - - MogDB=# select TIME_FORMAT('2022-1-1 23:59:59.0000009', '%T|%r|%H|%h|%I|%i|%S|%f|%p|%k'); - time_format - -------------------------------------------------- - 23:59:59|11:59:59 PM|23|11|11|59|59|000001|PM|23 - (1 row) - ``` - -- weekday(timestamp) - - Description: Obtains the day of a week in the date/time value. The value **0** indicates Monday, the value **1** indicates Tuesday, and the value **6** indicates Sunday. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT weekday(timestamp '2001-02-16 20:38:40.123'); - ?column? - ---------- - 4 - (1 row) - ``` - -- weekofyear(timestamp) - - Description: Obtains the week of a year in a date/time value. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT weekofyear(timestamp '2001-02-16 20:38:40.123'); - date_part - ----------- - 7 - (1 row) - ``` - -- year(timestamp) - - Description: Obtains the value of year in the date or time value. - - Return type: double precision - - Example: - - ```sql - MogDB=# SELECT year(timestamp '2001-02-16 20:38:40.123'); - year - ------ - 2001 - (1 row) - ``` - -- current_date() - - Description: Returns the current date. 
- - Return type: date - - Example: - - ```sql - MogDB=# SELECT current_date; - date - ------------ - 2017-09-01 - (1 row) - ``` - -- to_days() - - Function prototype: `int8 TO_DAYS(DATETIME date)` - - Function description: Receives a date or datetime expression as a parameter and returns the number of days from the date specified by the parameter to the year 0000. - - Remarks: - - - If the input parameter type is time, the date used for calculation is the current date plus the time specified by time. - - If the entered date is out of the range [0000-01-01, 9999-12-31] or the input parameter is an invalid date or datetime expression, the function reports an error. - - Example: - - ```sql - MogDB=# select to_days('0000-01-01'); - to_days - --------- - 1 - (1 row) - - MogDB=# select to_days('2022-09-05 23:59:59.5'); - to_days - --------- - 738768 - (1 row) - - -- The current date is 2022-09-05. - MogDB=# select to_days(time'25:00:00'); - to_days - --------- - 738769 - (1 row) - ``` - -- to_seconds() - - Function prototype: `NUMERIC TO_SECONDS(text datetime)` - - Function description: After you enter a time point **datetime**, the number of seconds from 0000-01-01 00:00:00 to the time point is returned. - - Remarks: - - - The **datetime** parameter supports the following types: character string, number, date, datetime, and time. If the input parameter is of the time type, the date is automatically set to the current date. - - The returned result contains only the integer number of seconds and the decimal part is discarded. - - Example: - - ```sql - MogDB=# select to_seconds('2022-09-01'); - to_seconds - ------------- - 63829209600 - (1 row) - - MogDB=# select to_seconds('2022-09-01 12:30:30.888'); - to_seconds - ------------- - 63829254630 - (1 row) - - MogDB=# select to_seconds(20220901123030); - to_seconds - ------------- - 63829254630 - (1 row) - ``` - -- unix_timestamp() - - Function prototype: - - ``` - NUMERIC UNIX_TIMESTAMP() - NUMERIC UNIX_TIMESTAMP(text datetime) - ``` - - Function description: - - - If you run the function without entering any parameter, the number of seconds from 1970-01-01 00:00:00 UTC to the current time is returned. - - If you enter a time point **datetime**, the number of seconds from 1970-01-01 00:00:00 UTC to datetime is returned. - - Remarks: - - - The **datetime** parameter supports the following types: character string, number, date, datetime, and time. If the input parameter is of the time type, the date is automatically set to the current date. - - The valid range of the **datetime** parameter is [1970-01-01 00:00:00.000000 UTC, 2038-01-19 03:14:07.999999 UTC]. - - The value range of this parameter is affected by the time zone, but the final calculation result is not affected by the time zone. - - The calculation result can contain a maximum of six decimal places. - - Example: - - ```sql - MogDB=# select unix_timestamp('2022-09-01'); - unix_timestamp - ---------------- - 1661961600 - (1 row) - - MogDB=# select unix_timestamp('2022-09-01 12:30:30.888'); - unix_timestamp - ---------------- - 1662006630.888 - (1 row) - - MogDB=# select unix_timestamp(20220901123030.6); - unix_timestamp - ---------------- - 1662006630.6 - (1 row) - ``` - -- utc_date() - - Function prototype: `DATE UTC_DATE()` - - This function is used to return the current UTC date of the DATE type. - - Remarks: - - - UTC_DATE can be identified as a keyword. In this case, parentheses are not required. 
- - Example: - - ```sql - MogDB=# select UTC_DATE(); - utc_date - ------------ - 2022-09-06 - (1 row) - - MogDB=# select UTC_DATE; - utc_date - ------------ - 2022-09-06 - (1 row) - ``` - -- utc_time() - - Function prototype: - - `TIME UTC_TIME()` - - `TIME UTC_TIME(int fsp)` - - Function description: This function is used to return the current UTC time of the TIME type. If an integer parameter is specified as the precision, the number of decimals to be retained in the result can be specified. The supported precision range is [0-6]. - - Remarks: - - - UTC_TIME can be identified as a keyword. In this case, parentheses are not required. The effect is the same as that of the UTC_TIME() function without parameters. - - Example: - - ```sql - MogDB=# select UTC_TIME(); - utc_time - ---------- - 15:13:54 - (1 row) - - MogDB=# select UTC_TIME(6); - utc_time - ---------------- - 15:13:56.59698 - (1 row) - - MogDB=# select UTC_TIME; - utc_time - ---------- - 15:14:01 - (1 row) - ``` - -- utc_timestamp() - - Function prototype: - - - `DATETIME UTC_TIMESTAMP()` - - `DATETIME UTC_TIMESTAMP(int fsp)` - - Function description: This function is used to return the current UTC date and time. The type is DATETIME. If an integer parameter is specified as the precision, the number of decimals to be retained in the result can be specified. The supported precision range is [0-6]. - - Remarks: - - - UTC_TIMESTAMP can be identified as a keyword. In this case, parentheses are not required. The effect is the same as that of the UTC_TIMESTAMP() function without parameters. - - Example: - - ```sql - MogDB=# select UTC_TIMESTAMP(); - utc_timestamp - --------------------- - 2022-09-06 15:16:28 - (1 row) - - MogDB=# select UTC_TIMESTAMP(6); - utc_timestamp - ---------------------------- - 2022-09-06 15:16:34.691118 - (1 row) - - MogDB=# select UTC_TIMESTAMP; - utc_timestamp - --------------------- - 2022-09-06 15:16:39 - (1 row) - ``` - -- date_bool(date) - - Description: Returns a Boolean value based on the year in a date value. If the year is **0**, **false** is returned. Otherwise, **true** is returned. - - Return type: Boolean - - Example: - - ```sql - MogDB=# select date_bool('2022-08-20'); - date_bool - ----------- - t - (1 row) - MogDB=# select date_bool('0000-08-20'); - date_bool - ----------- - f - (1 row) - ``` - -- time_bool(time) - - Description: Returns a Boolean value based on the hour in a time value. If the hour is **0**, **false** is returned. Otherwise, **true** is returned. - - Return type: Boolean - - Example: - - ```sql - MogDB=# select time_bool('18:50:00'); - time_bool - ----------- - t - (1 row) - MogDB=# select time_bool('00:50:00'); - time_bool - ----------- - f - (1 row) - ``` - -- dayname(date) - - Description: Returns the name of the day of the week corresponding to the date. The language set of the returned content is controlled by the GUC parameter [lc_time_names](./../../guc-parameters.md). - - Return type: text - - Note: This function is compatible with MySQL table insertion parameters and result constraints. - - Example: - - ```sql - MogDB=# select dayname('2000-1-1'); - dayname - ---------- - Saturday - (1 row) - - MogDB=# alter system set lc_time_names = 'zh_CN'; - ALTER SYSTEM SET - - MogDB=# select dayname('2000-1-1'); - dayname - --------- - Saturday - (1 row) - ``` - -- monthname(date) - - Description: Returns the full name of the month corresponding to the date. The language set of the returned content is controlled by the GUC parameter [lc_time_names](./../../guc-parameters.md). 
- - Return type: text - - Note: This function is compatible with MySQL table insertion parameters and result constraints. - - Example: - - ```sql - MogDB=# select monthname('2000-1-1'); - monthname - ----------- - January - (1 row) - - MogDB=# alter system set lc_time_names = 'zh_CN'; - ALTER SYSTEM SET - - MogDB=# select monthname('2000-1-1'); - monthname - ----------- - January - (1 row) - ``` - -- time_to_sec(time) - - Description: Converts time to seconds. - - Return type: integer - - Note: This function is compatible with MySQL table insertion parameters and result constraints. - - Example: - - ```sql - MogDB=# select time_to_sec('838:59:59'); - time_to_sec - ------------- - 3020399 - (1 row) - - MogDB=# select time_to_sec('-838:59:59'); - time_to_sec - ------------- - -3020399 - (1 row) - ``` - -- month(date) - - Description: Returns the month of a date. - - Return type: integer - - Note: This function is compatible with MySQL table insertion parameters and result constraints. - - Example: - - ```sql - MogDB=# select month('2021-11-12'); - month - ------- - 11 - (1 row) - - MogDB=# select month('2021-11-0'); - month - ------- - 11 - (1 row) - ``` - -- day(date) - - Description: Returns the day of a date. - - Return type: integer - - Note: This function is compatible with MySQL table insertion parameters and result constraints. - - Example: - - ```sql - MogDB=# select day('2021-11-12'); - day - ----- - 12 - (1 row) - - MogDB=# select day('2021-0-0'); - day - ----- - 0 - (1 row) - ``` - -- date(expr) - - Description: Extracts the date part from expr when expr is recognized as a date or datetime expression. - - Return type: text - - Note: This function is compatible with MySQL table insertion parameters and result constraints. - - Example: - - ```sql - MogDB=# select date('2021-11-12'); - date - ------------ - 2021-11-12 - (1 row) - - MogDB=# select date('2021-11-12 23:59:59.9999999'); - date - ------------ - 2021-11-13 - (1 row) - - MogDB=# select date('2021-11-0'); - date - ------------ - 2021-11-00 - (1 row) - - MogDB=# select date('2021-0-3'); - date - ------------ - 2021-00-03 - (1 row) - ``` - -- last_day(expr) - - Description: Returns the date of the last day of a month when expr is identified as date or datetime. - - Return type: date - - Note: This function is compatible with MySQL table insertion parameters and result constraints. In B-compatible databases, when the GUC parameter **b_compatibility_mode** is set to **true**, this function replaces the original last_day function of MogDB. - - Example: - - ```sql - MogDB=# set b_compatibility_mode = true; - SET - - MogDB=# select last_day('2021-1-30'); - last_day - ------------ - 2021-01-31 - (1 row) - - MogDB=# select last_day('2021-1-0'); - last_day - ------------ - 2021-01-31 - (1 row) - ``` - -- week(date[,mode]) - - Description: Returns the week of the date represented by the date parameter in a year. The mode parameter is optional. The value range is [0,7]. If no mode parameter is transferred, the GUC parameter **default_week_format** is used as the default mode parameter. - - The following table lists the values and meanings of the mode parameter. - - | mode | Description | - | :--- | :----------------------------------------------------------- | - | 0 | Sunday indicates the first day of a week. The value of week ranges from 0 to 53. The first week of a year must contain Sunday. | - | 1 | Monday indicates the first day of a week. The value of week ranges from 0 to 53. 
The first week of a year must contain at least four days in the year. | - | 2 | Sunday indicates the first day of a week. The value of week ranges from 1 to 53. The first week of a year must contain Sunday. | - | 3 | Monday indicates the first day of a week. The value of week ranges from 1 to 53. The first week of a year must contain at least four days in the year. | - | 4 | Sunday indicates the first day of a week. The value of week ranges from 0 to 53. The first week of a year must contain at least four days in the year. | - | 5 | Monday indicates the first day of a week. The value of week ranges from 0 to 53. The first week of a year must contain Monday. | - | 6 | Sunday indicates the first day of a week. The value of week ranges from 1 to 53. The first week of a year must contain at least four days in the year. | - | 7 | Monday indicates the first day of a week. The value of week ranges from 1 to 53. The first week of a year must contain Monday. | - - For the first day of a week, the value range of week is described as follows: - - - The first day of a week refers to the first day of a week. Monday or Sunday may be the first day of a week. - - The value range of week refers to the value range of the return value of the WEEK function. There are two value ranges: [0-53] and [1-53]. The value 0 in [0-53] indicates that the given date is actually located in the last week of the year before the given date. To associate the returned result with the year of the given date, the given date is considered to be located in the zeroth week of the year (that is, the first week has not started). If you want the week of a given date to be more closely related to its year, use 0, 1, 4, or 5 as the mode value. In this way, when the given date is in the last week of the year, the WEEK function returns 0. - - The condition for determining the first week of a year refers to the condition for determining that the given date is in the first week of the current year. Generally, the condition is determined only when the date is at the boundary of the year. There are two determination modes. The mode parameter determines the mode to be used. - - Method 1: If Monday or Sunday is the first day of a week and Monday or Sunday is in the year of the specified date, the week is the first week of the year. The value of mode can be 0, 2, 5, or 7. - - Method 2: If four or more days of the week to which the specified date belongs are in the year to which the specified date belongs, the week is the first week of the year to which the specified date belongs. Otherwise, the week is the last week of the previous year. The value of mode can be 1, 3, 4, or 6. - - Return type: integer - - Note: This function is compatible with MySQL table insertion parameters and result constraints. - - Example: - - ```sql - MogDB=# show default_week_format; - default_week_format - --------------------- - 0 - (1 row) - - -- If the specified date is in the last week of the previous year, the value of mode is 0. - MogDB=# select week('2000-1-1'); - week - ------ - 0 - (1 row) - - MogDB=# alter system set default_week_format = 2; - ALTER SYSTEM SET - - -- If the specified date is in the last week of the previous year, the value of mode is 2. - MogDB=# select week('2000-1-1'); - week - ------ - 52 - (1 row) - - MogDB=# select week('2000-1-1', 2); - week - ------ - 52 - (1 row) - ``` - -- yearweek(date[,mode]) - - Description: Returns the year and week of the date represented by the date parameter. mode is an optional integer parameter. 
The value range is [0,7]. If no mode parameter is specified, 0 is used as the default mode parameter. The GUC parameter default_week_format does not affect the yearweek function. For details about the mode parameter, see the week function. - - The yearweek function does not return 0 weeks. That is, the value range of week is always [1-53], which is not affected by the mode parameter. - - Return type: bigint - - Note: This function is compatible with MySQL table insertion parameters and result constraints. - - Example: - - ```sql - MogDB=# select week('1987-01-01', 0); - week - ------ - 0 - (1 row) - - MogDB=# select yearweek('1987-01-01', 0); - yearweek - ---------- - 198652 - (1 row) - ``` - -- datediff(expr1,expr2) - - Description: expr1 and expr2 can be date or datetime. Calculate the number of days represented by expr1-expr2. Only the date part of expr1 and expr2 is involved in the calculation. If an input parameter is invalid, the function returns NULL. - - Return type: integer (indicating the date difference, in days) - - Example: - - ```sql - MogDB=# select datediff('2001-01-01','321-02-02'); - datediff - ---------- - 613576 - (1 row) - ``` - -- from_days(N) - - Description: Returns the date corresponding to the number of days represented by N. - - Return type: date - - Example: - - ```sql - MogDB=# select from_days(365); - from_days - ------------ - 0000-00-00 - (1 row) - - MogDB=# select from_days(366); - from_days - ------------ - 0001-01-01 - (1 row) - ``` - -- timestampdiff(unit,datetime expr1,datetime expr2) - - Description: The function returns the values of two date parameters expr2 - expr1. Both parameters may be datetime or date. If the parameter is date, the time part is considered as 0. After the difference is calculated, the calculation result is converted into a specified unit for display. The value of **unit** can be MICROSECOND, SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, QUARTER, or YEAR. If an input parameter is invalid, the function returns NULL. - - Return type: bigint (indicating the difference displayed in a specified unit) - - Note: In B-compatible databases, this function replaces the original timestampdiff function of MogDB when the GUC parameter **b_compatibility_mode** is set to **true**. - - Example: - - ```sql - MogDB=# set b_compatibility_mode = true; - SET - - MogDB=# select timestampdiff(SECOND,'2001-01-01 12:12:12','2001-01-01 12:12:11'); - timestampdiff - --------------- - -1 - (1 row) - - MogDB=# select timestampdiff(MONTH,'2001-01-01 12:12:12','2001-02-01 12:12:12'); - timestampdiff - --------------- - 1 - (1 row) - ``` - -- convert_tz(datetime, from_tz, to_tz) - - Description: Converts datetime from the time zone specified by from_tz to the time zone specified by to_tz. If the range of datetime converted from from_tz to the UTC time zone exceeds [1970-01-01 00:00:01.000000, 2038-01-19 03:14:07.999999], the conversion is not performed. If the parameter is invalid, the function returns NULL. - - Return value: datetime - - Example: - - ```sql - MogDB=# SELECT CONVERT_TZ('2004-01-01 12:00:00','GMT','MET'); - convert_tz - --------------------- - 2004-01-01 13:00:00 - (1 row) - ``` - -- DATE_ADD(date/datetime/time, interval expr unit) - - Function prototype: - - ``` - text DATE_ADD(text expr1, INTERVAL expr2 unit) - time DATE_ADD(time expr1, INTERVAL expr2 unit) - ``` - - Description: This function performs the date and time addition operation and returns the result of expr1 plus expr2. 
expr1 can be data of the date, datetime, or time type, and expr2 indicates the interval value. If expr1 is of the time type, time can be added only when expr1 is of the time type. - - Return value type: same as the type of the first parameter. - - Remarks: - - - Generally, the return type is the same as the type of the first parameter. When the type of the first parameter is DATE and the unit of INTERVAL contains HOUR, MINUTE, and SECOND, the return result is DATETIME. - - Parameter restrictions during MySQL table insertion. - - If expr1 is in the date or datetime format and the value exceeds [0000-1-1 00:00:00.000000, 9999-12-31 23:59:59.999999], an error is reported. - - If expr1 is of the time type, time can be added only when expr1 is of the time type. If date_add('1:1:1',interval 1 second) does not enter this function, change it to date_add(time'1:1:1', interval 1 second). - - Result restriction during MySQL table insertion. - - When expr1 is in the date or datetime format, if the result exceeds [0000-1-1 00:00:00.000000, 9999-12-31 23:59:59.999999], an error is reported. If the result is within this range but less than '0001-1-1 00:00:00.000000', MySQL defines the result as 0000-00-00 or 0000-00-00 xx:xx:xx. The time depends on the calculation result. Because such a result is meaningless, an error is reported in MogDB. - - For data whose first parameter is of the time type, if the calculation result exceeds the time type range [-838:59:59, 838:59:59], an error is reported. - - Example: - - ```sql - MogDB=# SELECT DATE_ADD('2022-01-01', INTERVAL 31 DAY); - date_add - ------------ - 2022-02-01 - (1 row) - - MogDB=# SELECT DATE_ADD('2022-01-01 01:01:01', INTERVAL 1 YEAR); - date_add - --------------------- - 2023-01-01 01:01:01 - (1 row) - - MogDB=# SELECT DATE_ADD('2022-01-01', INTERVAL 1 SECOND); - date_add - --------------------- - 2022-01-01 00:00:01 - (1 row) - ``` - -- DATE_SUB(date/datetime/time, interval expr unit) - - Function prototype: - - ``` - text DATE_SUB(text expr1, INTERVAL expr2 unit) - time DATE_SUB(time expr1, INTERVAL expr2 unit) - ``` - - Description: This function performs the date and time subtractive operation and returns the result of expr1 minus expr2. expr1 can be data of the date, datetime, or time type, and expr2 indicates the interval value. If expr1 is of the time type, time can be subtracted only when expr1 is of the time type. - - Return value type: same as the type of the first parameter. - - Remarks: - - - Generally, the return type is the same as the type of the first parameter. When the type of the first parameter is DATE and the unit of INTERVAL contains HOUR, MINUTE, and SECOND, the return result is DATETIME. - - Parameter restrictions during MySQL table insertion. - - If expr1 is in the date or datetime format and the value exceeds [0000-1-1 00:00:00.000000, 9999-12-31 23:59:59.999999], an error is reported. - - If expr1 is of the time type, time can be subtracted only when expr1 is of the time type. If date_sub('1:1:1',interval 1 second) does not enter this function, change it to date_sub(time'1:1:1', interval 1 second). - - Result restriction during MySQL table insertion. - - When expr1 is in the date or datetime format, if the result exceeds [0000-1-1 00:00:00.000000, 9999-12-31 23:59:59.999999], an error is reported. If the result is within this range but less than '0001-1-1 00:00:00.000000', MySQL defines the result as 0000-00-00 or 0000-00-00 xx:xx:xx. The time depends on the calculation result. 
Because such a result is meaningless, an error is reported in MogDB. - - For data whose first parameter is of the time type, if the calculation result exceeds the time type range [-838:59:59, 838:59:59], an error is reported. - - Example: - - ```sql - MogDB=# SELECT DATE_SUB('2022-01-01', INTERVAL 31 DAY); - date_sub - ------------ - 2021-12-01 - (1 row) - - MogDB=# SELECT DATE_SUB('2022-01-01 01:01:01', INTERVAL 1 YEAR); - date_sub - --------------------- - 2021-01-01 01:01:01 - (1 row) - - - MogDB=# SELECT DATE_SUB('2022-01-01', INTERVAL 1 SECOND); - date_sub - --------------------- - 2021-12-31 23:59:59 - (1 row) - ``` - -- ADDDATE(date/datetime/time, interval/days) - - Description: Performs a date or time addition operation. When the second parameter is interval, the function is the same as the DATE_ADD function. For details, see DATE_ADD. When the second parameter is an integer, the integer is added to the first parameter as a number of days. - - Example: - - ```sql - MogDB=# SELECT ADDDATE('2021-11-12', INTERVAL 1 SECOND); - adddate - --------------------- - 2021-11-12 00:00:01 - (1 row) - - MogDB=# SELECT ADDDATE(time'12:12:12', INTERVAL 1 DAY); - adddate - ---------- - 36:12:12 - (1 row) - - MogDB=# SELECT ADDDATE('2021-11-12', 1); - adddate - ------------ - 2021-11-13 - (1 row) - - MogDB=# SELECT ADDDATE(time'12:12:12', 1); - adddate - ---------- - 36:12:12 - (1 row) - ``` - -- ADDTIME(datetime/time,time) - - Function prototype: - - ``` - time ADDTIME(text expr1, time expr2) - ``` - - Description: This function performs the time addition operation and returns the result of expr1 plus expr2. The expr1 can be in datetime or time format, and expr2 can only be in time format. - - Return value type: same as the type of the first parameter. - - Remarks: - - - Parameter restrictions during MySQL table insertion. - - If the value of the first parameter is in the datetime format and the value exceeds [0000-1-1 00:00:00.000000, 9999-12-31 23:59:59.999999], an error is reported. - - If the value of the first parameter is in the time format and exceeds the time range, an error is reported. - - The value of the second parameter must be in the time format. - - Result restriction during MySQL table insertion. - - If the result is in datetime format and exceeds [0000-1-1 00:00:00.000000, 9999-12-31 23:59:59.999999], an error is reported. If the result is within this range but is less than '0001-1-1 00:00:00.000000', null is returned. - - If the result is in the time format and the value exceeds [-838:59:59, 838:59:59], an error is reported. 
- - Example: - - ```sql - MogDB=# SELECT ADDTIME('11:22:33','10:20:30'); - addtime - ---------- - 21:43:03 - (1 row) - - MogDB=# SELECT ADDTIME('2020-03-04 11:22:33', '-10:20:30'); - addtime - --------------------- - 2020-03-04 01:02:03 - (1 row) - ``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-functions-and-operators.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-functions-and-operators.md deleted file mode 100644 index 88d08b30..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-functions-and-operators.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: dolphin Functions and Operators -summary: dolphin Functions and Operators -author: Guo Huan -date: 2023-05-19 ---- - -# Functions and Operators - -+ **[Assignment Operators](dolphin-assignment-operators.md)** -+ **[Character Processing Functions and Operators](dolphin-character-processing-functions-and-operators.md)** -+ **[Arithmetic Functions and Operators](dolphin-arithmetic-functions-and-operators.md)** -+ **[Dolphin Lock](dolphin-b-compatible-database-lock.md)** -+ **[Date and Time Processing Functions and Operators](dolphin-date-and-time-processing-functions-and-operators.md)** -+ **[Advisory Lock Functions](dolphin-advisory-lock-functions.md)** -+ **[Network Address Functions and Operators](dolphin-network-address-functions-and-operators.md)** -+ **[Conditional Expression Functions](dolphin-conditional-expression-functions.md)** -+ **[Aggregate Functions](dolphin-aggregate-functions.md)** -+ **[System Information Functions](dolphin-system-information-functions.md)** -+ **[Logical Operators](dolphin-logical-operators.md)** -+ **[Bit String Functions and Operators](dolphin-bit-string-functions-and-operators.md)** -+ **[JSON-JSONB Functions and Operators](dolphin-json-jsonb-functions-and-operators.md)** -+ **[Type Conversion Functions](dolphin-type-conversion-functions.md)** -+ **[Compatible Operators and Operations](dolphin-compatible-operators-and-operations.md)** -+ **[Comment Operators](dolphin-comment-operators.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-json-jsonb-functions-and-operators.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-json-jsonb-functions-and-operators.md deleted file mode 100644 index 0961cf74..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-json-jsonb-functions-and-operators.md +++ /dev/null @@ -1,646 +0,0 @@ ---- -title: dolphin JSON-JSONB Functions and Operators -summary: dolphin JSON-JSONB Functions and Operators -author: zhang cuiping -date: 2022-10-24 ---- - -# JSON-JSONB Functions and Operators - -**New JSON Functions** - -- json_array([val[, val] …]) - - Description: Inputs variable-length parameters and outputs a JSON array. 
- - Return type: array-json - - Example: - - ```sql - MogDB=# select json_array(1,'a','b',true,null); - - json_array - --------------------------- - - [1, "a", "b", true, null] - (1 row) - ``` - -- json_object([VARIADIC “any”]) - - Description: Inputs parameters with `key` and `value` that appear alternately. Constructs a JSON object from a variable parameter list. Before using the JSON object, set the GUC parameter **b_compatibility_mode** to **1**. - - Return type: json - - Remarks: - - - All keys in a JSON object are character strings. Therefore, `JSON_OBJECT()` converts `keys` that are not of the character string type to the character string type. To ensure program stability, `keys` of the string type are used. - - The `key` cannot be NULL, and the number of input parameters must be an even number. - - Example: - - ```sql - MogDB=# SET b_compatibility_mode = 1; - MogDB=# SELECT JSON_OBJECT( - MogDB(# 'name', - MogDB(# 'Tim', - MogDB(# 'age', - MogDB(# 20, - MogDB(# 'friend', - MogDB(# JSON_OBJECT('name', 'Jim', 'age', 20), - MogDB(# 'hobby', - MogDB(# JSON_BUILD_ARRAY('games', 'sports') - MogDB(# ) AS object; - - object - ------------------------------------------------------------------------------------------------------ - - {"age" : 20, "name" : "Tim", "hobby" : ["games", "sports"], "friend" : {"age" : 20, "name" : "Jim"}} - (1 row) - - MogDB=# SET b_compatibility_mode = 0; - MogDB=# select json_object('{a,b,"a b c"}', '{a,1,1}'); - json_object - --------------------------------------- - {"a" : "a", "b" : "1", "a b c" : "1"} - (1 row) - ``` - -- json_quote(string) - - Description: Inputs a character string, outputs a JSON file, and quotes the file with double quotation marks. - - Return type: json - - Example: - - ```sql - MogDB=# select json_quote('gauss'); - json_quote - ------------ - "gauss" - (1 row) - ``` - -- json_contains(json, json[, text]) - - Description: The third optional parameter is the `path` of the first JSON parameter. It determines whether the first JSON parameter contains the second JSON parameter. - - Return type: Boolean - - Remarks: - - - If any parameter is NULL, the function returns NULL. - - If the `path` does not exist in JSON, the function returns `NULL`. - - Example: - - ```sql - MogDB=# select json_contains('[1,2,3,4,5]','[3,5]'); - json_contains - --------------- - t - (1 row) - - MogDB=# select json_contains('[1,2,3,4,5]','6'); - json_contains - --------------- - f - (1 row) - - MogDB=# select json_contains('{"a":[null,true,false]}','{"a":false}'); - json_contains - --------------- - t - (1 row) - - MogDB=# select json_contains('{"a":[1,2,3]}','3'); - json_contains - --------------- - f - (1 row) - - MogDB=# select json_contains('{"a":[1,2,3]}','3','$.a'); - json_contains - --------------- - t - (1 row) - - MogDB=# select json_contains('{"a":[1,2,3]}','3','$.b'); - json_contains - --------------- - - (1 row) - ``` - -- json_contains_path(json, text, text[]) - - Description: Determines whether the target JSON parameter contains the input `path` parameter. The second parameter is used to select a mode. - - Return type: Boolean - - Remarks: - - - The second parameter can be `one` or `all`. - - `one` indicates that `true` is returned as long as one `path` exists. Otherwise, `false` is returned. `all` indicates that `true` is returned only when all `paths` exist. Otherwise, `false` is returned. - - - If the first and second parameters are `NULL`, the function returns NULL. - - - If the mode is `one`, `paths` are checked in sequence. 
If a NULL `path` is prior to any existing `path`, NULL is returned. - - If the mode is `all`, `paths` are checked in sequence. If a NULL `path` is prior to any `path` that does not exist, the function returns NULL. - - Example: - - ```sql - MogDB=# select json_contains_path('{"a": 1, "b": 2, "c": {"d": 4}}', 'one', '$.a', '$.e'); - json_contains_path - -------------------- - t - (1 row) - - MogDB=# select json_contains_path('{"a": 1, "b": 2, "c": {"d": 4}}', 'all', '$.a', '$.b','$."c".d'); - json_contains_path - -------------------- - t - (1 row) - - MogDB=# select json_contains_path('{"a": 1, "b": 2, "c": {"d": [3,4,5]}}', 'one', '$.c.d[3]'); - json_contains_path - -------------------- - f - (1 row) - - MogDB=# select json_contains_path('{"a": 1, "b": 2, "c": {"d": 4}}', 'all', '$.a.d'); - json_contains_path - -------------------- - f - (1 row) - - MogDB=# select json_contains_path('[1,2,3]',null,'$[0]'); - json_contains_path - -------------------- - - (1 row) - - MogDB=# select json_contains_path('[1,2,3]','one','$[0]',null,'$[1]'); - json_contains_path - -------------------- - t - (1 row) - - MogDB=# select json_contains_path('[1,2,3]','one','$[3]',null,'$[1]'); - json_contains_path - -------------------- - - (1 row) - - MogDB=# select json_contains_path('[1,2,3]','all','$[0]',null,'$[1]'); - json_contains_path - -------------------- - - (1 row) - - MogDB=# select json_contains_path('[1,2,3]','all','$[3]',null,'$[1]'); - json_contains_path - -------------------- - f - (1 row) - ``` - -- json_extract(json, VARIADIC text[]) - - Description: Extracts the data specified by the path expression from a JSON file and returns the data. - - Return type: json - - Example: - - ```sql - MogDB=# select json_extract('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}', '$.f4.f6'); - json_extract - -------------- - "stringy" - (1 row) - ``` - -- json_unquote(json_val) - - Description: Removes quotation marks from text, processes escape characters, or discards quotation marks in JSON values. - - Return type: text - - Example: - - ```sql - MogDB=# select json_unquote('"dajifa\\tIMIDF"'); - json_unquote - --------------- - dajifa IMIDF - (1 row) - ``` - -- json_unquote(json_extract(column, path)) - - Description: Removes quotation marks from text, processes escape characters, or discards quotation marks in JSON values. - - Return type: text - - Example: - - ```sql - MogDB=# select json_unquote(json_extract('{"a": "lihua"}', '$.a')); - json_unquote - -------------- - lihua - (1 row) - ``` - -- json_keys(json_doc[, path]) - - Description: Returns the key in the top-level value of a JSON object as a JSON array. If a path parameter is given, the top-level key of the JSON object indicated by the path is returned. - - Return type: json - - Example: - - ```sql - MogDB=# SELECT JSON_KEYS('{"a":123,"b":{"c":"qwe"}}'); - - json_keys - ----------- - - ["a","b"] - (1 row) - ``` - -- json_search(json_doc, one_or_all, search_str [, escape_char [, path] …]) - - Description: One or more path parameters can be input. The location of the target character string in the target file corresponding to the path is returned based on the escape character and `one_or_all` mode. - - Return type: text - - Remarks: - - - If the escape character is of the Boolean type, it is equivalent to NULL. By default, the backslash () is the escape character. You can enter a single-digit integer as the escape character. - - The target JSON file and target character string cannot be empty, and the path cannot be empty. 
If the path does not exist, an empty value is returned.
- - The value of `one_or_all` can only be `one` or `all`.
- - You can enter an integer, floating-point, or Boolean value in `search_str` for matching. However, only character strings in the target document can be matched.
- - Fuzzy match can be used for `search_str`, and wildcard characters can be used for matching in `path`.
-
- - Example:
-
- ```sql
- select json_search('"abc"','one','abc',true);
- json_search
- -------------
- "$"
- (1 row)
-
- select json_search('"a%c"','all','a1%c',1);
- json_search
- -------------
- "$"
- (1 row)
-
- select json_search('"abc"','one','abc','&','$',null);
- json_search
- -------------
-
- (1 row)
-
- select json_search('"1.2"','one',1.2);
- json_search
- -------------
- "$"
- (1 row)
-
- select json_search('{"a":[{"b":["abc","abc"]},"ac"],"c":["abbc","abcc"]}','all','a%c',null,'$.*[*]');
- json_search
- ----------------------------------------------------------
- ["$.a[0].b[0]","$.a[0].b[1]","$.a[1]","$.c[0]","$.c[1]"]
- (1 row)
-
- select json_search('{"a":[{"b":["abc","abc"]},"ac"],"c":["abbc","abcc"]}','all','a%c',null,'$**[1]');
- json_search
- -----------------------------------
- ["$.a[0].b[1]","$.a[1]","$.c[1]"]
- (1 row)
- ```
-
-- json_array_append(json, path, value[, path2, value2] …)
-
- - Description: Modifies a JSON document, appends an element to a specified array node, and returns the modified JSON document.
-
- - Return type: json
-
- - Example:
-
- ```sql
- MogDB=# select JSON_ARRAY_APPEND('{"name": "Tim", "hobby": "car"}', '$.name', 'food');
-
- json_array_append
- -------------------------------------------
-
- {"name": ["Tim", "food"], "hobby": "car"}
- (1 row)
- ```
-
-- json_append(json_doc, path, val[, path, val] …)
-
- - Description: The function is the same as that of the `json_array_append` function.
-
- - Return type: json
-
- - Note: The `json_append()` function may be removed in later versions. You are advised to use the `json_array_append` function.
-
- - Example:
-
- ```sql
- MogDB=# select JSON_ARRAY_APPEND('{"name": "Tim", "hobby": "car"}', '$.name', 'food');
-
- json_array_append
- -------------------------------------------
-
- {"name": ["Tim", "food"], "hobby": "car"}
- (1 row)
- ```
-
-- json_array_insert(json, path, value[, path2, value2] …)
-
- - Description: Modifies a JSON document. It inserts a value at a specified position in a specified array in the JSON document and returns the new JSON document.
-
- - Return type: json
-
- - Remarks:
-
- - - If the array position indicated by the path expression exceeds the length of the array, the new element is inserted at the end of the array.
- - If the JSON document or path is NULL, this function returns NULL.
-
- - Example:
-
- ```sql
- select json_array_insert('[1, [2, 3], {"a": [4, 5]}]', '$[0]', 0);
- json_array_insert
- -------------------------------
- [0, 1, [2, 3], {"a": [4, 5]}]
- (1 row)
-
-
- select json_array_insert('[1, [2, 3], {"a": [4, 5]}]', '$[9]', 4);
- json_array_insert
- -------------------------------
- [1, [2, 3], {"a": [4, 5]}, 4]
- (1 row)
- ```
-
-- json_insert(VARIADIC “any”)
-
- - Description: Inserts data into a JSON document and returns the new JSON document.
-
- - Return type: json
-
- - Remarks: When the JSON document or `path` is empty, an empty value is returned.
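-
- - Paths that already exist in the document are left unchanged; only missing paths receive new values. Below is a minimal sketch of this behavior (assuming Dolphin mirrors MySQL's JSON_INSERT semantics here; the literals are illustrative only):
-
- ```sql
- -- '$.x' already exists, so nothing is inserted and the document is returned as is.
- MogDB=# select json_insert('{"x": 1}', '$.x', 99);
- json_insert
- -------------
- {"x": 1}
- (1 row)
- ```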
- - Example:
-
- ```sql
- MogDB=# select json_insert('{"x": 1}','$.y', true);
- json_insert
- ----------------------------
- {"x": 1, "y": true}
- (1 row)
- ```
-
-- json_merge(json_doc, json_doc[, json_doc] …)
-
- - Description: The function is the same as that of the `json_merge_preserve` function.
-
- - Return type: json
-
- - Note: The `JSON_MERGE()` function may be removed in later versions. You are advised to use the `json_merge_preserve` function.
-
- - Example:
-
- ```sql
- MogDB=# select json_merge('"MogDB"', '[[1,2],3,"test"]');
- json_merge
- ----------------------------------
- ["MogDB", [1, 2], 3, "test"]
- (1 row)
- ```
-
-- json_merge_preserve(json_doc, json_doc[, json_doc] …)
-
- - Description: Merges two or more JSON documents into one; values that share a key are preserved by combining them into an array.
-
- - Return type: json
-
- - Remarks:
-
- - - If the value of any parameter is NULL, NULL is returned.
- - Merging specifications:
- - If one of two adjacent JSON parameters is a `scalar` or an object and the other is an array, the scalar or object is appended to the array as an element, in parameter order, so that the result is a single array.
- - If two adjacent JSON parameters are both `scalars`, they are combined into a single array in parameter order.
- - If two adjacent JSON parameters are both arrays, the elements of the two arrays are combined into a single array in parameter order.
- - If two adjacent JSON parameters are both objects, the members of the two objects are combined into a single object in `key` order.
- - The values of object members after the merge follow the `key` order.
-
- - Example:
-
- ```sql
- MogDB=# select json_merge_preserve('{"a":"abc"}', '[1,true,null]');
- json_merge_preserve
- -------------------------------
- [{"a": "abc"}, 1, true, null]
- (1 row)
-
- MogDB=# select json_merge_preserve('1', '"b"', 'true');
- json_merge_preserve
- ---------------------
- [1, "b", true]
- (1 row)
-
- MogDB=# select json_merge_preserve('[1,{"a":"abc"}]', '["b",false]');
- json_merge_preserve
- -------------------------------
- [1, {"a": "abc"}, "b", false]
- (1 row)
-
- MogDB=# select json_merge_preserve('{"b":"abc"}', '{"a":"jks"}');
- json_merge_preserve
- --------------------------
- {"a": "jks", "b": "abc"}
- (1 row)
-
- MogDB=# select json_merge_preserve(NULL, '1');
- json_merge_preserve
- ---------------------
-
- (1 row)
- ```
-
-- json_merge_patch(json_doc, json_doc[, json_doc] …)
-
- - Description: Merges two or more JSON documents; when members share a key, only the member from the later document is retained.
-
- - Return type: json
-
- - Remarks:
-
- - - If any parameter is NULL, the merge result of the previous parameter and this parameter is NULL.
- - If the parameter following a NULL parameter is not NULL:
- - If that parameter is an array or a scalar, the merge result is that parameter itself.
- - If that parameter is an object, the merge result is NULL.
- - Merging specifications:
- - If two adjacent JSON parameters are objects, the merge result is a single object.
- - If a member key of a JSON object is not duplicated in the other JSON object, the member is retained in the merge result.
- - If a member key of a JSON object is duplicated in the next JSON object, the member from the latter JSON object is retained in the merge result. In particular, when the `value` of the member under the same key in the next object is NULL, the key member is deleted from the result.
- - If one of two adjacent JSON parameters is not an object, the merge result is the second of the two parameters.
-
- - Example:
-
- ```sql
- MogDB=# select json_merge_patch('{"a":1}', '{"b":2}');
- json_merge_patch
- ------------------
- {"a": 1, "b": 2}
- (1 row)
-
- MogDB=# select json_merge_patch('{"a":1}', '{"a":2}');
- json_merge_patch
- ------------------
- {"a": 2}
- (1 row)
-
- MogDB=# select json_merge_patch('{"a":{"b":"abc"}}', '{"a":{"b":null}}');
- json_merge_patch
- ------------------
- {"a": {}}
- (1 row)
-
- MogDB=# select json_merge_patch('{"a":1}', 'true');
- json_merge_patch
- ------------------
- true
- (1 row)
-
- MogDB=# select json_merge_patch('{"a":1}', NULL);
- json_merge_patch
- ------------------
-
- (1 row)
-
- MogDB=# select json_merge_patch(NULL, '{"a":1}');
- json_merge_patch
- ------------------
-
- (1 row)
-
- MogDB=# select json_merge_patch(NULL, '[1,2,3]');
- json_merge_patch
- ------------------
- [1, 2, 3]
- (1 row)
- ```
-
-- json_remove(json, path[, path] …)
-
- - Description: Deletes the JSON values specified by the paths from a JSON document and returns the modified JSON document.
-
- - Return type: json
-
- - Remarks:
-
- - - Multiple path expressions can be provided for deletion. Multiple path parameters are executed from left to right; by the time the next parameter is executed, the JSON document may already have changed.
- - If a specified path does not exist in the JSON document, this function returns the original document.
- - If the JSON document or a path is NULL, this function returns NULL.
-
- - Example:
-
- ```sql
- SELECT JSON_REMOVE('[0, 1, 2, [3, 4]]', '$[0]', '$[2]');
- json_remove
- -------------
- [1, 2]
- (1 row)
- SELECT JSON_REMOVE('{"x": 1, "y": 2}', '$.x');
- json_remove
- -------------
- {"y": 2}
- (1 row)
- SELECT JSON_REMOVE('{"x": {"z":2,"a":3}, "y": 2}', NULL);
- json_remove
- -------------
-
- (1 row)
- SELECT JSON_REMOVE(NULL, '$.x.z');
- json_remove
- -------------
-
- (1 row)
- ```
-
-- json_replace([VARIADIC “any”])
-
- - Description: Replaces existing data in a JSON document and returns the new JSON document. The first parameter is a JSON document, followed by alternating path and replacement-value parameters.
-
- - Return type: json
-
- - Example:
-
- ```sql
- MogDB=# select json_replace('{"a": 1, "b": 2, "c": 3}', '$.b', 9);
- json_replace
- --------------------------
- {"a": 1, "b": 9, "c": 3}
- (1 row)
- ```
-
-- json_set(json_doc, path, val[, path, val] …)
-
- - Description: Takes a JSON document and alternating path and value parameters. For each existing path, the corresponding value is replaced; for each new path, the corresponding value is inserted.
-
- - Return type: json
-
- - Example:
-
- ```sql
- MogDB=# select json_set('{"student":{"id":1,"gender":"man"}}','$.age',23,'$.student.id',3);
- json_set
- ----------------------------------------------
- {"age":23,"student":{"id":3,"gender":"man"}}
- (1 row)
- ```
-
-- json_depth(json)
-
- - Description: Returns the maximum depth of a JSON document.
-
- - Return type: integer
-
- - Remarks:
-
- - - The depth of an empty array, empty object, or scalar value is 1.
- - An array containing only elements of depth 1, or a nonempty object whose member values all have depth 1, has depth 2.
- - Otherwise, the depth of a JSON document is one greater than the maximum depth of the values it contains.
-
- - Example:
-
- ```sql
- MogDB=# SELECT JSON_DEPTH('{}'), JSON_DEPTH('[]'), JSON_DEPTH('true');
- json_depth | json_depth | json_depth
- ------------+------------+------------
- 1 | 1 | 1
- (1 row)
- MogDB=# SELECT JSON_DEPTH('[10, 20]'), JSON_DEPTH('[[], {}]');
- json_depth | json_depth
- ------------+------------
- 2 | 2
- (1 row)
- MogDB=# SELECT JSON_DEPTH('[10, {"a": 20}]');
- json_depth
- ------------
- 3
- (1 row)
- ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-logical-operators.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-logical-operators.md
deleted file mode 100644
index 3f14c3f8..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-logical-operators.md
+++ /dev/null
@@ -1,85 +0,0 @@
----
-title: dolphin Logical Operators
-summary: dolphin Logical Operators
-author: zhang cuiping
-date: 2022-10-24
----
-
-# Logical Operators
-
-Compared with the original MogDB, Dolphin has two new logical operators:
-
-1. The **&&** operator is added.
-2. The **||** operator is added.
-
-- &&
-
- - Description: When **b_compatibility_mode** is set to **TRUE**, it indicates the logical AND operation. The supported types include Boolean, time, date, integer, floating point, and bit string. The truth table is as follows:
-
- | a | b | Result of a && b |
- | ----- | ----- | ---------------- |
- | TRUE | TRUE | TRUE |
- | TRUE | FALSE | FALSE |
- | TRUE | NULL | NULL |
- | FALSE | FALSE | FALSE |
- | FALSE | NULL | FALSE |
- | NULL | NULL | NULL |
-
- The following table describes the processing of different input types.
-
- | Data Type | Processing Method |
- | -------------- | ------------------------------------------------------------ |
- | Boolean | The logical AND operation is performed according to the truth table. |
- | Integer | Only the zero value is converted to Boolean false, and other values are converted to Boolean true. Then, the logical AND operation is performed. |
- | Floating point | Only zero is converted to Boolean false, and other values are converted to Boolean true. Then, the logical AND operation is performed. |
- | Bit string | Only the all-0 value is converted to Boolean false, and other values are converted to Boolean true. Then, the logical AND operation is performed. |
- | Time | The conversion of the time type depends only on the hour part. '00:xx:xx' is converted to Boolean false; 'yy:xx:xx' with a nonzero **yy** is converted to Boolean true. Then, the logical AND operation is performed. |
- | Date | The conversion of the date type depends only on the year part. '0000-xx-xx' is converted to Boolean false; 'yyyy-xx-xx' with a nonzero **yyyy** is converted to Boolean true. Then, the logical AND operation is performed. |
-
- Return type: Boolean
-
- Example:
-
- ```SQL
- MogDB=# SELECT 1 && 1;
- ?column?
- ----------
- t
- (1 row)
- ```
-
-- ||
-
- - Description: When **sql_mode** is not set to **'pipes_as_concat'**, it indicates a logical OR operation.
The supported types include Boolean, time, date, integer, floating point, and bit string. The truth table is as follows:
-
- | a | b | Result of a \|\| b |
- | ----- | ----- | ------------------ |
- | TRUE | TRUE | TRUE |
- | TRUE | FALSE | TRUE |
- | TRUE | NULL | TRUE |
- | FALSE | FALSE | FALSE |
- | FALSE | NULL | NULL |
- | NULL | NULL | NULL |
-
- The following table describes the processing of different input types.
-
- | Data Type | Processing Method |
- | -------------- | ------------------------------------------------------------ |
- | Boolean | The logical OR operation is performed according to the truth table. |
- | Integer | Only the zero value is converted to Boolean false, and other values are converted to Boolean true. Then, the logical OR operation is performed. |
- | Floating point | Only the zero value is converted to Boolean false, and other values are converted to Boolean true. Then, the logical OR operation is performed. |
- | Bit string | Only the all-0 value is converted to Boolean false, and other values are converted to Boolean true. Then, the logical OR operation is performed. |
- | Time | The conversion of the time type depends only on the hour part. '00:xx:xx' is converted to Boolean false; 'yy:xx:xx' with a nonzero **yy** is converted to Boolean true. Then, the logical OR operation is performed. |
- | Date | The conversion of the date type depends only on the year part. '0000-xx-xx' is converted to Boolean false; 'yyyy-xx-xx' with a nonzero **yyyy** is converted to Boolean true. Then, the logical OR operation is performed. |
-
- Return type: Boolean
-
- Example:
-
- ```SQL
- MogDB=# SELECT 0 || 0;
- ?column?
- ----------
- f
- (1 row)
- ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-network-address-functions-and-operators.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-network-address-functions-and-operators.md
deleted file mode 100644
index 3bdd00ea..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-network-address-functions-and-operators.md
+++ /dev/null
@@ -1,173 +0,0 @@
----
-title: dolphin Network Address Functions and Operators
-summary: dolphin Network Address Functions and Operators
-author: zhang cuiping
-date: 2022-10-24
----
-
-# Network Address Functions and Operators
-
-MogDB provides network-related functions.
-
-- is_ipv4(string)
-
- - Description: Determines whether the argument is a valid IPv4 address.
-
- - Return type: int
-
- - Example:
-
- ```sql
- MogDB=# select is_ipv4('192.168.0.1');
- is_ipv4
- ---------
- 1
- (1 row)
- MogDB=# select is_ipv4('192.168.0.1'::inet);
- is_ipv4
- ---------
- 1
- (1 row)
- ```
-
-- is_ipv6(string)
-
- - Description: Determines whether the argument is a valid IPv6 address.
- - Return type: int
-
- - Example:
-
- ```sql
- MogDB=# select is_ipv6('2403:A200:A200:0:AFFF::3');
- is_ipv6
- ---------
- 1
- (1 row)
- MogDB=# select is_ipv6('2403:A200:A200:0:AFFF::3'::inet);
- is_ipv6
- ---------
- 1
- (1 row)
- ```
-
-- inet_aton(text)
-
- - Description: Takes the dotted-decimal notation of an IPv4 address as a string and returns the value of the given IP address as an integer. If the input is not a valid IPv4 address or cannot be recognized, this function returns NULL.
-
- - Return type: int
-
- - Example:
-
- ```sql
- MogDB=# SELECT INET_ATON('10.0.5.9');
- inet_aton
- -----------
- 167773449
- (1 row)
- ```
-
-- inet_ntoa(int)
-
- - Description: Converts an integer network host address in network byte order into a string in dotted-decimal notation (for example, 127.0.0.1). If the input integer cannot be converted to a valid address, this function returns NULL.
-
- - Return type: text
-
- - Example:
-
- ```sql
- MogDB=# SELECT INET_NTOA(167773449);
- inet_ntoa
- -----------
- 10.0.5.9
- (1 row)
- ```
-
-- inet6_aton(text)
-
- - Description: Takes the given IPv6 or IPv4 network address as a string and returns a binary string representing the numeric value of the address in network byte order (big-endian). Because an IPv6 address in numeric format requires more bytes than the largest integer type, the returned binary string has one of two lengths:
-
- 1. For an IPv6 address, the returned string is 16 bytes long.
- 2. For an IPv4 address, the returned string is 4 bytes long.
- If the parameter is not a valid address or is NULL, INET6_ATON() returns NULL.
-
- - Return type: bytea
-
- - Example:
-
- ```sql
- MogDB=# SELECT HEX(INET6_ATON('fdfe::5a55:caff:fefa:9089'));
- hex
- ----------------------------------
- fdfe0000000000005a55cafffefa9089
- (1 row)
- MogDB=# SELECT HEX(INET6_ATON('10.0.5.9'));
- hex
- ----------
- 0a000509
- (1 row)
- ```
-
-- inet6_ntoa(bytea)
-
- - Description: Converts a binary string containing an IPv6 or IPv4 network address in numeric form, as returned by INET6_ATON(), back to its text representation. If the parameter is not a valid address or is NULL, INET6_NTOA() returns NULL.
-
- - Return type: text
-
- - Example:
-
- ```sql
- MogDB=# SELECT INET6_NTOA(INET6_ATON('fdfe::5a55:caff:fefa:9089'));
- inet6_ntoa
- ---------------------------
- fdfe::5a55:caff:fefa:9089
- (1 row)
- MogDB=# SELECT INET6_NTOA(INET6_ATON('10.0.5.9'));
- inet6_ntoa
- ------------
- 10.0.5.9
- (1 row)
- ```
-
-- is_ipv4_compat(bytea)
-
- - Description: Accepts an IPv6 address in numeric binary form, as returned by INET6_ATON(). If the parameter is a valid IPv4-compatible IPv6 address, 1 is returned; otherwise, 0 is returned. The format of an IPv4-compatible address is ::ipv4_address.
-
- - Return type: int
-
- - Example:
-
- ```sql
- MogDB=# SELECT IS_IPV4_COMPAT(INET6_ATON('::10.0.5.9'));
- is_ipv4_compat
- ----------------
- 1
- (1 row)
- MogDB=# SELECT IS_IPV4_COMPAT(INET6_ATON('::ffff:10.0.5.9'));
- is_ipv4_compat
- ----------------
- 0
- (1 row)
- ```
-
-- is_ipv4_mapped(bytea)
-
- - Description: Accepts an IPv6 address in numeric binary form, as returned by INET6_ATON(). If the parameter is a valid IPv4-mapped IPv6 address, 1 is returned; otherwise, 0 is returned. The IPv4-mapped address format is ::ffff:ipv4_address.
- - Return type: int
-
- - Example:
-
- ```sql
- MogDB=# SELECT IS_IPV4_MAPPED(INET6_ATON('::10.0.5.9'));
- is_ipv4_mapped
- ----------------
- 0
- (1 row)
-
- MogDB=# SELECT IS_IPV4_MAPPED(INET6_ATON('::ffff:10.0.5.9'));
- is_ipv4_mapped
- ----------------
- 1
- (1 row)
- ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-system-information-functions.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-system-information-functions.md
deleted file mode 100644
index ffc16992..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-system-information-functions.md
+++ /dev/null
@@ -1,76 +0,0 @@
----
-title: dolphin System Information Functions
-summary: dolphin System Information Functions
-author: zhang cuiping
-date: 2022-10-24
----
-
-# System Information Functions
-
-## Dolphin Session Information Functions
-
-- database()
-
- - Description: Returns the name of the current database schema.
-
- - Return type: name
-
- - Example:
-
- ```sql
- MogDB=# SELECT database();
- database
- ----------
- public
- (1 row)
- ```
-
- Note: **database** returns the first valid schema name in the search path. (If the search path is empty or contains no valid schema name, NULL is returned.) This is the schema that will be used for any tables or other named objects that are created without specifying a target schema.
-
-- uuid_short()
-
- - Description: Returns a short universal unique identifier for the current database instance.
-
- - Return type: int
-
- - Example:
-
- ```sql
- MogDB=# select uuid_short();
- uuid_short
- ------------------
- 3939644819374082
- (1 row)
- ```
-
-- dolphin_version()
-
- - Description: Returns a string describing the version information of the Dolphin plug-in.
-
- - Return type: text
-
- - Example:
-
- ```sql
- MogDB=# SELECT dolphin_version();
- dolphin_version
- ------------------------
- dolphin build 511401b6
- (1 row)
- ```
-
-- dolphin_types()
-
- - Description: Returns a two-dimensional string array describing the data types added by Dolphin. Each inner array lists, in order, the type name, whether precision is supported, and whether range is supported.
- - Return type: text\[][]
-
- - Example:
-
- ```sql
- MogDB=# SELECT dolphin_types();
- dolphin_types
- ----------------------------------------------------------------------------------------------------------------------------------------------
- {{uint1,false,false},{uint2,false,false},{uint4,false,false},{uint8,false,false},{year,true,false},{binary,true,false},{varbinary,true,false},{tinyblob,false,false},{mediumblob,false,false},{longblob,false,false},{set,false,false},{enum,false,false}}
- (1 row)
- ```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-type-conversion-functions.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-type-conversion-functions.md
deleted file mode 100644
index d4ad4f8e..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/functions-and-operators/dolphin-type-conversion-functions.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-title: dolphin Type Conversion Functions
-summary: dolphin Type Conversion Functions
-author: zhang cuiping
-date: 2022-10-24
----
-
-# Type Conversion Functions
-
-## Type Conversion Functions
-
-- cast(x as y)
-
- - Description: Converts x into the type specified by y. When **dolphin.b_compatibility_mode=on**, a char target type is converted to varchar; otherwise, the type remains char.
-
- - Example:
-
- ```sql
- MogDB=# set dolphin.b_compatibility_mode=on;
-
- MogDB=# SELECT cast('abc' as char(10));
- varchar
- ---------
- abc
- (1 row)
-
- MogDB=# set dolphin.b_compatibility_mode=off;
-
- MogDB=# SELECT cast('abc' as char(10));
- bpchar
- ------------
- abc
- (1 row)
- ```
-
-Dolphin also extends the CAST function with conversions from money to unsigned and from timestamp to unsigned:
-
-```sql
-CREATE CAST (timestamp AS uint8) WITH FUNCTION timestamp_uint8(timestamp) AS ASSIGNMENT;
-CREATE CAST (money AS uint8) WITH FUNCTION cash_uint(money) AS ASSIGNMENT;
-```
-
-## Compatibility
-
-The CREATE CAST statement conforms to the SQL standard, except that the SQL standard makes no provision for binary-coercible types or for extra arguments to implementation functions.
-
-```sql
-MogDB=# SELECT CAST('$2'::money as unsigned);
- uint8
- -------
- 2
- (1 row)
-MogDB=# SELECT CAST(CURRENT_TIMESTAMP::TIMESTAMP AS UNSIGNED);
- current_timestamp
- -------------------
- 20230103023621
- (1 row)
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-database.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-database.md
deleted file mode 100644
index ec06dea2..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-database.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: dolphin ALTER DATABASE
-summary: dolphin ALTER DATABASE
-author: Guo Huan
-date: 2023-05-15
----
-
-# ALTER DATABASE
-
-## Function
-
-**ALTER DATABASE** modifies a database, including its name, owner, connection limitation, and object isolation.
-
-It can also modify the attributes of a schema; for a schema, this syntax is meaningful only when the default character set and collation are modified.
-
-## Precautions
-
-Compared to the original MogDB, dolphin's changes to the ALTER DATABASE syntax are:
-
-- Add modifiable items [ [DEFAULT] CHARACTER SET | CHARSET [ = ] default_charset ] [ [DEFAULT] COLLATE [ = ] default_collation ].
-
-## Syntax
-
-- Modifying the default character set and collation of a schema
-
- ```
- ALTER DATABASE schema_name [ [DEFAULT] CHARACTER SET | CHARSET [ = ] default_charset ] [ [DEFAULT] COLLATE [ = ] default_collation ];
- ```
-
- > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**Note**:
- >
- > - In a B-compatible database, this syntax is supported only when dolphin.b_compatibility_mode is on.
- > - This syntax is equivalent to ALTER SCHEMA.
-
-## Parameter Description
-
-- **schema_name**
-
- The name of the database (schema) whose attributes are to be modified.
-
- Value range: a string that complies with the identifier naming convention.
-
-- **[ [DEFAULT] CHARACTER SET | CHARSET [ = ] default_charset ]**
-
- Specifies the default character set of the schema. When specified alone, the default collation of the schema is set to the default collation of the specified character set.
-
-- **[ [DEFAULT] COLLATE [ = ] default_collation ]**
-
- Specifies the default collation of the schema. When specified alone, the default character set of the schema is set to the character set corresponding to the specified collation.
-
-## Examples
-
-See [Examples](dolphin-create-database.md#Examples) for CREATE DATABASE.
-
-## Helpful Links
-
-[CREATE DATABASE](dolphin-create-database.md), [DROP DATABASE](dolphin-drop-database.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-function.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-function.md
deleted file mode 100644
index 2d1c84f4..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-function.md
+++ /dev/null
@@ -1,99 +0,0 @@
----
-title: dolphin ALTER FUNCTION
-summary: dolphin ALTER FUNCTION
-author: zhang cuiping
-date: 2022-10-24
----
-
-# ALTER FUNCTION
-
-## Function Description
-
-Modifies the attributes of a user-defined function.
-
-## Precautions
-
-Compared with the original MogDB, Dolphin modifies the ALTER FUNCTION syntax as follows:
-
-1. The modifiable LANGUAGE option is added.
-2. The modifiable item { CONTAINS SQL | NO SQL | READS SQL DATA | MODIFIES SQL DATA } is added.
-3. The modifiable item SQL SECURITY { DEFINER | INVOKER } is added.
-
-## Syntax
-
-- Modify the additional parameters of a user-defined function.
-
- ```
- ALTER FUNCTION function_name ( [ { [ argname ] [ argmode ] argtype} [, ...] ] )
- action [ ...
] [ RESTRICT ];
- ```
-
- The syntax of the **action** clause is as follows:
-
- ```
- {CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT}
- | {IMMUTABLE | STABLE | VOLATILE}
- | {SHIPPABLE | NOT SHIPPABLE}
- | {NOT FENCED | FENCED}
- | [ NOT ] LEAKPROOF
- | { [ EXTERNAL|SQL ] SECURITY INVOKER | [ EXTERNAL|SQL ] SECURITY DEFINER }
- | AUTHID { DEFINER | CURRENT_USER }
- | COST execution_cost
- | ROWS result_rows
- | SET configuration_parameter { { TO | = } { value | DEFAULT }| FROM CURRENT}
- | RESET {configuration_parameter | ALL}
- | COMMENT 'text'
- | LANGUAGE lang_name
- | { CONTAINS SQL | NO SQL | READS SQL DATA | MODIFIES SQL DATA }
- ```
-
-## Parameter Description
-
-- **LANGUAGE lang_name**
-
- Name of the language used to implement the function. This parameter is provided only for syntax compatibility and has no actual effect.
-
-- **SQL SECURITY INVOKER**
-
- Specifies that the function is to be executed with the permissions of the user that calls it. This parameter can be omitted.
-
- SQL SECURITY INVOKER, SECURITY INVOKER, and AUTHID CURRENT_USER are equivalent.
-
-- **SQL SECURITY DEFINER**
-
- Specifies that the function is to be executed with the privileges of the user that created it.
-
- SQL SECURITY DEFINER, AUTHID DEFINER, and SECURITY DEFINER are equivalent.
-
-- **CONTAINS SQL** | **NO SQL** | **READS SQL DATA** | **MODIFIES SQL DATA**
-
- Syntax compatibility item.
-
-## Example
-
-```sql
- --Specify NO SQL.
- MogDB=# ALTER FUNCTION f1 (s char(20)) NO SQL;
-
- --Specify CONTAINS SQL.
- MogDB=# ALTER FUNCTION f1 (s char(20)) CONTAINS SQL;
-
- --Specify LANGUAGE SQL.
- MogDB=# ALTER FUNCTION f1 (s char(20)) LANGUAGE SQL ;
-
- --Specify MODIFIES SQL DATA.
- MogDB=# ALTER FUNCTION f1 (s char(20)) MODIFIES SQL DATA;
-
- --Specify READS SQL DATA.
- MogDB=# ALTER FUNCTION f1 (s char(20)) READS SQL DATA;
-
- --Specify SECURITY INVOKER.
- MogDB=# ALTER FUNCTION f1 (s char(20)) SQL SECURITY INVOKER;
-
- --Specify SECURITY DEFINER.
- MogDB=# ALTER FUNCTION f1 (s char(20)) SQL SECURITY DEFINER;
-```
-
-## Helpful Links
-
-[ALTER FUNCTION](../../../../../../reference-guide/sql-syntax/ALTER-FUNCTION.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-procedure.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-procedure.md
deleted file mode 100644
index f80c6dfb..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-procedure.md
+++ /dev/null
@@ -1,93 +0,0 @@
----
-title: dolphin ALTER PROCEDURE
-summary: dolphin ALTER PROCEDURE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# ALTER PROCEDURE
-
-## Function Description
-
-Modifies the attributes of a user-defined stored procedure.
-
-## Precautions
-
-Compared with the original MogDB, Dolphin modifies the ALTER PROCEDURE syntax as follows:
-
-1. The modifiable LANGUAGE option is added.
-2. The modifiable item { CONTAINS SQL | NO SQL | READS SQL DATA | MODIFIES SQL DATA } is added.
-3. The modifiable item SQL SECURITY { DEFINER | INVOKER } is added.
-
-## Syntax
-
-- Modify the additional parameters of a user-defined stored procedure.
-
- ```
- ALTER PROCEDURE procedure_name ( [ { [ argname ] [ argmode ] argtype} [, ...] ] )
- action [ ...
] [ RESTRICT ];
- ```
-
- The syntax of the additional parameter **action** clause is as follows:
-
- ```
- {CALLED ON NULL INPUT | STRICT}
- | {IMMUTABLE | STABLE | VOLATILE}
- | {SHIPPABLE | NOT SHIPPABLE}
- | {NOT FENCED | FENCED}
- | [ NOT ] LEAKPROOF
- | { [ EXTERNAL ] SECURITY INVOKER | [ EXTERNAL ] SECURITY DEFINER }
- | AUTHID { DEFINER | CURRENT_USER }
- | COST execution_cost
- | ROWS result_rows
- | SET configuration_parameter { { TO | = } { value | DEFAULT }| FROM CURRENT}
- | RESET {configuration_parameter | ALL}
- | COMMENT 'text'
- | LANGUAGE lang_name
- | { CONTAINS SQL | NO SQL | READS SQL DATA | MODIFIES SQL DATA }
- ```
-
-## Parameter Description
-
-- **LANGUAGE lang_name**
-
- Name of the language used to implement the stored procedure. This parameter is provided only for syntax compatibility and has no actual effect.
-
-- **SQL SECURITY INVOKER**
-
- Specifies that the stored procedure is to be executed with the permissions of the user that calls it. This parameter can be omitted.
-
- SQL SECURITY INVOKER, SECURITY INVOKER, and AUTHID CURRENT_USER are equivalent.
-
-- **SQL SECURITY DEFINER**
-
- Specifies that the stored procedure is to be executed with the privileges of the user that created it.
-
- SQL SECURITY DEFINER, AUTHID DEFINER, and SECURITY DEFINER are equivalent.
-
-- **CONTAINS SQL** | **NO SQL** | **READS SQL DATA** | **MODIFIES SQL DATA**
-
- Syntax compatibility item.
-
-## Example
-
-```sql
- --Specify NO SQL.
- MogDB=# ALTER PROCEDURE proc1() NO SQL;
-
- --Specify CONTAINS SQL.
- MogDB=# ALTER PROCEDURE proc1() CONTAINS SQL;
-
- --Specify LANGUAGE SQL.
- MogDB=# ALTER PROCEDURE proc1() CONTAINS SQL LANGUAGE SQL ;
-
- --Specify MODIFIES SQL DATA.
- MogDB=# ALTER PROCEDURE proc1() CONTAINS SQL MODIFIES SQL DATA;
-
- --Specify SECURITY INVOKER.
- MogDB=# ALTER PROCEDURE proc1() SQL SECURITY INVOKER;
-```
-
-## Helpful Links
-
-[ALTER PROCEDURE](../../../../../../reference-guide/sql-syntax/ALTER-PROCEDURE.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-server.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-server.md
deleted file mode 100644
index 1badfa6f..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-server.md
+++ /dev/null
@@ -1,91 +0,0 @@
----
-title: dolphin ALTER SERVER
-summary: dolphin ALTER SERVER
-author: Guo Huan
-date: 2023-05-15
----
-
-# ALTER SERVER
-
-## Function
-
-Adds, modifies, and deletes the parameters of an existing server. Existing servers can be queried from the pg_foreign_server system table.
-
-## Precautions
-
-- This section contains only the syntax added by dolphin; the syntax of the original MogDB has not been removed or modified.
-- Compared to the original MogDB, dolphin's changes to the `ALTER SERVER` syntax are mainly:
- 1. Add optional OPTIONS: DATABASE, USER, PASSWORD, SOCKET, OWNER for modified servers whose fdw_name is mysql_fdw.
- 2. For a modified server whose fdw_name is mysql_fdw, if an option does not specify an action and the option already exists on the server, the action of this statement is changed to SET.
-
-## Syntax
-
-- Modify the parameters of the external server.
-
- ```
- ALTER SERVER server_name [ VERSION 'new_version' ]
- [ OPTIONS ( {[ ADD | SET | DROP ] option ['value']} [, ... ] ) ];
- ```
-
-- Modify the name of the external server.
-
- ```
- ALTER SERVER server_name
- RENAME TO new_name;
- ```
-
-## Parameter Description
-
-- **OPTIONS**
-
- Changes the options of this server. ADD, SET, and DROP specify the action to be performed; if no action is explicitly specified, ADD is assumed. Option names must be unique, and names and values are validated using the server's foreign data wrapper library.
-
- - The options supported by mysql\_fdw include:
-
- - **host** (default value is 127.0.0.1)
-
- The IP address of MySQL Server/MariaDB.
-
- - **port** (default value is 3306)
-
- The port number on which MySQL Server/MariaDB listens.
-
- - **user** (default value is NULL)
-
- The username used to connect to MySQL Server/MariaDB. If OPTIONS specifies this option and no user mapping exists for the current user to the given server, MogDB automatically creates a user mapping for the current user to the newly created server; if OPTIONS specifies this option and a user mapping already exists for the current user to the given server, MogDB modifies the corresponding OPTION value for that user mapping.
-
- - **password** (default value is NULL)
-
- The password used to connect to MySQL Server/MariaDB. If OPTIONS specifies this option and no user mapping exists for the current user to the given server, MogDB automatically creates a user mapping for the current user to the newly created server; if OPTIONS specifies this option and a user mapping already exists for the current user to the given server, MogDB modifies the corresponding OPTION value for that user mapping.
-
- - **database** (default value is NULL)
-
- Provided only for syntax compatibility; it has no real effect. Specify the database to which MySQL Server/MariaDB connects in [CREATE FOREIGN TABLE](../../../../../../reference-guide/sql-syntax/CREATE-FOREIGN-TABLE.md) or [ALTER FOREIGN TABLE](../../../../../../reference-guide/sql-syntax/ALTER-FOREIGN-TABLE.md).
-
- - **owner** (default value is NULL)
-
- Provided only for syntax compatibility; it has no real effect.
-
- - **socket** (default value is NULL)
-
- Provided only for syntax compatibility; it has no real effect.
-
-## Examples
-
-Modify the server.
-
-```sql
--- When the user mapping from the current user to the given server does not exist
-MogDB=# alter server server_test options(user 'my_user', password 'mypassword');
-WARNING: USER MAPPING for current user to server server_test created.
-ALTER SERVER
-
--- When the user mapping from the current user to the given server already exists.
-MogDB=# alter server server_test options(port '3308', user 'my_user');
-WARNING: USER MAPPING for current user to server server_test altered.
-ALTER SERVER
-```
-
-## Helpful Links
-
-[CREATE SERVER](dolphin-create-server.md), [DROP SERVER](../../../../../../reference-guide/sql-syntax/DROP-SERVER.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table-partition.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table-partition.md
deleted file mode 100644
index 32bfd521..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table-partition.md
+++ /dev/null
@@ -1,377 +0,0 @@
----
-title: dolphin ALTER TABLE PARTITION
-summary: dolphin ALTER TABLE PARTITION
-author: zhang cuiping
-date: 2022-10-24
----
-
-# ALTER TABLE PARTITION
-
-## Function
-
-**ALTER TABLE PARTITION** modifies table partitions, including adding, deleting, splitting, and merging partitions, and altering partition attributes.
-
-Compared with the kernel syntax, Dolphin modifies the rebuild, remove, check, repair, optimize, truncate, analyze, and exchange operations in B compatibility mode.
-
-## Precautions
-
-- The tablespace of the added partition cannot be **PG_GLOBAL**.
-- The name of the added partition must be different from the names of existing partitions in the partitioned table.
-- The key value of the added partition must be consistent with the type of partition keys in the partitioned table.
-- If a range partition is added, the key value of the added partition must be greater than the upper limit of the last range partition in the partitioned table.
-- If a list partition is added, the key value of the added partition cannot be the same as that of an existing partition.
-- Hash partitions cannot be added.
-- If the number of partitions in the target partitioned table has reached the maximum (**1048575**), partitions cannot be added.
-- If a partitioned table has only one partition, the partition cannot be deleted.
-- When using **PARTITION FOR()** to choose partitions, the number of values specified in the brackets must equal the number of partition key columns, and their types must be consistent with the partition keys.
-- The **Value** partitioned table does not support the **Alter Partition** operation.
-- Column-store tables and row-store tables do not support partition splitting.
-- Partitions cannot be added to an interval partitioned table.
-- Hash partitioned tables do not support splitting, merging, adding, or deleting partitions.
-- List partitioned tables do not support partition splitting or partition merging.
-- Only the owner of a partitioned table or users granted with the **ALTER** permission on the partitioned table can run the **ALTER TABLE PARTITION** command. The system administrator has the permission to run the command by default.
-
-## Syntax
-
-- Modify the syntax of the table partition.
-
- ```
- ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )}
- action [, ... ];
- ```
-
- **action** indicates the following clauses for maintaining partitions. For partition continuity, when multiple clauses are used for partition maintenance, MogDB first performs **DROP PARTITION**, then **ADD PARTITION**, and finally runs the remaining clauses in sequence.
- - ``` - move_clause | - exchange_clause | - row_clause | - merge_clause | - modify_clause | - split_clause | - add_clause | - drop_clause | - truncate_clause | - rebuild_clause | - remove_clause | - repair_clause | - check_clause | - optimize_clause - ``` - - - The **move_clause** syntax is used to move the partition to a new tablespace. - - ``` - MOVE PARTITION { partion_name | FOR ( partition_value [, ...] ) } TABLESPACE tablespacename - ``` - - - The **exchange_clause** syntax is used to move the data from an ordinary table to a specified partition. - - ``` - EXCHANGE PARTITION { ( partition_name ) | FOR ( partition_value [, ...] ) } - WITH TABLE {[ ONLY ] ordinary_table_name | ordinary_table_name * | ONLY ( ordinary_table_name )} - [ { WITH | WITHOUT } VALIDATION ] [ VERBOSE ] [ UPDATE GLOBAL INDEX ] - ``` - - The ordinary table and partition whose data is to be exchanged must meet the following requirements: - - - The number of columns of the ordinary table is the same as that of the partition, and their information should be consistent, including: column name, data type, constraint, collation information, storage parameter, and compression information. - - The compressed information of the ordinary table and partitioned table should be consistent. - - The number and information of indexes of the ordinary table and partition should be consistent. - - The number and information of constraints of the ordinary table and partition should be consistent. - - An ordinary table cannot be a temporary table. A partitioned table can only be a range partitioned table, list partitioned table, or hash partitioned table. - - Ordinary tables and partitioned tables do not support dynamic data masking and row-level access control constraints. - - List partitioned tables and hash partitioned tables cannot be column-store. - - List, hash, and range partitioned tables support exchange_clause. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif)**NOTICE:** - > - > - When the exchange is done, the data and tablespace of the ordinary table and partition are exchanged. The statistics about ordinary tables and partitions become unreliable, and they should be analyzed again. - > - A non-partition key cannot be used to create a local unique index. Therefore, if an ordinary table contains a unique index, data cannot be exchanged. - -- The **row_clause** syntax is used to set row movement of a partitioned table. - - ``` - { ENABLE | DISABLE } ROW MOVEMENT - ``` - -- The **merge_clause** syntax is used to merge partitions into one. - - ``` - MERGE PARTITIONS { partition_name } [, ...] INTO PARTITION partition_name - [ TABLESPACE tablespacename ] [ UPDATE GLOBAL INDEX ] - ``` - -- The **modify_clause** syntax is used to set whether a partitioned index is available. - - ``` - MODIFY PARTITION partition_name { UNUSABLE LOCAL INDEXES | REBUILD UNUSABLE LOCAL INDEXES } - ``` - -- The **split_clause** syntax is used to split one partition into different partitions. - - ``` - SPLIT PARTITION { partition_name | FOR ( partition_value [, ...] ) } { split_point_clause | no_split_point_clause } [ UPDATE GLOBAL INDEX ] - ``` - -- The **split_point_clause** syntax is used to specify a split point. 
-
- ```
- AT ( partition_value ) INTO ( PARTITION partition_name [ TABLESPACE tablespacename ] , PARTITION partition_name [ TABLESPACE tablespacename ] )
- ```
-
- > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
- >
- > - Column-store tables and row-store tables do not support partition splitting.
- > - The split point must fall within the partition key range of the partition being split. A split point splits one partition into exactly two new partitions.
-
-- The **no_split_point_clause** syntax does not specify a split point.
-
- ```
- INTO { ( partition_less_than_item [, ...] ) | ( partition_start_end_item [, ...] ) }
- ```
-
- > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
- >
- > - The first new partition key specified by partition_less_than_item should be greater than that of the previously split partition (if any), and the last partition key specified by partition_less_than_item should equal that of the partition being split.
- > - The start point (if any) of the first new partition specified by **partition_start_end_item** must be equal to the partition key (if any) of the previous partition. The end point (if any) of the last partition specified by **partition_start_end_item** must be equal to the partition key of the splitting partition.
- > - partition_less_than_item supports a maximum of 4 partition keys, while partition_start_end_item supports only one partition key. For details about the supported data types, see [PARTITION BY RANGE(parti….](../../../../../../reference-guide/sql-syntax/CREATE-TABLE-PARTITION.md)
- > - partition_less_than_item and partition_start_end_item cannot be used in the same statement. There is no restriction on different split statements.
-
-- The syntax of **partition_less_than_item** is as follows:
-
- ```
- PARTITION partition_name VALUES LESS THAN ( { partition_value | MAXVALUE } [, ...] )
- [ TABLESPACE tablespacename ]
- ```
-
-- The syntax of **partition_start_end_item** is as follows. For details about the constraints, see [START END](../../../../../../reference-guide/sql-syntax/CREATE-TABLE-PARTITION.md).
-
- ```
- PARTITION partition_name {
- {START(partition_value) END (partition_value) EVERY (interval_value)} |
- {START(partition_value) END ({partition_value | MAXVALUE})} |
- {START(partition_value)} |
- {END({partition_value | MAXVALUE})}
- } [TABLESPACE tablespace_name]
- ```
-
-- The **add_clause** syntax is used to add one or more partitions to a specified partitioned table (see the sketch after the notice below).
-
- ```
- ADD PARTITION ( partition_col1_name = partition_col1_value [, partition_col2_name = partition_col2_value ] [, ...] )
- [ LOCATION 'location1' ]
- [ PARTITION (partition_colA_name = partition_colA_value [, partition_colB_name = partition_colB_value ] [, ...] ) ]
- [ LOCATION 'location2' ]
- ADD {partition_less_than_item | partition_start_end_item| partition_list_item }
- ```
-
-- The syntax of **partition_list_item** is as follows:
-
- ```
- PARTITION partition_name VALUES (list_values_clause)
- [ TABLESPACE tablespacename ]
- ```
-
- > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
- >
- > - partition_list_item supports only one partition key. For details about the supported data types, see [PARTITION BY LIST(partit…](../../../../../../reference-guide/sql-syntax/CREATE-TABLE-PARTITION.md).
- > - Interval and hash partitioned tables do not support partition addition.
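-
- For illustration only, a minimal sketch of adding partitions with **add_clause** (the table names, partition names, and boundary values are hypothetical):
-
- ```sql
- -- Add a range partition to a hypothetical range-partitioned table.
- MogDB=# ALTER TABLE sales_range ADD PARTITION p_2024 VALUES LESS THAN ('2025-01-01');
-
- -- Add a list partition to a hypothetical list-partitioned table.
- MogDB=# ALTER TABLE sales_list ADD PARTITION p_east VALUES ('shanghai', 'nanjing');
- ```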
- -- The **drop_clause** syntax is used to remove a partition from a specified partitioned table. - - ``` - DROP PARTITION { partition_name | FOR ( partition_value [, ...] ) } [ UPDATE GLOBAL INDEX ] - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** Hash partitioned table does not support partition deletion. - -- The **truncate_clause** syntax is used to remove a specified partition from a partitioned table. - - ``` - TRUNCATE PARTITION { partition_name | FOR ( partition_value [, ...] ) } [ UPDATE GLOBAL INDEX ] - ``` - -- The syntax for modifying the name of a partition is as follows: - - ``` - ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name )} - RENAME PARTITION { partion_name | FOR ( partition_value [, ...] ) } TO partition_new_name; - ``` - -- The syntax for recreating a partition is as follows: It is generally used to reclaim the space used by a partition, which has the same effect as deleting all records stored in the partition and then inserting them again. This is useful for defragmentation. - - Column-store tables are not supported, and level-2 partitions of level-2 partitioned tables cannot be specified. - - ``` - REBUILD PARTITION { partition_name } [, …] - REBUILD PARTITION ALL - ``` - -- The syntax for removing partitions from a table is as follows: Partitions are removed from a table but all data is retained. - - Column-store tables and segment tables are not supported. - - ``` - REMOVE PARTITIONING - ``` - -- The syntax for repairing, checking, and optimizing partitioned tables is as follows: It is used only for syntax and has no actual purpose. - - ``` - CHECK PARTITION { partition_name } [, …] - CHECK PARTITION ALL - REPAIR PARTITION { partition_name } [, …] - REPAIR PARTITION ALL - OPTIMIZE PARTITION { partition_name } [, …] - OPTIMIZE PARTITION ALL - ``` - -- The syntax for truncating partitions in a B-compatible database is as follows: The truncate operation deletes all data corresponding to the current partition. - - ``` - TRUNCATE PARTITION { partition_name } [, …] - TRUNCATE PARTITION all - ``` - -- The syntax for exchanging partitions in a B-compatible database is as follows: It can be used to exchange data between partitioned tables and ordinary tables. Data in ordinary tables and partitions is exchanged, and tablespace information in ordinary tables and partitions is exchanged. In this case, the statistics of ordinary tables and partitions become unreliable. You need to run ANALYZE on ordinary tables and partitions again. - - Level-2 partitions cannot be exchanged. - - ``` - exchange partition partition_name with table table_name (without/with validation); - ``` - -- The syntax for analyzing partitions in a B-compatible database is as follows: It collects statistics related to table contents. The execution plan generator uses the statistics to determine the most effective execution plan. - - Level-2 partitions cannot be specified using ANALYZE. 
-
- ```
- analyze partition { partition_name } [, …]
- analyze partition all;
- ```
-
-- The syntax for adding partitions in a B-compatible database is as follows:
-
- ```
- ADD {partition_less_than_item | partition_start_end_item| partition_list_item } [, …]
- ```
-
-- The syntax for dropping partitions in a B-compatible database is as follows:
-
- ```
- DROP PARTITION { { partition_name } [ UPDATE GLOBAL INDEX ] } [, …]
- DROP SUBPARTITION { { partition_name } [ UPDATE GLOBAL INDEX ] } [, …]
- ```
-
-- The syntax for reorganizing partitions in a B-compatible database is as follows: It splits or merges specified partitions to reorganize the definition of partitions.
-
- Key points for repartitioning with ALTER TABLE … REORGANIZE PARTITION:
-
- - The options used by PARTITION to determine the new partitioning scheme should follow the same rules as those used by the CREATE TABLE statement.
-
- - A new RANGE partitioning scheme cannot have any overlapping ranges; a new LIST partitioning scheme cannot have any overlapping value sets.
-
- - The combination of partitions in the partition_definitions list must cover the same overall range or value set as the combination of partitions named in partition_list.
-
- - For a RANGE-partitioned table, you can only reorganize adjacent partitions; you cannot skip range partitions.
-
- - For LIST partitions, value definitions for which corresponding data exists cannot be deleted.
-
- - REORGANIZE PARTITION cannot be used to change the partition type used by a table.
-
- - The original table data cannot be lost.
-
- - The interval partition and value partition are not supported.
-
- ```
- REORGANIZE PARTITION {{ partition_name } [, …]} INTO {partition_less_than_item | partition_list_item } [, …]
- ```
-
-## Parameter Description
-
-- **table_name**
-
- Specifies the name of a partitioned table.
-
- Value range: an existing partitioned table name.
-
-- **partition_name**
-
- Specifies the name of a partition.
-
- Value range: an existing partition name.
-
-- **tablespacename**
-
- Specifies which tablespace the partition moves to.
-
- Value range: an existing tablespace name
-
-- **partition_value**
-
- Partition key value.
-
- Values specified by **PARTITION FOR ( partition_value [, …] )** can uniquely identify a partition.
-
- Value range: partition keys of the partition to be operated on.
-
-- **UNUSABLE LOCAL INDEXES**
-
- Sets all the indexes unusable in the partition.
-
-- **REBUILD UNUSABLE LOCAL INDEXES**
-
- Rebuilds all the indexes in the partition.
-
-- **ENABLE/DISABLE ROW MOVEMENT**
-
- Sets row movement.
-
- If an **UPDATE** changes a tuple's value on the partition key, the partition that the tuple belongs to changes. This parameter determines whether such an update reports an error or moves the tuple between partitions.
-
- Value range:
-
- - **ENABLE**: Row movement is enabled.
- - **DISABLE**: Row movement is disabled.
-
- The default value is **ENABLE**.
-
-- **ordinary_table_name**
-
- Specifies the name of the ordinary table whose data is to be migrated.
-
- Value range: an existing table name.
-
-- **{ WITH | WITHOUT } VALIDATION**
-
- Checks whether the ordinary table data meets the specified partition key range of the partition to be exchanged.
-
- Value range:
-
- - **WITH**: checks whether the common table data meets the partition key range of the partition to be exchanged. If any data does not meet the required range, an error is reported.
- - **WITHOUT**: does not check whether the common table data meets the partition key range of the partition to be exchanged.
-
- The default value is **WITH**.
-
- The check is time-consuming, especially when the data volume is large. Therefore, use **WITHOUT** only when you are sure that the current ordinary table data meets the partition key range of the partition to be migrated.
-
-- **VERBOSE**
-
- When **VALIDATION** is **WITH**, data in the ordinary table that falls outside the partition key range of the target partition is inserted into the correct partition. If there is no correct partition into which the data can be inserted, an error is reported.
-
- > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** **VERBOSE** can be specified only when **VALIDATION** is **WITH**.
-
-- **partition_new_name**
-
- Specifies the new name of a partition.
-
- Value range: a string that complies with the naming convention.
-
-## Examples
-
-For details, see [Examples](../../../../../../reference-guide/sql-syntax/CREATE-TABLE-PARTITION.md) in CREATE TABLE PARTITION.
-
-## Helpful Links
-
-[CREATE TABLE PARTITION](../../../../../../reference-guide/sql-syntax/CREATE-TABLE-PARTITION.md),[DROP TABLE](../../../../../../reference-guide/sql-syntax/DROP-TABLE.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table.md
deleted file mode 100644
index 19214ebc..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-table.md
+++ /dev/null
@@ -1,182 +0,0 @@
----
-title: dolphin ALTER TABLE
-summary: dolphin ALTER TABLE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# ALTER TABLE
-
-## Function
-
-Modifies tables, including modifying table definitions, renaming tables, renaming specified columns in tables, renaming table constraints, setting table schemas, enabling or disabling row-level security policies, and adding or updating multiple columns.
-
-## Precautions
-
-- This section describes only the new syntax of Dolphin. The original syntax of MogDB is not deleted or modified.
-- If a statement contains multiple subcommands, the DROP INDEX and RENAME INDEX commands are executed first. The two commands have the same priority.
-
-## Syntax
-
-- **ALTER TABLE** modifies the definition of a table.
-
- ```
- ALTER TABLE [ IF EXISTS ] { table_name [*] | ONLY table_name | ONLY ( table_name ) }
- action [, ... ];
- ```
-
- The **action** can be one of the following clauses:
-
- ```
- column_clause
- | {DISABLE | ENABLE} KEYS
- | DROP INDEX index_name [ RESTRICT | CASCADE ]
- | DROP PRIMARY KEY [ RESTRICT | CASCADE ]
- | DROP FOREIGN KEY foreign_key_name [ RESTRICT | CASCADE ]
- | RENAME INDEX index_name to new_index_name
- | ADD table_indexclause
- | MODIFY column_name column_type ON UPDATE CURRENT_TIMESTAMP
- ```
-
-- Recreate a table.
-
- ```
- ALTER TABLE table_name FORCE;
- ```
-
-- Rename a table. The renaming does not affect stored data.
-
- ```
- ALTER TABLE [ IF EXISTS ] table_name
- RENAME { TO | AS } new_table_name;
- ```
-
-- Add the ON UPDATE attribute to the timestamp column of the table.
-
-  ```sql
-  ALTER TABLE table_name
-  MODIFY column_name column_type ON UPDATE CURRENT_TIMESTAMP;
-  ```
-
-- Delete the ON UPDATE attribute from the timestamp column of the table.
-
-  ```sql
-  ALTER TABLE table_name
-  MODIFY column_name column_type;
-  ```
-
-- **ADD table_indexclause**
-
-  Adds an index to the table.
-
-  ```
-  {INDEX | KEY} [index_name] [index_type] (key_part,...)[index_option]...
-  ```
-
-  Values of index_type are as follows:
-
-  ```
-  USING {BTREE | HASH | GIN | GIST | PSORT | UBTREE}
-  ```
-
-  Values of key_part are as follows:
-
-  ```
-  {col_name[(length)] | (expr)} [ASC | DESC]
-  ```
-
-  The index_option parameter is as follows:
-
-  ```
-  index_option:{
-      COMMENT 'string'
-    | index_type
-  }
-  ```
-
-  COMMENT and index_type may appear in any order and any number of times, but only the last occurrence of each takes effect.
-
-## Parameter Description
-
-- **{DISABLE | ENABLE} KEYS**
-
-  Disables or enables all non-unique indexes of a table.
-
-- **DROP INDEX index_name [ RESTRICT | CASCADE ]**
-
-  Deletes an index from a table.
-
-- **DROP PRIMARY KEY [ RESTRICT | CASCADE ]**
-
-  Deletes the primary key of a table.
-
-- **DROP FOREIGN KEY foreign_key_name [ RESTRICT | CASCADE ]**
-
-  Deletes a foreign key from a table.
-
-- **RENAME INDEX index_name to new_index_name**
-
-  Renames an index of a table.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> For details about the involved parameters, see [ALTER TABLE](../../../../../../reference-guide/sql-syntax/ALTER-TABLE.md).
-
-## Examples
-
--- Create tables, a foreign key, and a non-unique index.
-
-```sql
-MogDB=# CREATE TABLE alter_table_tbl1 (a INT PRIMARY KEY, b INT);
-MogDB=# CREATE TABLE alter_table_tbl2 (c INT PRIMARY KEY, d INT);
-MogDB=# ALTER TABLE alter_table_tbl2 ADD CONSTRAINT alter_table_tbl_fk FOREIGN KEY (d) REFERENCES alter_table_tbl1 (a);
-MogDB=# CREATE INDEX alter_table_tbl_b_ind ON alter_table_tbl1(b);
-```
-
--- Disable and enable non-unique indexes.
-
-```sql
-MogDB=# ALTER TABLE alter_table_tbl1 DISABLE KEYS;
-MogDB=# ALTER TABLE alter_table_tbl1 ENABLE KEYS;
-```
-
--- Delete the index.
-
-```sql
-MogDB=# ALTER TABLE alter_table_tbl1 DROP KEY alter_table_tbl_b_ind;
-```
-
--- Delete a primary key.
-
-```sql
-MogDB=# ALTER TABLE alter_table_tbl2 DROP PRIMARY KEY;
-```
-
--- Delete a foreign key.
-
-```sql
-MogDB=# ALTER TABLE alter_table_tbl2 DROP FOREIGN KEY alter_table_tbl_fk;
-```
-
--- Recreate a table.
-
-```sql
-MogDB=# ALTER TABLE alter_table_tbl1 FORCE;
-```
-
--- Rename the index.
-
-```sql
-MogDB=# CREATE INDEX alter_table_tbl_b_ind ON alter_table_tbl1(b);
-MogDB=# ALTER TABLE alter_table_tbl1 RENAME INDEX alter_table_tbl_b_ind TO new_alter_table_tbl_b_ind;
-```
-
--- Delete the tables.
- -```sql -MogDB=# DROP TABLE alter_table_tbl1, alter_table_tbl2; -``` - -## Helpful Links - -[ALTER TABLE](../../../../../../reference-guide/sql-syntax/ALTER-TABLE.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-tablespace.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-tablespace.md deleted file mode 100644 index 82bb57bf..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-tablespace.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: dolphin ALTER TABLESPACE -summary: dolphin ALTER TABLESPACE -author: zhang cuiping -date: 2022-10-24 ---- - -# ALTER TABLESPACE - -## Function - -Modifies the attributes of a tablespace. - -## Precautions - -Compared with the original MogDB, Dolphin modifies the ALTER TABLESPACE syntax as follows: - -1. The WAIT option is added for syntax compatibility only. -2. The ENGINE [=] engine_name option is added for syntax compatibility only. - -## Syntax - -- The syntax of renaming a tablespace is as follows: - - ``` - ALTER TABLESPACE tablespace_name - RENAME TO new_tablespace_name [ alter_option_list [ ... ] ]; - ``` - -- The syntax of setting the owner of a tablespace is as follows: - - ``` - ALTER TABLESPACE tablespace_name - OWNER TO new_owner [ alter_option_list [ ... ] ]; - ``` - -- The syntax of setting the attributes of a tablespace is as follows: - - ``` - ALTER TABLESPACE tablespace_name - SET ( {tablespace_option = value} [, ... ] ) - [ alter_option_list [ ... ] ]; - ``` - -- The syntax of resetting the attributes of a tablespace is as follows: - - ``` - ALTER TABLESPACE tablespace_name - RESET ( { tablespace_option } [, ...] ) - [ alter_option_list [ ... ] ]; - ``` - -- The syntax for setting the quota of a tablespace is as follows: - - ``` - ALTER TABLESPACE tablespace_name - RESIZE MAXSIZE { UNLIMITED | 'space_size'} - [ alter_option_list [ ... ] ]; - ``` - - ``` - Where alter_option_list is: - WAIT - | ENGINE [=] engine_name - ``` - -## Parameter Description - -- **tablespace_name** - - Specifies the tablespace to be modified. - - Value range: an existing tablespace name - -- **new_tablespace_name** - - Specifies the new name of a tablespace. - - The new name cannot start with PG_. - - Value range: a string. It must comply with the naming convention. - -- **new_owner** - - Specifies the new owner of the tablespace. - - Value range: an existing username - -- **tablespace_option** - - Sets or resets the parameters of a tablespace. - - Value: - - - **seq_page_cost**: sets the optimizer to calculate the cost of obtaining the disk page in sequence one time. The default value is **1.0**. - - - **random_page_cost**: sets the optimizer to calculate the cost of obtaining the disk page in random sequence one time. The default value is **4.0**. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - random_page_cost is relative to seq_page_cost. It is meaningless when it is equal to or less than seq_page_cost. - - > - The prerequisite of using **4.0** as the default value is that the optimizer uses indexes to scan the table data and that the hit ratio of data in the cache reaches about 90%. 
-
-    > - If the table data space is less than the physical memory, decrease the value to a proper level. If the hit ratio of data in the cache is lower than 90%, increase the value.
-
-    > - If random-access memory like SSD is adopted, the value can be decreased to a certain degree to reflect the cost of a true random scan.
-
-  Value range: a positive floating-point number.
-
-- **RESIZE MAXSIZE**
-
-  Resets the maximum size of a tablespace.
-
-  Value:
-
-  - **UNLIMITED**: No limit is set for this tablespace.
-
-  - The value is determined by space_size. For details about the format, see [CREATE TABLESPACE](../../../../../../reference-guide/sql-syntax/CREATE-TABLESPACE.md).
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-  >
-  > - If the adjusted quota is smaller than the current tablespace usage, the adjustment is successful. You need to decrease the tablespace usage to a value less than the new quota before writing data to the tablespace.
-
-  > - The following syntax can also be used to modify **MAXSIZE**:
-  >
-  >   ```
-  >   ALTER TABLESPACE tablespace_name RESIZE MAXSIZE
-  >   { 'UNLIMITED' | 'space_size'};
-  >   ```
-
-- **engine_name**
-
-  This parameter has no actual effect.
-
-  Value: any combination of characters.
-
-## Examples
-
-```sql
---Create a tablespace.
-MogDB=# CREATE TABLESPACE ds_location1 RELATIVE LOCATION 'tablespace/tablespace_1';
-
---Create user joe.
-MogDB=# CREATE ROLE joe IDENTIFIED BY 'xxxxxxxxx';
-
---Create user jay.
-MogDB=# CREATE ROLE jay IDENTIFIED BY 'xxxxxxxxx';
-
---Create an ordinary tablespace and set its owner to user joe.
-MogDB=# CREATE TABLESPACE ds_location2 OWNER joe RELATIVE LOCATION 'tablespace/tablespace_1';
-
---Rename the tablespace ds_location1 to ds_location3 and specify option WAIT. The actual function is not affected.
-MogDB=# ALTER TABLESPACE ds_location1 RENAME TO ds_location3 WAIT;
-
---Change the owner of the ds_location2 tablespace by specifying option ENGINE. The actual function is not affected.
-MogDB=# ALTER TABLESPACE ds_location2 OWNER TO jay ENGINE = 'test';
-
---Change the quota of the ds_location2 tablespace and specify the options ENGINE and WAIT. The actual function is not affected.
-MogDB=# ALTER TABLESPACE ds_location2 RESIZE MAXSIZE UNLIMITED ENGINE = 'test' WAIT;
-
---Delete the tablespaces.
-MogDB=# DROP TABLESPACE ds_location2 ENGINE = 'test2';
-MogDB=# DROP TABLESPACE ds_location3;
-
---Delete the users.
-MogDB=# DROP ROLE joe;
-MogDB=# DROP ROLE jay;
-```
-
-## Helpful Links
-
-[CREATE TABLESPACE](../../../../../../reference-guide/sql-syntax/CREATE-TABLESPACE.md), [DROP TABLESPACE](../../../../../../reference-guide/sql-syntax/DROP-TABLESPACE.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-view.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-view.md
deleted file mode 100644
index c48fdf2a..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-alter-view.md
+++ /dev/null
@@ -1,160 +0,0 @@
----
-title: dolphin ALTER VIEW
-summary: dolphin ALTER VIEW
-author: Guo Huan
-date: 2023-05-15
----
-
-# ALTER VIEW
-
-## Function
-
-ALTER VIEW changes various auxiliary attributes of a view. (To change the query definition of a view, use CREATE OR REPLACE VIEW.)
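-
-As a brief illustration of this division of labor (the table and view names below are hypothetical), auxiliary attributes such as the name are changed with ALTER VIEW, while the query itself is redefined with CREATE OR REPLACE VIEW:
-
-```sql
-MogDB=# CREATE TABLE t_demo (id int, val text);
-MogDB=# CREATE VIEW v_demo AS SELECT id FROM t_demo;
-
---Change an auxiliary attribute: rename the view.
-MogDB=# ALTER VIEW v_demo RENAME TO v_demo2;
-
---Change the query definition: use CREATE OR REPLACE VIEW, appending the new column at the end.
-MogDB=# CREATE OR REPLACE VIEW v_demo2 AS SELECT id, val FROM t_demo;
-```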
-
-## Precautions
-
-The ALTER VIEW command can be executed only by the owner of the view or by a user who has been granted the ALTER privilege on the view (the system administrator has this privilege by default). The following privilege constraints also apply to the attributes to be modified:
-
-- To modify the schema of a view, the current user must be the owner of the view or the system administrator, must have the CREATE privilege for the new schema, and must not create a naming conflict with a synonym that already exists in the new schema.
-- To modify the owner of a view, the current user must be the owner of the view or the system administrator, must be a member of the new owner role, and this role must have the CREATE privilege for the schema in which the view resides.
-- To rename a view, the new name must not conflict with a synonym that already exists in the current schema.
-
-Dolphin adds support for specifying the ALGORITHM option in the syntax.
-
-## Syntax
-
-- Set the default value of a view column.
-
-  ```
-  ALTER [ALGORITHM = {UNDEFINED | MERGE | TEMPTABLE}] VIEW [ IF EXISTS ] view_name
-      ALTER [ COLUMN ] column_name SET DEFAULT expression;
-  ```
-
-- Drop the default value of a view column.
-
-  ```
-  ALTER [ALGORITHM = {UNDEFINED | MERGE | TEMPTABLE}] VIEW [ IF EXISTS ] view_name
-      ALTER [ COLUMN ] column_name DROP DEFAULT;
-  ```
-
-- Modify the owner of the view.
-
-  ```
-  ALTER [ALGORITHM = {UNDEFINED | MERGE | TEMPTABLE}] VIEW [ IF EXISTS ] view_name
-      OWNER TO new_owner;
-  ```
-
-- Rename the view.
-
-  ```
-  ALTER [ALGORITHM = {UNDEFINED | MERGE | TEMPTABLE}] VIEW [ IF EXISTS ] view_name
-      RENAME TO new_name;
-  ```
-
-- Set the schema to which the view belongs.
-
-  ```
-  ALTER [ALGORITHM = {UNDEFINED | MERGE | TEMPTABLE}] VIEW [ IF EXISTS ] view_name
-      SET SCHEMA new_schema;
-  ```
-
-- Set the options of the view.
-
-  ```
-  ALTER [ALGORITHM = {UNDEFINED | MERGE | TEMPTABLE}] VIEW [ IF EXISTS ] view_name
-      SET ( { view_option_name [ = view_option_value ] } [, ... ] );
-  ```
-
-- Reset the options of the view.
-
-  ```
-  ALTER [ALGORITHM = {UNDEFINED | MERGE | TEMPTABLE}] VIEW [ IF EXISTS ] view_name
-      RESET ( view_option_name [, ... ] );
-  ```
-
-- Set the definition of the view (this syntax is supported only in B-compatible mode).
-
-  ```
-  ALTER [ALGORITHM = {UNDEFINED | MERGE | TEMPTABLE}] [DEFINER = user] VIEW view_name [ ( column_name [, ...] ) ]
-      AS query [WITH [CASCADE | LOCAL] CHECK OPTION];
-  ```
-
-  >![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
-  >
-  >The new query in ALTER VIEW AS cannot change the column definitions of the original query, including their order, column names, data types, and type precision. Other columns can only be added at the end of the list.
-
-## Parameter Description
-
-- **IF EXISTS**
-
-  With this option, no error is generated if the view does not exist; only a message is displayed.
-
-- **ALGORITHM**
-
-  Specifies the algorithm. Options: UNDEFINED, MERGE, TEMPTABLE. Currently provided only for syntax compatibility, with no actual function.
-
-- **view\_name**
-
-  The name of the view, optionally schema-qualified.
-
-  Range of values: string, conforming to the identifier naming convention.
-
-- **column\_name**
-
-  An optional list of column names for the view. If not given, the column names are taken from the query.
-
-  Range of values: string, conforming to the identifier naming convention.
-
-- **SET/DROP DEFAULT**
-
-  Sets or drops the default value of a column. This parameter currently has no practical effect.
-
-- **new\_owner**
-
-  The user name of the new owner of the view.
-
-- **new\_name**
-
-  The new name of the view.
-
-- **new\_schema**
-
-  The new schema for the view.
-
-- **view\_option\_name \[ = view\_option\_value \]**
-
-  This clause specifies an optional parameter for the view.
-
-  - **security\_barrier**
-
-    Use this parameter when the view is intended to provide row-level security.
-
-    Range of values: Boolean type, TRUE, FALSE.
-
-  - **check\_option**
-
-    Specifies the check option for this view.
-
-    Range of values: LOCAL, CASCADED.
-
-## Examples
-
-```sql
--- Create a view consisting of rows with c_customer_sk less than 150.
-MogDB=# CREATE VIEW tpcds.customer_details_view_v1 AS
-    SELECT * FROM tpcds.customer
-    WHERE c_customer_sk < 150;
-
--- Modify the view name.
-MogDB=# ALTER VIEW tpcds.customer_details_view_v1 RENAME TO customer_details_view_v2;
-
--- Modify the schema to which the view belongs.
-MogDB=# ALTER VIEW tpcds.customer_details_view_v2 SET schema public;
-
--- Delete the view.
-MogDB=# DROP VIEW public.customer_details_view_v2;
-```
-
-## Helpful Links
-
-[CREATE VIEW](dolphin-create-view.md), [DROP VIEW](../../../../../../reference-guide/sql-syntax/DROP-VIEW.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-analyze-analyse.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-analyze-analyse.md
deleted file mode 100644
index 45b18363..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-analyze-analyse.md
+++ /dev/null
@@ -1,93 +0,0 @@
----
-title: dolphin ANALYZE-ANALYSE
-summary: dolphin ANALYZE-ANALYSE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# ANALYZE | ANALYSE
-
-## Function
-
-**ANALYZE** collects statistics on ordinary tables in a database and stores the results in the **PG_STATISTIC** system catalog. The execution plan generator uses these statistics to determine the most efficient execution plan.
-
-If no parameter is specified, **ANALYZE** analyzes each table and partitioned table in the current database. You can also specify **table_name**, **column**, and **partition_name** to limit the analysis to a specified table, column, or partitioned table.
-
-**ANALYZE|ANALYSE VERIFY** is used to check whether the data files of ordinary tables (row-store tables and column-store tables) in a database are damaged.
-
-## Precautions
-
-![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** For details about the precautions, see [ANALYZE](../../../../../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md).
-
-## Syntax
-
-- Table statistics
-
-  ```
-  {ANALYZE | ANALYSE} [ VERBOSE ] [ NO_WRITE_TO_BINLOG | LOCAL ] TABLE { [schema.]table_name } [, ... ]
-  ```
-
-## Parameter Description
-
-- **NO_WRITE_TO_BINLOG | LOCAL**
-
-  These options exist only for syntax compatibility and have no actual effect.
-
-![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** For details about the involved parameters, see [ANALYZE](../../../../../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md).
-
-## Examples
-
--- Create a table.
-
-```sql
-MogDB=# CREATE TABLE customer_info
-(
-WR_RETURNED_DATE_SK       INTEGER            ,
-WR_RETURNED_TIME_SK       INTEGER            ,
-WR_ITEM_SK                INTEGER      NOT NULL,
-WR_REFUNDED_CUSTOMER_SK   INTEGER
-)
-;
-```
-
--- Create a partitioned table.
-
-```sql
-MogDB=# CREATE TABLE customer_par
-(
-WR_RETURNED_DATE_SK       INTEGER            ,
-WR_RETURNED_TIME_SK       INTEGER            ,
-WR_ITEM_SK                INTEGER      NOT NULL,
-WR_REFUNDED_CUSTOMER_SK   INTEGER
-)
-PARTITION BY RANGE(WR_RETURNED_DATE_SK)
-(
-PARTITION P1 VALUES LESS THAN(2452275),
-PARTITION P2 VALUES LESS THAN(2452640),
-PARTITION P3 VALUES LESS THAN(2453000),
-PARTITION P4 VALUES LESS THAN(MAXVALUE)
-)
-ENABLE ROW MOVEMENT;
-```
-
--- Run **ANALYZE** to update statistics.
-
-```sql
-MogDB=# ANALYZE TABLE customer_info, customer_par;
-        Table         |   Op    | Msg_type | Msg_text
-----------------------+---------+----------+----------
- public.customer_info | analyze | status   | OK
- public.customer_par  | analyze | status   | OK
-(2 rows)
-```
-
--- Delete the tables.
-
-```sql
-MogDB=# DROP TABLE customer_info;
-MogDB=# DROP TABLE customer_par;
-```
-
-## Helpful Links
-
-[ANALYZE](../../../../../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-ast.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-ast.md
deleted file mode 100644
index 11f36486..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-ast.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: dolphin AST
-summary: dolphin AST
-author: zhang cuiping
-date: 2022-10-24
----
-
-# AST
-
-## Function
-
-Verifies the MogDB syntax tree.
-
-It checks whether the statement following the AST keyword can generate a MogDB syntax tree.
-
-## Precautions
-
-If the verification fails, a syntax parsing error is thrown. If the verification succeeds, no command output is displayed.
-
-## Syntax
-
-```
-AST [ STMT ] ;
-```
-
-## Parameter Description
-
-- **STMT**
-
-Any type of SQL statement and stored procedure statement is supported.
-
-## Examples
-
-```sql
--- Verify a table creation statement.
-MogDB=# AST CREATE TABLE TEST(ID INT6);
-
--- A statement that fails verification.
-MogDB=# AST CREATE TABLE TEST;
-ERROR: syntax error at or near ";"
-LINE 1:AST CREATE TABLE TEST;
-                            ^
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-checksum-table.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-checksum-table.md
deleted file mode 100644
index a6715c8e..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-checksum-table.md
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: dolphin CHECKSUM TABLE
-summary: dolphin CHECKSUM TABLE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# CHECKSUM TABLE
-
-## Function
-
-Calculates the table data checksum.
-
-## Precautions
-
-- The QUICK mode is not supported (NULL is returned).
-- NULL is returned for non-ordinary tables (such as views) and tables that do not exist.
-- Comparability with table checksums of heterogeneous databases is not supported. (For example, if the number of records is the same, the query results in MogDB and MySQL cannot be compared.) - -## Syntax - -``` -CHECKSUM TABLE tbl_name [, tbl_name] ... [QUICK | EXTENDED] -``` - -## Parameter Description - -- **tbl_name** - - Table name. You can specify a table name or **schema_name.table_name**. - -- **[QUICK | EXTENDED]** Verification mode. Only EXTENDED (default value) is supported. - -## Examples - -```sql ---Create a simple table. -MogDB=# CREATE SCHEMA tst_schema1; -MogDB=# SET SEARCH_PATH TO tst_schema1; -MogDB=# CREATE TABLE tst_t1 -( -id int, -name VARCHAR(20), -addr text, -phone text, -addr_code text -); -MogDB=# CREATE TABLE tst_t2 AS SELECT * FROM tst_t1; -INSERT 0 0 - ---Verify different insertion sequences. -MogDB=# INSERT INTO tst_t1 values(2022001, 'tst_name1', 'tst_addr1', '15600000001', '000001'); -INSERT INTO tst_t1 values(2022002, 'tst_name2', 'tst_addr2', '15600000002', '000002'); -INSERT INTO tst_t1 values(2022003, 'tst_name3', 'tst_addr3', '15600000003', '000003'); -INSERT INTO tst_t1 values(2022004, 'tst_name4', 'tst_addr4', '15600000004', '000004'); - -INSERT INTO tst_t2 (SELECT * FROM tst_t1 ORDER BY id DESC); - -MogDB=# checksum table tst_t1,tst_t2,xxx; - Table | Checksum ---------------------+------------ - tst_schema1.tst_t1 | 1579899754 - tst_schema1.tst_t2 | 1579899754 - tst_schema1.xxx | NULL - - --Test a table containing large columns. - MogDB=# CREATE TABLE blog -( -id int, -title text, -content text -); -MogDB=# CREATE TABLE blog_v2 AS SELECT * FROM blog; -MogDB=# INSERT INTO blog values(1, 'title1', '01234567890'), (2, 'title2', '0987654321'); -MogDB=# CREATE OR REPLACE FUNCTION loop_insert_result_toast(n integer) -RETURNS integer AS $$ -DECLARE - count integer := 0; -BEGIN - LOOP - EXIT WHEN count = n; - UPDATE blog SET content=content||content where id = 2; - count := count + 1; - END LOOP; - RETURN count; -END; $$ -LANGUAGE PLPGSQL; -MogDB=# select loop_insert_result_toast(16); - loop_insert_result_toast --------------------------- - 16 -MogDB=# INSERT INTO blog_v2 (SELECT * FROM blog); -MogDB=# checksum table blog,blog_v2; - Table | Checksum ----------------------+------------ - tst_schema1.blog | 6249493220 - tst_schema1.blog_v2 | 6249493220 - ---Test a segment-page table. 
-MogDB=# CREATE TABLE tst_seg_t1(id int, name VARCHAR(20)) WITH (segment=on);
-MogDB=# CREATE TABLE tst_seg_t2(id int, name VARCHAR(20)) WITH (segment=on);
-MogDB=# INSERT INTO tst_seg_t1 values(2022001, 'name_example_1');
-INSERT INTO tst_seg_t1 values(2022002, 'name_example_2');
-INSERT INTO tst_seg_t1 values(2022003, 'name_example_3');
-MogDB=# INSERT INTO tst_seg_t2 (SELECT * FROM tst_seg_t1);
-MogDB=# checksum table tst_seg_t1,tst_seg_t2;
-         Table          |  Checksum
-------------------------+------------
- tst_schema1.tst_seg_t1 | 5620410817
- tst_schema1.tst_seg_t2 | 5620410817
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-database.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-database.md
deleted file mode 100644
index 3a90350e..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-database.md
+++ /dev/null
@@ -1,67 +0,0 @@
----
-title: dolphin CREATE DATABASE
-summary: dolphin CREATE DATABASE
-author: Guo Huan
-date: 2023-05-15
----
-
-# CREATE DATABASE
-
-## Function
-
-Creates a new database. By default, the new database is created by copying the standard system database template0, and only creation from template0 is supported.
-
-It can also create a new schema, whose default character set and collation can be set.
-
-## Precautions
-
-Compared with the original MogDB, Dolphin modifies the CREATE DATABASE syntax as follows:
-
-- The options [ [DEFAULT] CHARACTER SET | CHARSET [ = ] default_charset ] [ [DEFAULT] COLLATE [ = ] default_collation ] are added.
-
-## Syntax
-
-```
-CREATE DATABASE [IF NOT EXISTS] database_name
-    [ [DEFAULT] CHARACTER SET | CHARSET [ = ] default_charset ] [ [DEFAULT] COLLATE [ = ] default_collation ];
-```
-
-## Parameter Description
-
-- **database\_name**
-
-  The name of the database.
-
-  Range of values: string, conforming to the naming convention for identifiers.
-
-- **[ [DEFAULT] CHARACTER SET | CHARSET [ = ] default_charset ]**
-
-  Specifies the default character set for the schema. When specified alone, it also sets the default collation of the schema to the default collation of the specified character set.
-
-- **[ [DEFAULT] COLLATE [ = ] default_collation ]**
-
-  Specifies the default collation for the schema. When specified alone, it also sets the default character set of the schema to the character set corresponding to the specified collation.
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**Note**:
-  >
-  > - Under B compatibility, this syntax is supported only if dolphin.b_compatibility_mode is on.
-  > - When this syntax is used, the statement is equivalent to CREATE SCHEMA, which is the actual SCHEMA creation syntax, and database\_name is the SCHEMA name.
-  > - Under B compatibility with dolphin.b_compatibility_mode on, if default_charset and default_collation are not specified but other CREATE DATABASE options are specified, the statement is still treated as CREATE DATABASE syntax.
-  > - Under B compatibility, when dolphin.b_compatibility_mode is on and no option is specified, the syntax is equivalent to CREATE SCHEMA; when dolphin.b_compatibility_mode is off, it is equivalent to CREATE DATABASE.
-
-## Examples
-
-```sql
--- Enable dolphin.b_compatibility_mode.
-MogDB=# set dolphin.b_compatibility_mode = on;
-SET
-MogDB=# create database test1;
-CREATE SCHEMA
-MogDB=# create database test2 charset 'utf8';
-CREATE SCHEMA
-MogDB=# drop database if exists test1;
-```
-
-## Helpful Links
-
-[ALTER DATABASE](dolphin-alter-database.md), [DROP DATABASE](dolphin-drop-database.md), [CREATE DATABASE](../../../../../../reference-guide/sql-syntax/CREATE-DATABASE.md)
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-function.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-function.md
deleted file mode 100644
index 04db71bb..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-function.md
+++ /dev/null
@@ -1,148 +0,0 @@
----
-title: dolphin CREATE FUNCTION
-summary: dolphin CREATE FUNCTION
-author: zhang cuiping
-date: 2022-10-24
----
-
-# CREATE FUNCTION
-
-## Function
-
-Creates a function.
-
-## Precautions
-
-Compared with the original MogDB, Dolphin modifies the CREATE FUNCTION syntax as follows:
-
-1. The default value **plpgsql** of LANGUAGE is added.
-2. The syntax compatibility item [NOT] DETERMINISTIC is added.
-3. The syntax compatibility item { CONTAINS SQL | NO SQL | READS SQL DATA | MODIFIES SQL DATA } is added.
-4. The syntax compatibility item SQL SECURITY { DEFINER | INVOKER } is added.
-
-## Syntax
-
-After Dolphin is loaded, the CREATE FUNCTION syntax is as follows:
-
-- Syntax (compatible with PostgreSQL) for creating a user-defined function:
-
-  ```
-  CREATE [ OR REPLACE ] FUNCTION function_name
-  ( [ { argname [ argmode ] argtype [ { DEFAULT | := | = } expression ]
-  } [, ...] ] )
-  [ RETURNS rettype
-  | RETURNS TABLE ( { column_name column_type } [, ...] )]
-  [
-  {IMMUTABLE | STABLE | VOLATILE}
-  | {SHIPPABLE | NOT SHIPPABLE}
-  | [ NOT ] LEAKPROOF
-  | WINDOW
-  | {CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT}
-  | {[ EXTERNAL| SQL ] SECURITY INVOKER | [ EXTERNAL| SQL ] SECURITY DEFINER | AUTHID DEFINER | AUTHID CURRENT_USER}
-  | {FENCED | NOT FENCED}
-  | {PACKAGE}
-  | COST execution_cost
-  | ROWS result_rows
-  | SET configuration_parameter { {TO | =} value | FROM CURRENT }
-  | COMMENT 'text'
-  | {DETERMINISTIC | NOT DETERMINISTIC}
-  | LANGUAGE lang_name
-  | { CONTAINS SQL | NO SQL | READS SQL DATA | MODIFIES SQL DATA }
-  ] [...]
-  {
-  AS 'definition'
-  | AS 'obj_file', 'link_symbol'
-  }
-  ```
-
-- O-style syntax for creating a user-defined function:
-
-  ```
-  CREATE [ OR REPLACE ] FUNCTION function_name
-  ( [ { argname [ argmode ] argtype [ { DEFAULT | := | = } expression ] }
-  [, ...] ] )
-  RETURN rettype
-  [
-  {IMMUTABLE | STABLE | VOLATILE }
-  | {SHIPPABLE | NOT SHIPPABLE}
-  | {PACKAGE}
-  | [ NOT ] LEAKPROOF
-  | {CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT }
-  | {[ EXTERNAL| SQL ] SECURITY INVOKER | [ EXTERNAL| SQL ] SECURITY DEFINER |
-  AUTHID DEFINER | AUTHID CURRENT_USER}
-  | COST execution_cost
-  | ROWS result_rows
-  | SET configuration_parameter { {TO | =} value | FROM CURRENT }
-  | COMMENT 'text'
-  | {DETERMINISTIC | NOT DETERMINISTIC}
-  | LANGUAGE lang_name
-  | { CONTAINS SQL | NO SQL | READS SQL DATA | MODIFIES SQL DATA }
-  ][...
- { - IS | AS - } plsql_body - / - ``` - -## Parameter Description - -- **LANGUAGE lang_name** - - Specifies the name of the language that is used to implement the function. PostgreSQL function default value: **sql**. O-style default value: **plpgsql**. - -- **SQL SECURITY INVOKER** - - Indicates that the function is to be executed with the permissions of the user that calls it. This parameter can be omitted. - - The functions of SQL SECURITY INVOKER and SECURITY INVOKER and AUTHID CURRENT_USER are the same. - -- **SQL SECURITY DEFINER** - - Specifies that the function is to be executed with the privileges of the user that created it. - - The functions of SQL SECURITY DEFINER and AUTHID DEFINER and SECURITY DEFINER are the same. - -- **CONTAINS SQL** | **NO SQL** | **READS SQL DATA** | **MODIFIES SQL DATA** - - Syntax compatibility item. - -## Examples - -```sql ---Specify CONTAINS SQL. -MogDB=# CREATE FUNCTION func_test (s CHAR(20)) RETURNS int -CONTAINS SQL AS $$ select 1 $$ ; - ---Specify DETERMINISTIC. -MogDB=# CREATE FUNCTION func_test (s int) RETURNS int -CONTAINS SQL DETERMINISTIC AS $$ select s; $$ ; - ---Specify LANGUAGE SQL. -MogDB=# CREATE FUNCTION func_test (s int) RETURNS int -CONTAINS SQL LANGUAGE SQL AS $$ select s; $$ ; - ---Specify NO SQL. -MogDB=# CREATE FUNCTION func_test (s int) RETURNS int -NO SQL AS $$ select s; $$ ; - ---Specify READS SQL DATA. -MogDB=# CREATE FUNCTION func_test (s int) RETURNS int -CONTAINS SQL READS SQL DATA AS $$ select s; $$ ; - ---Specify MODIFIES SQL DATA. -MogDB=# CREATE FUNCTION func_test (s int) RETURNS int -CONTAINS SQL LANGUAGE SQL NO SQL MODIFIES SQL DATA AS $$ select s; $$ ; - ---Specify SECURITY DEFINER. -MogDB=# CREATE FUNCTION func_test (s int) RETURNS int -NO SQL SQL SECURITY DEFINER AS $$ select s; $$ ; - ---Specify SECURITY INVOKER. -MogDB=# CREATE FUNCTION func_test (s int) RETURNS int -SQL SECURITY INVOKER READS SQL DATA LANGUAGE SQL AS $$ select s; $$ ; -``` - -## Helpful Links - -[CREATE FUNCTION](../../../../../../reference-guide/sql-syntax/CREATE-FUNCTION.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-index.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-index.md deleted file mode 100644 index 18ff4200..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-index.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: dolphin CREATE INDEX -summary: dolphin CREATE INDEX -author: zhang cuiping -date: 2022-10-24 ---- - -# CREATE INDEX - -## Function - -**CREATE INDEX** creates an index in a specified table. - -Indexes are primarily used to enhance database performance (though inappropriate use can result in slower database performance). You are advised to create indexes on: - -- Columns that are often queried -- Join conditions. For a query on joined columns, you are advised to create a composite index on the columns. For example, select * from t1 join t2 on t1. a=t2. a and t1. b=t2.b. You can create a composite index on the a and b columns of table t1. 
-- Columns having filter criteria (especially scope criteria) of a **where** clause -- Columns that appear after **order by**, **group by**, and **distinct** - -The partitioned table does not support concurrent index creation and partial index creation. - -## Precautions - -- This section describes only the new syntax of Dolphin. The original syntax of MogDB is not deleted or modified. Options can be sorted in random order. - -## Syntax - -- Create an index on a table. - - ``` - CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [schema_name.]index_name ] - { ON table_name [ USING method ] | [ USING method ] ON table_name } - ({ { column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS { FIRST | LAST } ] }[, ...] ) - [ index_option ] - [ WHERE predicate ]; - ``` - -- Create an index on a partitioned table. - - ``` - CREATE [ UNIQUE ] INDEX [ [schema_name.]index_name ] - { ON table_name [ USING method ] | [ USING method ] ON table_name } - ( {{ column_name | ( expression ) } [ COLLATE collation ] [ opclass ] [ ASC | DESC ] [ NULLS LAST ] }[, ...] ) - [ LOCAL [ ( { PARTITION index_partition_name [ TABLESPACE index_partition_tablespace ] } [, ...] ) ] | GLOBAL ] - [ index_option ] - ``` - -## Parameter Description - -- **column_name ( length )** - - Creates a prefix key index based on a column in the table. **column_name** indicates the column name of the prefix key, and **length** indicates the prefix length. - - The prefix key uses the prefix of the specified column data as the index key value, which reduces the storage space occupied by the index. Indexes can be used for filter and join conditions that contain prefix key columns. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > - > - The prefix key supports the following index methods: btree and ubtree. - > - The data type of the prefix key column must be binary or character (excluding special characters). - > - The prefix length must be a positive integer that does not exceed 2676 and cannot exceed the maximum length of the column. For the binary type, the prefix length is measured in bytes. For non-binary character types, the prefix length is measured in characters. The actual length of the key value is restricted by the internal page. If a column contains multi-byte characters or an index has multiple keys, the length of the index line may exceed the upper limit. As a result, an error is reported. Consider this situation when setting a long prefix length. - > - In the CREATE INDEX syntax, the following keywords cannot be used as prefix keys for column names: COALESCE, CONVERT, DAYOFMONTH, DAYOFWEEK, DAYOFYEAR, DB_B_FORMAT, EXTRACT, GREATEST, HOUR_P, IFNULL, LEAST, LOCATE, MICROSECOND_P, MID, MINUTE_P, NULLIF, NVARCHAR, NVL, OVERLAY, POSITION, QUARTER, SECOND_P, SUBSTR, SUBSTRING, TEXT_P, TIME, TIMESTAMP, TIMESTAMPDIFF, TREAT, TRIM, WEEKDAY, WEEKOFYEAR, XMLCONCAT, XMLELEMENT, XMLEXISTS, XMLFOREST, XMLPARSE, XMLPI, XMLROOT, and XMLSERIALIZE. If the index where the prefix key containing the preceding keywords resides is created using the ALTER TABLE or CREATE TABLE syntax, the exported CREATE INDEX statement may fail to be executed. Therefore, do not use the preceding keywords as the column names of the prefix keys. - -- **index_option** - - You can specify options when creating an index. The syntax is as follows: - - ``` - INCLUDE ( column_name [, ...] ) - | WITH ( { storage_parameter = value } [, ...] 
) - | TABLESPACE tablespace_name - ``` - - - The TABLESPACE option can be entered multiple times. The latest input prevails. - -## Examples - -```sql ---Create a table named tpcds.ship_mode_t1. -MogDB=# create schema tpcds; -MogDB=# CREATE TABLE tpcds.ship_mode_t1 -( - SM_SHIP_MODE_SK INTEGER NOT NULL, - SM_SHIP_MODE_ID CHAR(16) NOT NULL, - SM_TYPE CHAR(30) , - SM_CODE CHAR(10) , - SM_CARRIER CHAR(20) , - SM_CONTRACT CHAR(20) -) -; - ---Create a common unique index on the SM_SHIP_MODE_SK column in the tpcds.ship_mode_t1 table. -MogDB=# CREATE UNIQUE INDEX ds_ship_mode_t1_index1 ON tpcds.ship_mode_t1(SM_SHIP_MODE_SK); - ---Create a B-tree index on the SM_SHIP_MODE_SK column in the tpcds.ship_mode_t1 table. -MogDB=# CREATE INDEX ds_ship_mode_t1_index4 ON tpcds.ship_mode_t1 USING btree(SM_SHIP_MODE_SK); - ---Create an expression index on the SM_CODE column in the table tpcds.ship_mode_t1 table. -MogDB=# CREATE INDEX ds_ship_mode_t1_index2 ON tpcds.ship_mode_t1(SUBSTR(SM_CODE,1 ,4)); - ---Create a partial index on the SM_SHIP_MODE_SK column where SM_SHIP_MODE_SK is greater than 10 in the tpcds.ship_mode_t1 table. -MogDB=# CREATE UNIQUE INDEX ds_ship_mode_t1_index3 ON tpcds.ship_mode_t1(SM_SHIP_MODE_SK) WHERE SM_SHIP_MODE_SK>10; - ---Rename an existing index. -MogDB=# ALTER INDEX tpcds.ds_ship_mode_t1_index1 RENAME TO ds_ship_mode_t1_index5; - ---Set the index to be unavailable. -MogDB=# ALTER INDEX tpcds.ds_ship_mode_t1_index2 UNUSABLE; - ---Recreate an index. -MogDB=# ALTER INDEX tpcds.ds_ship_mode_t1_index2 REBUILD; - ---Delete an existing index. -MogDB=# DROP INDEX tpcds.ds_ship_mode_t1_index2; - ---Delete a table. -MogDB=# DROP TABLE tpcds.ship_mode_t1; - ---Create a tablespace. -MogDB=# CREATE TABLESPACE example1 RELATIVE LOCATION 'tablespace1/tablespace_1'; -MogDB=# CREATE TABLESPACE example2 RELATIVE LOCATION 'tablespace2/tablespace_2'; -MogDB=# CREATE TABLESPACE example3 RELATIVE LOCATION 'tablespace3/tablespace_3'; -MogDB=# CREATE TABLESPACE example4 RELATIVE LOCATION 'tablespace4/tablespace_4'; ---Create a table named tpcds.customer_address_p1. -MogDB=# CREATE TABLE tpcds.customer_address_p1 -( - CA_ADDRESS_SK INTEGER NOT NULL, - CA_ADDRESS_ID CHAR(16) NOT NULL, - CA_STREET_NUMBER CHAR(10) , - CA_STREET_NAME VARCHAR(60) , - CA_STREET_TYPE CHAR(15) , - CA_SUITE_NUMBER CHAR(10) , - CA_CITY VARCHAR(60) , - CA_COUNTY VARCHAR(30) , - CA_STATE CHAR(2) , - CA_ZIP CHAR(10) , - CA_COUNTRY VARCHAR(20) , - CA_GMT_OFFSET DECIMAL(5,2) , - CA_LOCATION_TYPE CHAR(20) -) -TABLESPACE example1 -PARTITION BY RANGE(CA_ADDRESS_SK) -( - PARTITION p1 VALUES LESS THAN (3000), - PARTITION p2 VALUES LESS THAN (5000) TABLESPACE example1, - PARTITION p3 VALUES LESS THAN (MAXVALUE) TABLESPACE example2 -) -ENABLE ROW MOVEMENT; ---Create the partitioned table index ds_customer_address_p1_index1 without specifying the index partition name. -MogDB=# CREATE INDEX ds_customer_address_p1_index1 ON tpcds.customer_address_p1(CA_ADDRESS_SK) LOCAL; ---Create the partitioned table index ds_customer_address_p1_index2 with the name of the index partition specified. -MogDB=# CREATE INDEX ds_customer_address_p1_index2 ON tpcds.customer_address_p1(CA_ADDRESS_SK) LOCAL -( - PARTITION CA_ADDRESS_SK_index1, - PARTITION CA_ADDRESS_SK_index2 TABLESPACE example3, - PARTITION CA_ADDRESS_SK_index3 TABLESPACE example4 -) -TABLESPACE example2; - ---Create a global partitioned index. 
-MogDB=# CREATE INDEX ds_customer_address_p1_index3 ON tpcds.customer_address_p1(CA_ADDRESS_ID) GLOBAL;
-
---If no keyword is specified, a global partitioned index is created by default.
-MogDB=# CREATE INDEX ds_customer_address_p1_index4 ON tpcds.customer_address_p1(CA_ADDRESS_ID);
-
---Change the tablespace of the partitioned table index CA_ADDRESS_SK_index2 to example1.
-MogDB=# ALTER INDEX tpcds.ds_customer_address_p1_index2 MOVE PARTITION CA_ADDRESS_SK_index2 TABLESPACE example1;
-
---Change the tablespace of the partitioned table index CA_ADDRESS_SK_index3 to example2.
-MogDB=# ALTER INDEX tpcds.ds_customer_address_p1_index2 MOVE PARTITION CA_ADDRESS_SK_index3 TABLESPACE example2;
-
---Rename a partitioned table index.
-MogDB=# ALTER INDEX tpcds.ds_customer_address_p1_index2 RENAME PARTITION CA_ADDRESS_SK_index1 TO CA_ADDRESS_SK_index4;
-
---Delete the created indexes and the partitioned table.
-MogDB=# DROP INDEX tpcds.ds_customer_address_p1_index1;
-MogDB=# DROP INDEX tpcds.ds_customer_address_p1_index2;
-MogDB=# DROP TABLE tpcds.customer_address_p1;
---Delete the tablespaces.
-MogDB=# DROP TABLESPACE example1;
-MogDB=# DROP TABLESPACE example2;
-MogDB=# DROP TABLESPACE example3;
-MogDB=# DROP TABLESPACE example4;
-
---Create a column-store table and its GIN index.
-MogDB=# create table cgin_create_test(a int, b text) with (orientation = column);
-CREATE TABLE
-MogDB=# create index cgin_test on cgin_create_test using gin(to_tsvector('ngram', b));
-CREATE INDEX
-```
-
-## Helpful Links
-
-[CREATE INDEX](../../../../../../reference-guide/sql-syntax/CREATE-INDEX.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-procedure.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-procedure.md
deleted file mode 100644
index 75014d3c..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-procedure.md
+++ /dev/null
@@ -1,71 +0,0 @@
----
-title: dolphin CREATE PROCEDURE
-summary: dolphin CREATE PROCEDURE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# CREATE PROCEDURE
-
-## Function
-
-Creates a stored procedure.
-
-## Precautions
-
-Compared with the original MogDB, Dolphin modifies the CREATE PROCEDURE syntax as follows:
-
-1. The LANGUAGE option is added.
-2. The syntax compatibility item [NOT] DETERMINISTIC is added.
-3. The syntax compatibility item { CONTAINS SQL | NO SQL | READS SQL DATA | MODIFIES SQL DATA } is added.
-4. The syntax compatibility item SQL SECURITY { DEFINER | INVOKER } is added.
-
-## Syntax
-
-```
-CREATE [ OR REPLACE ] PROCEDURE procedure_name
-  [ ( {[ argname ] [ argmode ] argtype [ { DEFAULT | := | = } expression ]}[,...]) ]
-  [
-   { IMMUTABLE | STABLE | VOLATILE }
-   | { SHIPPABLE | NOT SHIPPABLE }
-   | {PACKAGE}
-   | [ NOT ] LEAKPROOF
-   | { CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT }
-   | {[ EXTERNAL |SQL ] SECURITY INVOKER | [ EXTERNAL|SQL ] SECURITY DEFINER | AUTHID DEFINER | AUTHID CURRENT_USER}
-   | COST execution_cost
-   | SET configuration_parameter { TO value | = value | FROM CURRENT }
-   | COMMENT text
-   | {DETERMINISTIC | NOT DETERMINISTIC}
-   | LANGUAGE lang_name
-   | { CONTAINS SQL | NO SQL | READS SQL DATA | MODIFIES SQL DATA }
-  ][ ...
] - { IS | AS } -plsql_body -/ -``` - -## Parameter Description - -- **LANGUAGE lang_name** - - Name of the language used to implement the stored procedure. Default value: **plpgsql**. - -- **SQL SECURITY INVOKER** - - Specifies that the stored procedure is to be executed with the permissions of the user that calls it. This parameter can be omitted. - - The functions of SQL SECURITY INVOKER and SECURITY INVOKER and AUTHID CURRENT_USER are the same. - -- **SQL SECURITY DEFINER** - - Specifies that the stored procedure is to be executed with the privileges of the user that created it. - - The functions of SQL SECURITY DEFINER and AUTHID DEFINER and SECURITY DEFINER are the same. - -- **CONTAINS SQL** | **NO SQL** | **READS SQL DATA** | **MODIFIES SQL DATA** - - Syntax compatibility item. - -## Helpful Links - -[CREATE PROCEDURE](../../../../../../reference-guide/sql-syntax/CREATE-PROCEDURE.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-server.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-server.md deleted file mode 100644 index 11285e9f..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-server.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: dolphin CREATE SERVER -summary: dolphin CREATE SERVER -author: Guo Huan -date: 2023-05-15 ---- - -# CREATE SERVER - -## Function - -Define a new external server. - -## Precautions - -- This section contains only the syntax added to dolphin, the syntax of the original MogDB has not been removed or modified. -- Compared to the original MogDB, dolphin's changes to the `CREATE SERVER` syntax are mainly: - 1. add new fdw_name optional value mysql, its function is same as mysql_fdw. - 2. for fdw_name is mysql_fdw, add optional OPTIONS: DATABASE, USER, PASSWORD, SOCKET, OWNER. - -## Syntax - -``` -CREATE SERVER server_name - FOREIGN DATA WRAPPER fdw_name - OPTIONS ( { option_name ' value ' } [, ...] ) ; -``` - -## Parameter Description - -- **fdw\_name** - - Specifies the name of the foreign data wrapper. - - Range of values: dist\_fdw, hdfs\_fdw, log\_fdw, file\_fdw, mot\_fdw, oracle\_fdw, mysql\_fdw, mysql, postgres\_fdw. - -- **OPTIONS \( \{ option\_name ' value ' \} \[, ...\] \)** - - This clause specifies options for the server. These options usually define the connection details for that server, but the actual names and values depend on the foreign data wrapper for that server. - - - The options supported by mysql\_fdw include: - - - **host** (default value is 127.0.0.1) - - The IP address of MySQL Server/MariaDB. - - - **port** (default value is 3306) - - The port number on which MySQL Server/MariaDB listens. - - - **user** (default value is NULL) - - The username that MySQL Server/MariaDB will use to connect. If OPTIONS specifies this option, MogDB will automatically create a user mapping from the current user to the newly created server. - - - **password** (default value is NULL) - - The password of the user that MySQL Server/MariaDB will use to connect. If OPTIONS specifies this option, MogDB will automatically create a user mapping from the current user to the newly created server. - - - **database** (default value is NULL) - - No real meaning, only for syntax compatibility. 
Specify the database to which MySQL Server/MariaDB connects in [CREATE FOREIGN TABLE](../../../../../../reference-guide/sql-syntax/CREATE-FOREIGN-TABLE.md) or [ALTER FOREIGN TABLE](../../../../../../reference-guide/sql-syntax/ALTER-FOREIGN-TABLE.md). - - - **owner** (default value is NULL) - - No real meaning, only for syntax compatibility. - - - **socket** (default value is NULL) - - No real meaning, only for syntax compatibility. - -## Examples - -Create a server. - -```sql -MogDB=# create server server_test foreign data wrapper mysql options(host '192.108.0.1', port '3306', user 'foreign_server_test', -password 'password@123', database 'my_db', owner 'test_user'); -WARNING: Option database will be deprecated for CREATE SERVER. -WARNING: Option owner will be deprecated for CREATE SERVER. -WARNING: USER MAPPING for current user to server server_test created. -CREATE SERVER -``` - -## Helpful Links - -[ALTER SERVER](dolphin-alter-server.md), [DROP SERVER](../../../../../../reference-guide/sql-syntax/DROP-SERVER.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table-as.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table-as.md deleted file mode 100644 index 8fc99b23..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table-as.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: dolphin CREATE TABLE AS -summary: dolphin CREATE TABLE AS -author: zhang cuiping -date: 2022-10-24 ---- - -# CREATE TABLE AS - -## Function - -**CREATE TABLE AS** creates a table based on the results of a query. - -It creates a table and fills it with data obtained using **SELECT**. The table columns have the names and data types associated with the output columns of **SELECT** (except that you can override the **SELECT** output column names by giving an explicit list of new column names). - -**CREATE TABLE AS** queries a source table once and writes the data in a new table. The result in the query view changes with the source table. In contrast, the view re-computes and defines its **SELECT** statement at each query. - -## Precautions - -- This section describes only the new syntax of Dolphin. The original syntax of MogDB is not deleted or modified. - -## Syntax - -``` -CREATE [ [ GLOBAL | LOCAL ] [ TEMPORARY | TEMP ] | UNLOGGED ] TABLE table_name - [ (column_name [, ...] ) ] - [ WITH ( {storage_parameter = value} [, ... ] ) ] - [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ] - [ COMPRESS | NOCOMPRESS ] - [ TABLESPACE tablespace_name ] - [ AS ] query - [ WITH [ NO ] DATA ]; -``` - -## Parameter Description - -- **[ AS ] query** - - Specifies a **SELECT** or **VALUES** command, or an **EXECUTE** command that runs a prepared **SELECT**, or **VALUES** query. - - The AS keyword is optional. However, if the query contains the WITH statement, you must use parentheses to enclose the query. The following is an example: - - ``` - CREATE TABLE t_new (WITH temp_t(a, b) AS (SELECT a, b FROM t_old) SELECT * FROM temp_t); - ``` - -## Examples - -```sql ---Create the tpcds.store_returns table. 
-MogDB=# CREATE TABLE tpcds.store_returns -( - W_WAREHOUSE_SK INTEGER NOT NULL, - W_WAREHOUSE_ID CHAR(16) NOT NULL, - sr_item_sk VARCHAR(20) , - W_WAREHOUSE_SQ_FT INTEGER -); ---Create the tpcds.store_returns_t1 table and insert numbers that are greater than 16 in the sr_item_sk column of the tpcds.store_returns table: -MogDB=# CREATE TABLE tpcds.store_returns_t1 AS SELECT * FROM tpcds.store_returns WHERE sr_item_sk > '4795'; - ---Copy tpcds.store_returns to create the tpcds.store_returns_t2 table. -MogDB=# CREATE TABLE tpcds.store_returns_t2 AS table tpcds.store_returns; - ---Delete a table. -MogDB=# DROP TABLE tpcds.store_returns_t1 ; -MogDB=# DROP TABLE tpcds.store_returns_t2 ; -MogDB=# DROP TABLE tpcds.store_returns; -``` - -## Helpful Links - -[CREATE TABLE](../../../../../../reference-guide/sql-syntax/CREATE-TABLE.md),[SELECT](../../../../../../reference-guide/sql-syntax/SELECT.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table-partition.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table-partition.md deleted file mode 100644 index 68079b95..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table-partition.md +++ /dev/null @@ -1,1277 +0,0 @@ ---- -title: dolphin CREATE TABLE PARTITION -summary: dolphin CREATE TABLE PARTITION -author: zhang cuiping -date: 2022-10-24 ---- - -# CREATE TABLE PARTITION - -## Function - -Creates a partitioned table. Partitioning refers to splitting what is logically one large table into smaller physical pieces based on specific schemes. The table based on the logic is called a partitioned table, and each physical piece is called a partition. Data is stored on these physical partitions, instead of the logical partitioned table. - -The common forms of partitioning include range partitioning, interval partitioning, hash partitioning, list partitioning, and value partitioning. Currently, row-store tables support range partitioning, interval partitioning, hash partitioning, and list partitioning. Column-store tables support only range partitioning. - -In range partitioning, the table is partitioned into ranges defined by a key column or set of columns, with no overlap between the ranges of values assigned to different partitions. Each range has a dedicated partition for data storage. - -The partitioning policy for Range Partitioning refers to how data is inserted into partitions. Currently, range partitioning only allows the use of the range partitioning policy. - -In range partitioning, a table is partitioned based on partition key values. If a record can be mapped to a partition, it is inserted into the partition; if it cannot, an error message is returned. Range partitioning is the most commonly used partitioning policy. - -Interval partitioning is a special type of range partitioning. Compared with range partitioning, interval value definition is added. When no matching partition can be found for an inserted record, a partition can be automatically created based on the interval value. - -Interval partitioning supports only table-based partitioning of a list where the data type can be TIMESTAMP[(p)] [WITHOUT TIME ZONE], TIMESTAMP[(p)] [WITH TIME ZONE], and DATE. 
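-
-As a minimal sketch of the interval behavior just described (the table, column, and partition names here are illustrative only):
-
-```sql
---A range-partitioned table with a one-month interval on a timestamp partition key.
-MogDB=# CREATE TABLE sales_info
-(
-    sale_time TIMESTAMP,
-    amount    INTEGER
-)
-PARTITION BY RANGE (sale_time)
-INTERVAL ('1 month')
-(
-    PARTITION p1 VALUES LESS THAN ('2022-02-01')
-);
-
---This value falls outside every existing partition, so one or more
---one-month partitions are created automatically to cover it.
-MogDB=# INSERT INTO sales_info VALUES ('2022-05-05 10:00:00', 1);
-```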
- -Interval partitioning policy: A record is mapped to a created partition based on the partition key value. If the record can be mapped to a created partition, the record is inserted into the corresponding partition. Otherwise, a partition is automatically created based on the partition key value and table definition information, and then the record is inserted into the new partition. The data range of the new partition is equal to the interval value. - -In hash partitioning, a modulus and a remainder are specified for each partition based on a column in the table, and records to be inserted into the table are allocated to the corresponding partition, the rows in each partition must meet the following condition: The value of the partition key divided by the specified modulus generates the remainder specified for the partition key. - -In hash partitioning, table is partitioned based on partition key values. If a record can be mapped to a partition, it is inserted into the partition; if it cannot, an error message is returned. - -List partitioning is to allocate the records to be inserted into a table to the corresponding partition based on the key values in each partition. The key values do not overlap in different partitions. Create a partition for each group of key values to store corresponding data. - -In list partitioning, table is partitioned based on partition key values. If a record can be mapped to a partition, it is inserted into the partition; if it cannot, an error message is returned. - -Partitioning can provide several benefits: - -- Query performance can be improved drastically in certain situations, particularly when most of the heavily accessed rows of the table are in a single partition or a small number of partitions. Partitioning narrows the range of data search and improves data access efficiency. -- When queries or updates access a large percentage of a single partition, performance can be dramatically improved by taking advantage of sequential scan of that partition instead of reads scattered across the whole table. -- Frequent loading or deletion operations on records in a separate partition can be accomplished by reading or removing that partition. It also entirely avoids the **VACUUM** overload caused by bulk **DELETE** operations (only for range partitioning). - -Compared with the kernel syntax, the rebuild, remove, check, repair, optimize, truncate, analyze, exchange of Dolphin is modified in B compatibility mode. - -## Precautions - -- If the constraint key of the unique constraint and primary key constraint contains all partition keys, a local index is created for the constraints. Otherwise, a global index is created. -- Currently, hash partitioning and list partitioning support only single-column partitioning, and do not support multi-column partitioning. -- When you have the **INSERT** permission for an interval partitioned table, partitions can be automatically created when you run **INSERT** to write data to the table. -- In the **PARTITION FOR (values)** syntax for partitioned tables, values can only be constants. -- In the **PARTITION FOR (values)** syntax for partitioned tables, if data type conversion is required for values, you are advised to use forcible type conversion to prevent the implicit type conversion result from being inconsistent with the expected result. -- The maximum number of partitions is 1048575. Generally, it is impossible to create so many partitions, because too many partitions may cause insufficient memory. 
Create partitions based on the value of **local_syscache_threshold**. The memory used by the partitioned tables is about (number of partitions x 3/1024) MB. Theoretically, the memory occupied by the partitions cannot be greater than the value of **local_syscache_threshold**. In addition, some space must be reserved for other functions.
-- table_indexclause is used to create a partitioned table index. The index is a local index and cannot be a global index.
-
-## Syntax
-
-```
-CREATE TABLE [ IF NOT EXISTS ] partition_table_name
-( [
-    { column_name data_type [ COLLATE collation ] [ column_constraint [ ... ] ]
-    | table_constraint
-    | table_indexclause
-    | LIKE source_table [ like_option [...] ] }[, ... ]
-] )
-    [create_option]
-
-    PARTITION BY {
-        {RANGE (partition_key) [ INTERVAL ('interval_expr') [ STORE IN (tablespace_name [, ... ] ) ] ] ( partition_less_than_item [, ... ] )} |
-        {RANGE (partition_key) [ INTERVAL ('interval_expr') [ STORE IN (tablespace_name [, ... ] ) ] ] ( partition_start_end_item [, ... ] )} |
-        {LIST | HASH (partition_key) (PARTITION partition_name [VALUES [IN] (list_values_clause)] opt_table_space )}
-    } [ { ENABLE | DISABLE } ROW MOVEMENT ];
-    [create_option]
-
-Where create_option is:
-    [ WITH ( {storage_parameter = value} [, ... ] ) ]
-    [ COMPRESS | NOCOMPRESS ]
-    [ TABLESPACE tablespace_name ]
-    [ COMPRESSION [=] compression_arg ]
-    [ ENGINE [=] engine_name ]
-Except for the WITH option, the same create_option can be specified multiple times; the last occurrence takes effect.
-```
-
-- column_constraint:
-
-    ```
-    [ CONSTRAINT constraint_name ]
-    { NOT NULL |
-      NULL |
-      CHECK ( expression ) |
-      DEFAULT default_expr |
-      GENERATED ALWAYS AS ( generation_expr ) STORED |
-      UNIQUE index_parameters |
-      PRIMARY KEY index_parameters |
-      REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
-        [ ON DELETE action ] [ ON UPDATE action ] }
-    [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
-    ```
-
-- table_constraint:
-
-    ```
-    [ CONSTRAINT constraint_name ]
-    { CHECK ( expression ) |
-      UNIQUE ( column_name [, ... ] ) index_parameters |
-      PRIMARY KEY ( column_name [, ... ] ) index_parameters |
-      FOREIGN KEY ( column_name [, ... ] ) REFERENCES reftable [ ( refcolumn [, ... ] ) ]
-        [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ] [ ON DELETE action ] [ ON UPDATE action ] }
-    [ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
-    ```
-
-- table_indexclause:
-
-    ```
-    {INDEX | KEY} [index_name] [index_type] (key_part,...)[index_option]...
-    ```
-
-- Values of index_type are as follows:
-
-    ```
-    USING {BTREE | HASH | GIN | GIST | PSORT | UBTREE}
-    ```
-
-- Values of key_part are as follows:
-
-    ```
-    {col_name [ ( length ) ] | (expr)} [ASC | DESC]
-    ```
-
-- `col_name ( length )` is the prefix key, where `col_name` is the column name of the prefix key and `length` is the prefix length. The prefix key uses the prefix of the specified column data as the index key value, which reduces the storage space occupied by the index. Indexes can be used for filter and join conditions that contain prefix key columns.
-
-    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-    >
-    > - The prefix key supports the following index methods: btree and ubtree.
-    > - The data type of the prefix key column must be binary or character (excluding special characters).
-    > - The prefix length must be a positive integer that does not exceed 2676 and cannot exceed the maximum length of the column.
For the binary type, the prefix length is measured in bytes; for non-binary character types, it is measured in characters. The actual length of the key value is restricted by the internal page. If a column contains multi-byte characters or an index has multiple keys, the index row length may exceed the upper limit and an error is reported. Consider this when setting a long prefix length.
-
-- The index_option parameter is as follows:
-
-    ```
-    index_option:{
-        COMMENT 'string'
-      | index_type
-    }
-    ```
-
-    COMMENT and index_type can appear in any order and any number of times, but for the same option only the last occurrence takes effect.
-
-- like_option:
-
-    ```
-    { INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | RELOPTIONS | ALL }
-    ```
-
-- index_parameters:
-
-    ```
-    [ WITH ( {storage_parameter = value} [, ... ] ) ]
-    [ USING INDEX TABLESPACE tablespace_name ]
-    ```
-
-- partition_less_than_item:
-
-    ```
-    PARTITION partition_name VALUES LESS THAN ( { partition_value | MAXVALUE } ) [TABLESPACE tablespace_name]
-    ```
-
-- partition_start_end_item:
-
-    ```
-    PARTITION partition_name {
-        {START(partition_value) END (partition_value) EVERY (interval_value)} |
-        {START(partition_value) END ({partition_value | MAXVALUE})} |
-        {START(partition_value)} |
-        {END({partition_value | MAXVALUE})}
-    } [TABLESPACE tablespace_name]
-    ```
-
-## Parameter Description
-
-- **IF NOT EXISTS**
-
-    Does not throw an error if a relation with the same name already exists; a notice is issued in this case.
-
-- **partition_table_name**
-
-    Specifies the name of the partitioned table.
-
-    Value range: String, which must comply with the naming convention.
-
-- **column_name**
-
-    Specifies the name of a column to be created in the new table.
-
-    Value range: String, which must comply with the naming convention.
-
-- **data_type**
-
-    Specifies the data type of the column.
-
-- **COLLATE collation**
-
-    Assigns a collation to the column (which must be of a collatable data type). If no collation is specified, the default collation is used. You can run the **select \* from pg_collation** command to query collation rules from the **pg_collation** system catalog. The default collation rule is the row starting with **default** in the query result.
-
-- **CONSTRAINT constraint_name**
-
-    Specifies the name of a column or table constraint. The optional constraint clauses specify constraints that new or updated rows must satisfy for an INSERT or UPDATE operation to succeed.
-
-    There are two ways to define constraints:
-
-    - A column constraint is defined as part of a column definition, and it is bound to a particular column.
-    - A table constraint is not bound to a particular column and can apply to more than one column.
-
-- **LIKE source_table [ like_option … ]**
-
-    The LIKE clause specifies a table from which the new table automatically copies all column names, their data types, and their non-null constraints.
-
-    Unlike INHERITS, the new table and original table are decoupled after creation is complete. Changes to the source table will not be applied to the new table, and it is not possible to include data of the new table in scans of the source table.
-
-    - Default expressions for the copied column definitions will only be copied if **INCLUDING DEFAULTS** is specified. The default behavior is to exclude default expressions, resulting in the copied columns in the new table having default values **NULL**.
-    - If **INCLUDING GENERATED** is specified, the generated expression of the source table column is copied to the new table. By default, the generated expression is not copied.
-    - Non-null constraints are always copied to the new table. CHECK constraints will only be copied if **INCLUDING CONSTRAINTS** is specified; other types of constraints will never be copied. These rules also apply to column constraints and table constraints.
-    - Unlike those of INHERITS, columns and constraints copied by LIKE are not merged with similarly named columns and constraints. If the same name is specified explicitly or in another LIKE clause, an error is reported.
-    - Any indexes on the original table will not be created on the new table, unless the **INCLUDING INDEXES** clause is specified.
-    - **STORAGE** settings for the copied column definitions are copied only if **INCLUDING STORAGE** is specified. The default behavior is to exclude **STORAGE** settings.
-    - Comments for the copied columns, constraints, and indexes will be copied only if **INCLUDING COMMENTS** is specified. The default behavior is to exclude comments.
-    - If **INCLUDING RELOPTIONS** is specified, the new table will copy the storage parameters (the WITH clause) of the source table. The default behavior is to exclude the storage parameters of the source table.
-    - **INCLUDING ALL** contains the meaning of **INCLUDING DEFAULTS**, **INCLUDING CONSTRAINTS**, **INCLUDING INDEXES**, **INCLUDING STORAGE**, **INCLUDING COMMENTS**, **INCLUDING PARTITION**, and **INCLUDING RELOPTIONS**.
-
-- **WITH ( storage_parameter [= value] [, … ] )**
-
-    Specifies an optional storage parameter for a table or an index. Optional parameters are as follows:
-
-    - FILLFACTOR
-
-        The fill factor of a table is a percentage from 10 to 100. **100** (complete filling) is the default value. When a smaller fill factor is specified, INSERT operations fill table pages only to the indicated percentage. The remaining space on each page is reserved for updating rows on that page. This gives UPDATE a chance to place the updated copy of a row on the same page, which is more efficient than placing it on a different page. For a table whose entries are never updated, setting the fill factor to **100** (complete filling) is the best choice, but in heavily updated tables a smaller fill factor would be appropriate. The parameter has no meaning for column-store tables.
-
-        Value range: 10 to 100
-
-    - ORIENTATION
-
-        Determines the data storage mode of the table.
-
-        Value range:
-
-        - **COLUMN**: The data will be stored in columns.
-        - **ROW** (default value): The data will be stored in rows.
-
-        > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** **ORIENTATION** cannot be modified.
-
-    - STORAGE_TYPE
-
-        Specifies the storage engine type. This parameter cannot be modified once it is set.
-
-        Value range:
-
-        - **USTORE** indicates that tables support the inplace-update storage engine. Note that the **track_counts** and **track_activities** parameters must be enabled when a Ustore table is used; otherwise, space expansion may occur.
-        - **ASTORE** indicates that tables support the append-only storage engine.
-
-        Default value: **ASTORE**. If no storage engine type is specified, data is stored in append-only mode by default.
-
-    - COMPRESSION
-
-        - Value range: **LOW**, **MIDDLE**, **HIGH**, **YES**, and **NO** for column-store tables, with compression level increasing in ascending order.
The default value is **LOW**.
-
-          Row-store tables cannot be compressed.
-
-    - MAX_BATCHROW
-
-        Specifies the maximum number of records in a storage unit during data loading. The parameter is only valid for column-store tables.
-
-        Value range: 10000 to 60000. The default value is **60000**.
-
-    - PARTIAL_CLUSTER_ROWS
-
-        Specifies the number of records to be partially clustered for storage during data loading. The parameter is only valid for column-store tables.
-
-        Value range: greater than or equal to **MAX_BATCHROW**. You are advised to set this parameter to an integer multiple of **MAX_BATCHROW**.
-
-    - DELTAROW_THRESHOLD
-
-        A reserved parameter. The parameter is only valid for column-store tables.
-
-        Value range: 0 to 9999
-
-    - segment
-
-        The data is stored in segment-page mode. This parameter supports only row-store tables; column-store tables, temporary tables, and unlogged tables are not supported. The Ustore storage engine is not supported either.
-
-        Value range: **on** and **off**
-
-        Default value: **off**
-
-- **COMPRESS / NOCOMPRESS**
-
-    Specifies the keyword COMPRESS during table creation, so that the compression feature is triggered by BULK INSERT operations. If this feature is enabled, a scan is performed for all tuple data within the page to generate a dictionary and then the tuple data is compressed and stored. If **NOCOMPRESS** is specified, the table is not compressed. Row-store tables cannot be compressed.
-
-    Default value: **NOCOMPRESS**, that is, tuple data is not compressed before storage.
-
-- **TABLESPACE tablespace_name**
-
-    Specifies that the new table will be created in the **tablespace_name** tablespace. If the tablespace is not specified, the default tablespace is used.
-
-- **PARTITION BY RANGE(partition_key)**
-
-    Creates a range partition. **partition_key** is the name of the partition key.
-
-    (1) If the **VALUES LESS THAN** syntax is used:
-
-    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** In this case, a maximum of four partition keys are supported.
-
-    In this case, the partition key supports the following data types: SMALLINT, INTEGER, BIGINT, DECIMAL, NUMERIC, REAL, DOUBLE PRECISION, CHARACTER VARYING(n), VARCHAR(n), CHARACTER(n), CHAR(n), CHARACTER, CHAR, TEXT, NVARCHAR, NVARCHAR2, NAME, TIMESTAMP[(p)] [WITHOUT TIME ZONE], TIMESTAMP[(p)] [WITH TIME ZONE], and DATE.
-
-    (2) If the **START END** syntax is used:
-
-    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** In this case, only one partition key is supported.
-
-    Data types supported by the partition key are as follows: SMALLINT, INTEGER, BIGINT, DECIMAL, NUMERIC, REAL, DOUBLE PRECISION, TIMESTAMP[(p)] [WITHOUT TIME ZONE], TIMESTAMP[(p)] [WITH TIME ZONE], and DATE.
-
-    (3) If the **INTERVAL** syntax is used:
-
-    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** In this case, only one partition key is supported.
-
-    In this case, the data types supported by the partition key are TIMESTAMP[(p)] [WITHOUT TIME ZONE], TIMESTAMP[(p)] [WITH TIME ZONE], and DATE.
-
-- **PARTITION partition_name VALUES LESS THAN ( { partition_value | MAXVALUE } )**
-
-    Specifies the partition information: **partition_name** is the name of a range partition; **partition_value** is the upper boundary of a range partition, and its value is determined by the type of **partition_key**; **MAXVALUE** specifies the upper boundary of the last range partition.
-
-    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
-    >
-    > - Upper boundaries must be specified for each partition.
-    > - The data type of an upper boundary must be the same as that of the partition key.
-    > - In a partition list, partitions are arranged in ascending order of upper boundary values. Therefore, a partition with a certain upper boundary value is placed before another partition with a larger upper boundary value.
-
-- **PARTITION partition_name {START (partition_value) END (partition_value) EVERY (interval_value)}** | **{START (partition_value) END (partition_value|MAXVALUE)}** | **{START(partition_value)}** | **{END (partition_value | MAXVALUE)}**
-
-    Specifies the information of partitions.
-
-    - **partition_name**: name or name prefix of a range partition. It is the name prefix only in the following cases (assuming that **partition_name** is **p1**):
-        - If START+END+EVERY is used, the names of partitions will be defined as **p1_1**, **p1_2**, and the like. For example, if "PARTITION p1 START(1) END(4) EVERY(1)" is defined, the generated partitions are [1, 2), [2, 3), and [3, 4), and their names are p1_1, p1_2, and p1_3 respectively. That is, p1 is the name prefix.
-        - If the defined statement is in the first place and has **START** specified, the range (**MINVALUE**, **START**) will be automatically used as the first actual partition, and its name will be **p1_0**. The other partitions are then named **p1_1**, **p1_2**, and so on. For example, if the complete definition is "PARTITION p1 START(1), PARTITION p2 START(2)", the generated partitions are (MINVALUE, 1), [1, 2) and [2, MAXVALUE), and their names are p1_0, p1_1, and p2. That is, p1 is the name prefix and p2 is the partition name. **MINVALUE** indicates the minimum value.
-    - **partition_value**: start point value or end point value of a range partition. The value depends on **partition_key** and cannot be **MAXVALUE**.
-    - **interval_value**: width of each partition for dividing the [**START**, **END**) range. It cannot be **MAXVALUE**. If the value of (**END** – **START**) divided by **EVERY** has a remainder, the width of only the last partition is less than the value of **EVERY**.
-    - **MAXVALUE**: maximum value. It is usually used to set the upper boundary for the last range partition.
-
-    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
-    >
-    > 1. If the defined statement is in the first place and has **START** specified, the range (**MINVALUE**, **START**) will be automatically used as the first actual partition.
-    > 2. The **START END** syntax must comply with the following rules:
-    >
-    > - The value of START (if any, same for the following situations) in each partition_start_end_item must be smaller than that of END.
-    > - For two adjacent partition_start_end_item, the END value of the first partition_start_end_item must be equal to the START value of the second partition_start_end_item.
-    > - The value of EVERY in each partition_start_end_item must be in ascending order and must be smaller than the value of END – START.
-    > - Each partition includes the start value (unless it is **MINVALUE**) and excludes the end value. The format is as follows: [Start value, end value).
-    > - Partitions created by a partition_start_end_item belong to the same tablespace.
-    > - If **partition_name** is a name prefix of a partition, the length must not exceed 57 bytes. If there are more than 57 bytes, the prefix will be automatically truncated.
-    > - When creating or modifying a partitioned table, ensure that the total number of partitions in the table does not exceed the maximum value **1048575**.
-    >
-    > 3. In statements for creating partitioned tables, **START END** and **LESS THAN** cannot be used together.
-    > 4. The **START END** syntax in a partitioned table creation SQL statement will be replaced with the **VALUES LESS THAN** syntax when **gs_dump** is executed.
-
-- **INTERVAL ('interval_expr') [ STORE IN (tablespace_name [, … ] ) ]**
-
-    Defines interval partitioning.
-
-    - **interval_expr**: interval for automatically creating partitions, for example, 1 day or 1 month.
-    - **STORE IN (tablespace_name [, … ] )**: Specifies the list of tablespaces for storing automatically created partitions. If this parameter is specified, the automatically created partitions are cyclically selected from the tablespace list. Otherwise, the default tablespace of the partitioned table is used.
-
-    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** Column-store tables do not support interval partitioning.
-
-- **PARTITION BY LIST(partition_key)**
-
-    Creates a list partition. **partition_key** is the name of the partition key.
-
-    - For **partition_key**, the list partitioning policy supports only one column of partition keys.
-    - If the clause is VALUES (list_values_clause), list_values_clause contains the key values of the corresponding partition. It is recommended that the number of key values of each partition be less than or equal to 64.
-    - Partition keys support the following data types: INT1, INT2, INT4, INT8, NUMERIC, VARCHAR(n), CHAR, BPCHAR, NVARCHAR, NVARCHAR2, TIMESTAMP[(p)] [WITHOUT TIME ZONE], TIMESTAMP[(p)] [WITH TIME ZONE], and DATE. The number of partitions cannot exceed 1048575.
-
-- **PARTITION BY HASH(partition_key)**
-
-    Creates a hash partition. **partition_key** is the name of the partition key.
-
-    For **partition_key**, the hash partitioning policy supports only one column of partition keys.
-
-    Partition keys support the following data types: INT1, INT2, INT4, INT8, NUMERIC, VARCHAR(n), CHAR, BPCHAR, TEXT, NVARCHAR, NVARCHAR2, TIMESTAMP[(p)] [WITHOUT TIME ZONE], TIMESTAMP[(p)] [WITH TIME ZONE], and DATE. The number of partitions cannot exceed 1048575.
-
-- **{ ENABLE | DISABLE } ROW MOVEMENT**
-
-    Specifies whether to enable row movement.
-
-    If an **UPDATE** operation changes the partition key value of a tuple, the tuple no longer belongs to its original partition. This parameter determines whether such an update reports an error or moves the tuple to the target partition.
-
-    Value range:
-
-    - **ENABLE** (default value): Row movement is enabled.
-    - **DISABLE**: Row movement is disabled.
-
-    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** Currently, list and hash partitioned tables do not support **ROW MOVEMENT**.
-
-- **NOT NULL**
-
-    The column is not allowed to contain null values. **ENABLE** can be omitted.
-
-- **NULL**
-
-    Indicates that the column is allowed to contain **NULL** values. This is the default setting.
-
-    This clause is only provided for compatibility with non-standard SQL databases. It is not recommended.
-
-- **CHECK (condition) [ NO INHERIT ]**
-
-    The CHECK constraint specifies an expression producing a Boolean result: the INSERT or UPDATE operation on new or updated rows can succeed only when the expression result is **TRUE** or **UNKNOWN**; otherwise, an error is thrown and the database is not altered.
-
-    A check constraint specified as a column constraint should reference only the column's values, while an expression in a table constraint can reference multiple columns.
-
-    A constraint marked with **NO INHERIT** will not propagate to child tables.
-
-    **ENABLE** can be omitted.
-
-- **DEFAULT default_expr**
-
-    Assigns a default data value to a column. The value can be any variable-free expression (subqueries and cross-references to other columns in the current table are not allowed). The data type of the default expression must match that of the column.
-
-    The default expression will be used in any INSERT operation that does not specify a value for the column. If there is no default value for a column, then the default value is **NULL**.
-
-- GENERATED ALWAYS AS ( generation_expr ) STORED
-
-    This clause creates a column as a generated column. The value of the generated column is calculated by **generation_expr** when data is written (inserted or updated). **STORED** indicates that the value of the generated column is stored as a common column.
-
-    > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-    >
-    > - The generation expression cannot refer to data other than the current row in any way. The generation expression cannot reference other generated columns or system columns. The generation expression cannot return a result set. No subquery, aggregate function, or window function can be used. The function called by the generation expression can only be an immutable function.
-    > - Default values cannot be specified for generated columns.
-    > - The generated column cannot be used as a part of the partition key.
-    > - Do not specify a generated column together with the CASCADE, SET NULL, or SET DEFAULT action of the ON UPDATE clause, or together with the SET NULL or SET DEFAULT action of the ON DELETE clause.
-    > - Generated columns are modified and deleted in the same way as common columns. If the common column that a generated column depends on is deleted, the generated column is automatically deleted. The type of the column on which the generated column depends cannot be changed.
-    > - The generated column cannot be directly written. In the INSERT or UPDATE statement, values cannot be specified for generated columns, but the keyword DEFAULT can be specified.
-    > - The permission control for generated columns is the same as that for common columns.
-    > - Generated columns are not supported in column-store tables or MOTs. Among foreign tables, only postgres_fdw supports generated columns.
-
-- **UNIQUE index_parameters**
-
-    **UNIQUE ( column_name [, … ] ) index_parameters**
-
-    The UNIQUE constraint specifies that a group of one or more columns of a table can contain only unique values.
-
-    For the UNIQUE constraint, **NULL** values are not considered equal.
-
-- **PRIMARY KEY index_parameters**
-
-    **PRIMARY KEY ( column_name [, … ] ) index_parameters**
-
-    Specifies that a column or columns of a table can contain only unique (non-duplicate) and non-**NULL** values.
-
-    Only one primary key can be specified for a table.
-
-- **DEFERRABLE | NOT DEFERRABLE**
-
-    Determines whether the constraint is deferrable. A constraint that is not deferrable will be checked immediately after every command. Checking of constraints that are deferrable can be postponed until the end of the transaction using the **SET CONSTRAINTS** command. **NOT DEFERRABLE** is the default value. Currently, only UNIQUE constraints, primary key constraints, and foreign key constraints accept this clause. All the other constraints are not deferrable.
-
-- **INITIALLY IMMEDIATE | INITIALLY DEFERRED**
-
-    If a constraint is deferrable, this clause specifies the default time to check the constraint.
-
-    - If the constraint is **INITIALLY IMMEDIATE** (default value), it is checked after each statement.
-    - If the constraint is **INITIALLY DEFERRED**, it is checked only at the end of the transaction.
-
-    The constraint check time can be altered using the **SET CONSTRAINTS** statement.
-
-- **USING INDEX TABLESPACE tablespace_name**
-
-    Allows selection of the tablespace in which the index associated with a **UNIQUE** or **PRIMARY KEY** constraint will be created. If not specified, the index is created in **default_tablespace**. If **default_tablespace** is empty, the default tablespace of the database is used.
-
-## Examples
-
-- Example 1: Create a range-partitioned table **tpcds.web_returns_p1**. The table has eight partitions and its partition key is of type integer. The partition ranges are wr_returned_date_sk < 2450815, 2450815 <= wr_returned_date_sk < 2451179, 2451179 <= wr_returned_date_sk < 2451544, 2451544 <= wr_returned_date_sk < 2451910, 2451910 <= wr_returned_date_sk < 2452275, 2452275 <= wr_returned_date_sk < 2452640, 2452640 <= wr_returned_date_sk < 2453005, and wr_returned_date_sk >= 2453005.
-
-    ```sql
-    --Create a table named tpcds.web_returns.
-    MogDB=# CREATE TABLE tpcds.web_returns
-    (
-        W_WAREHOUSE_SK            INTEGER               NOT NULL,
-        W_WAREHOUSE_ID            CHAR(16)              NOT NULL,
-        W_WAREHOUSE_NAME          VARCHAR(20)                   ,
-        W_WAREHOUSE_SQ_FT         INTEGER                       ,
-        W_STREET_NUMBER           CHAR(10)                      ,
-        W_STREET_NAME             VARCHAR(60)                   ,
-        W_STREET_TYPE             CHAR(15)                      ,
-        W_SUITE_NUMBER            CHAR(10)                      ,
-        W_CITY                    VARCHAR(60)                   ,
-        W_COUNTY                  VARCHAR(30)                   ,
-        W_STATE                   CHAR(2)                       ,
-        W_ZIP                     CHAR(10)                      ,
-        W_COUNTRY                 VARCHAR(20)                   ,
-        W_GMT_OFFSET              DECIMAL(5,2)
-    );
-    --Create a partitioned table named tpcds.web_returns_p1.
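-    --(As noted in the overview, column-store tables support only range partitioning,
-    --so the column-oriented table below must be range partitioned.)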
-    MogDB=# CREATE TABLE tpcds.web_returns_p1
-    (
-        WR_RETURNED_DATE_SK       INTEGER                       ,
-        WR_RETURNED_TIME_SK       INTEGER                       ,
-        WR_ITEM_SK                INTEGER               NOT NULL,
-        WR_REFUNDED_CUSTOMER_SK   INTEGER                       ,
-        WR_REFUNDED_CDEMO_SK      INTEGER                       ,
-        WR_REFUNDED_HDEMO_SK      INTEGER                       ,
-        WR_REFUNDED_ADDR_SK       INTEGER                       ,
-        WR_RETURNING_CUSTOMER_SK  INTEGER                       ,
-        WR_RETURNING_CDEMO_SK     INTEGER                       ,
-        WR_RETURNING_HDEMO_SK     INTEGER                       ,
-        WR_RETURNING_ADDR_SK      INTEGER                       ,
-        WR_WEB_PAGE_SK            INTEGER                       ,
-        WR_REASON_SK              INTEGER                       ,
-        WR_ORDER_NUMBER           BIGINT                NOT NULL,
-        WR_RETURN_QUANTITY        INTEGER                       ,
-        WR_RETURN_AMT             DECIMAL(7,2)                  ,
-        WR_RETURN_TAX             DECIMAL(7,2)                  ,
-        WR_RETURN_AMT_INC_TAX     DECIMAL(7,2)                  ,
-        WR_FEE                    DECIMAL(7,2)                  ,
-        WR_RETURN_SHIP_COST       DECIMAL(7,2)                  ,
-        WR_REFUNDED_CASH          DECIMAL(7,2)                  ,
-        WR_REVERSED_CHARGE        DECIMAL(7,2)                  ,
-        WR_ACCOUNT_CREDIT         DECIMAL(7,2)                  ,
-        WR_NET_LOSS               DECIMAL(7,2)
-    )
-    WITH (ORIENTATION = COLUMN,COMPRESSION=MIDDLE)
-    PARTITION BY RANGE(WR_RETURNED_DATE_SK)
-    (
-        PARTITION P1 VALUES LESS THAN(2450815),
-        PARTITION P2 VALUES LESS THAN(2451179),
-        PARTITION P3 VALUES LESS THAN(2451544),
-        PARTITION P4 VALUES LESS THAN(2451910),
-        PARTITION P5 VALUES LESS THAN(2452275),
-        PARTITION P6 VALUES LESS THAN(2452640),
-        PARTITION P7 VALUES LESS THAN(2453005),
-        PARTITION P8 VALUES LESS THAN(MAXVALUE)
-    );
-
-    --Import data from the example data table.
-    MogDB=# INSERT INTO tpcds.web_returns_p1 SELECT * FROM tpcds.web_returns;
-
-    --Delete partition **P8**.
-    MogDB=# ALTER TABLE tpcds.web_returns_p1 DROP PARTITION P8;
-
-    --Add partition **P8**, covering WR_RETURNED_DATE_SK values from 2453005 to 2453105.
-    MogDB=# ALTER TABLE tpcds.web_returns_p1 ADD PARTITION P8 VALUES LESS THAN (2453105);
-
-    --Add partition **P9**, covering WR_RETURNED_DATE_SK values from 2453105 to **MAXVALUE**.
-    MogDB=# ALTER TABLE tpcds.web_returns_p1 ADD PARTITION P9 VALUES LESS THAN (MAXVALUE);
-
-    --Delete partition **P8** by specifying a value it contains.
-    MogDB=# ALTER TABLE tpcds.web_returns_p1 DROP PARTITION FOR (2453005);
-
-    --Rename the **P7** partition to **P10**.
-    MogDB=# ALTER TABLE tpcds.web_returns_p1 RENAME PARTITION P7 TO P10;
-
-    --Rename the **P6** partition to **P11**.
-    MogDB=# ALTER TABLE tpcds.web_returns_p1 RENAME PARTITION FOR (2452639) TO P11;
-
-    --Query the number of rows in the **P10** partition.
-    MogDB=# SELECT count(*) FROM tpcds.web_returns_p1 PARTITION (P10);
-     count
-    --------
-      0
-    (1 row)
-
-    --Query the number of rows in the **P1** partition.
-    MogDB=# SELECT COUNT(*) FROM tpcds.web_returns_p1 PARTITION FOR (2450815);
-     count
-    --------
-      0
-    (1 row)
-    ```
-
-- Example 2: Create a range partitioned table **tpcds.web_returns_p2**. The table has eight partitions and its partition key is of type integer. The upper limit of the eighth partition is **MAXVALUE**.
-
-    The ranges of the eight partitions are wr_returned_date_sk < 2450815, 2450815 <= wr_returned_date_sk < 2451179, 2451179 <= wr_returned_date_sk < 2451544, 2451544 <= wr_returned_date_sk < 2451910, 2451910 <= wr_returned_date_sk < 2452275, 2452275 <= wr_returned_date_sk < 2452640, 2452640 <= wr_returned_date_sk < 2453005, and wr_returned_date_sk >= 2453005.
-
-    The tablespace of the **tpcds.web_returns_p2** partitioned table is **example1**. Partitions **P1** to **P7** have no specified tablespaces and therefore use the table's **example1** tablespace; the **P8** partition uses the **example2** tablespace.
- - Assume that the following data directories of the database nodes are empty directories for which user **dwsadmin** has the read and write permissions: **/pg_location/mount1/path1**, **/pg_location/mount2/path2**, **/pg_location/mount3/path3**, and **/pg_location/mount4/path4**. - - ```sql - MogDB=# CREATE TABLESPACE example1 RELATIVE LOCATION 'tablespace1/tablespace_1'; - MogDB=# CREATE TABLESPACE example2 RELATIVE LOCATION 'tablespace2/tablespace_2'; - MogDB=# CREATE TABLESPACE example3 RELATIVE LOCATION 'tablespace3/tablespace_3'; - MogDB=# CREATE TABLESPACE example4 RELATIVE LOCATION 'tablespace4/tablespace_4'; - - MogDB=# CREATE TABLE tpcds.web_returns_p2 - ( - WR_RETURNED_DATE_SK INTEGER , - WR_RETURNED_TIME_SK INTEGER , - WR_ITEM_SK INTEGER NOT NULL, - WR_REFUNDED_CUSTOMER_SK INTEGER , - WR_REFUNDED_CDEMO_SK INTEGER , - WR_REFUNDED_HDEMO_SK INTEGER , - WR_REFUNDED_ADDR_SK INTEGER , - WR_RETURNING_CUSTOMER_SK INTEGER , - WR_RETURNING_CDEMO_SK INTEGER , - WR_RETURNING_HDEMO_SK INTEGER , - WR_RETURNING_ADDR_SK INTEGER , - WR_WEB_PAGE_SK INTEGER , - WR_REASON_SK INTEGER , - WR_ORDER_NUMBER BIGINT NOT NULL, - WR_RETURN_QUANTITY INTEGER , - WR_RETURN_AMT DECIMAL(7,2) , - WR_RETURN_TAX DECIMAL(7,2) , - WR_RETURN_AMT_INC_TAX DECIMAL(7,2) , - WR_FEE DECIMAL(7,2) , - WR_RETURN_SHIP_COST DECIMAL(7,2) , - WR_REFUNDED_CASH DECIMAL(7,2) , - WR_REVERSED_CHARGE DECIMAL(7,2) , - WR_ACCOUNT_CREDIT DECIMAL(7,2) , - WR_NET_LOSS DECIMAL(7,2) - ) - TABLESPACE example1 - PARTITION BY RANGE(WR_RETURNED_DATE_SK) - ( - PARTITION P1 VALUES LESS THAN(2450815), - PARTITION P2 VALUES LESS THAN(2451179), - PARTITION P3 VALUES LESS THAN(2451544), - PARTITION P4 VALUES LESS THAN(2451910), - PARTITION P5 VALUES LESS THAN(2452275), - PARTITION P6 VALUES LESS THAN(2452640), - PARTITION P7 VALUES LESS THAN(2453005), - PARTITION P8 VALUES LESS THAN(MAXVALUE) TABLESPACE example2 - ) - ENABLE ROW MOVEMENT; - - --Create a partitioned table using **LIKE**. - MogDB=# CREATE TABLE tpcds.web_returns_p3 (LIKE tpcds.web_returns_p2 INCLUDING PARTITION); - - --Change the tablespace of the **P1** partition to **example2**. - MogDB=# ALTER TABLE tpcds.web_returns_p2 MOVE PARTITION P1 TABLESPACE example2; - - --Change the tablespace of the **P2** partition to **example3**. - MogDB=# ALTER TABLE tpcds.web_returns_p2 MOVE PARTITION P2 TABLESPACE example3; - - --Split the **P8** partition at 2453010. - MogDB=# ALTER TABLE tpcds.web_returns_p2 SPLIT PARTITION P8 AT (2453010) INTO - ( - PARTITION P9, - PARTITION P10 - ); - - --Merge the **P6** and **P7** partitions into one. - MogDB=# ALTER TABLE tpcds.web_returns_p2 MERGE PARTITIONS P6, P7 INTO PARTITION P8; - - --Modify the migration attribute of a partitioned table. - MogDB=# ALTER TABLE tpcds.web_returns_p2 DISABLE ROW MOVEMENT; - --Drop tables and tablespaces. - MogDB=# DROP TABLE tpcds.web_returns_p1; - MogDB=# DROP TABLE tpcds.web_returns_p2; - MogDB=# DROP TABLE tpcds.web_returns_p3; - MogDB=# DROP TABLESPACE example1; - MogDB=# DROP TABLESPACE example2; - MogDB=# DROP TABLESPACE example3; - MogDB=# DROP TABLESPACE example4; - ``` - -- Example 3: Use **START END** to create and modify a range partitioned table. - - Assume that **/home/omm/startend_tbs1**, **/home/omm/startend_tbs2**, **/home/omm/startend_tbs3**, and **/home/omm/startend_tbs4** are empty directories on which user **omm** has the read and write permissions. 
- - ```sql - -- Creating Tablespaces - MogDB=# CREATE TABLESPACE startend_tbs1 LOCATION '/home/omm/startend_tbs1'; - MogDB=# CREATE TABLESPACE startend_tbs2 LOCATION '/home/omm/startend_tbs2'; - MogDB=# CREATE TABLESPACE startend_tbs3 LOCATION '/home/omm/startend_tbs3'; - MogDB=# CREATE TABLESPACE startend_tbs4 LOCATION '/home/omm/startend_tbs4'; - - -- Create a temporary schema. - MogDB=# CREATE SCHEMA tpcds; - MogDB=# SET CURRENT_SCHEMA TO tpcds; - - -- Create a partitioned table with the partition key of type integer. - MogDB=# CREATE TABLE tpcds.startend_pt (c1 INT, c2 INT) - TABLESPACE startend_tbs1 - PARTITION BY RANGE (c2) ( - PARTITION p1 START(1) END(1000) EVERY(200) TABLESPACE startend_tbs2, - PARTITION p2 END(2000), - PARTITION p3 START(2000) END(2500) TABLESPACE startend_tbs3, - PARTITION p4 START(2500), - PARTITION p5 START(3000) END(5000) EVERY(1000) TABLESPACE startend_tbs4 - ) - ENABLE ROW MOVEMENT; - - -- View the information of the partitioned table. - MogDB=# SELECT relname, boundaries, spcname FROM pg_partition p JOIN pg_tablespace t ON p.reltablespace=t.oid and p.parentid='tpcds.startend_pt'::regclass ORDER BY 1; - relname | boundaries | spcname - -------------+------------+--------------- - p1_0 | {1} | startend_tbs2 - p1_1 | {201} | startend_tbs2 - p1_2 | {401} | startend_tbs2 - p1_3 | {601} | startend_tbs2 - p1_4 | {801} | startend_tbs2 - p1_5 | {1000} | startend_tbs2 - p2 | {2000} | startend_tbs1 - p3 | {2500} | startend_tbs3 - p4 | {3000} | startend_tbs1 - p5_1 | {4000} | startend_tbs4 - p5_2 | {5000} | startend_tbs4 - startend_pt | | startend_tbs1 - (12 rows) - - -- Import data and check the data volume in the partition. - MogDB=# INSERT INTO tpcds.startend_pt VALUES (GENERATE_SERIES(0, 4999), GENERATE_SERIES(0, 4999)); - MogDB=# SELECT COUNT(*) FROM tpcds.startend_pt PARTITION FOR (0); - count - ------- - 1 - (1 row) - - MogDB=# SELECT COUNT(*) FROM tpcds.startend_pt PARTITION (p3); - count - ------- - 500 - (1 row) - - -- Add partitions [5000, 5300), [5300, 5600), [5600, 5900), and [5900, 6000). - MogDB=# ALTER TABLE tpcds.startend_pt ADD PARTITION p6 START(5000) END(6000) EVERY(300) TABLESPACE startend_tbs4; - - -- Add the partition p7, specified by **MAXVALUE**. - MogDB=# ALTER TABLE tpcds.startend_pt ADD PARTITION p7 END(MAXVALUE); - - -- Rename the partition p7 to p8. - MogDB=# ALTER TABLE tpcds.startend_pt RENAME PARTITION p7 TO p8; - - -- Delete the partition p8. - MogDB=# ALTER TABLE tpcds.startend_pt DROP PARTITION p8; - - -- Rename the partition where 5950 is located to p71. - MogDB=# ALTER TABLE tpcds.startend_pt RENAME PARTITION FOR(5950) TO p71; - - -- Split the partition [4000, 5000) where 4500 is located. - MogDB=# ALTER TABLE tpcds.startend_pt SPLIT PARTITION FOR(4500) INTO(PARTITION q1 START(4000) END(5000) EVERY(250) TABLESPACE startend_tbs3); - - -- Change the tablespace of the partition p2 to startend_tbs4. - MogDB=# ALTER TABLE tpcds.startend_pt MOVE PARTITION p2 TABLESPACE startend_tbs4; - - -- View the partition status. 
- MogDB=# SELECT relname, boundaries, spcname FROM pg_partition p JOIN pg_tablespace t ON p.reltablespace=t.oid and p.parentid='tpcds.startend_pt'::regclass ORDER BY 1; - relname | boundaries | spcname - -------------+------------+--------------- - p1_0 | {1} | startend_tbs2 - p1_1 | {201} | startend_tbs2 - p1_2 | {401} | startend_tbs2 - p1_3 | {601} | startend_tbs2 - p1_4 | {801} | startend_tbs2 - p1_5 | {1000} | startend_tbs2 - p2 | {2000} | startend_tbs4 - p3 | {2500} | startend_tbs3 - p4 | {3000} | startend_tbs1 - p5_1 | {4000} | startend_tbs4 - p6_1 | {5300} | startend_tbs4 - p6_2 | {5600} | startend_tbs4 - p6_3 | {5900} | startend_tbs4 - p71 | {6000} | startend_tbs4 - q1_1 | {4250} | startend_tbs3 - q1_2 | {4500} | startend_tbs3 - q1_3 | {4750} | startend_tbs3 - q1_4 | {5000} | startend_tbs3 - startend_pt | | startend_tbs1 - (19 rows) - - -- Delete tables and tablespaces: - MogDB=# DROP SCHEMA tpcds CASCADE; - MogDB=# DROP TABLESPACE startend_tbs1; - MogDB=# DROP TABLESPACE startend_tbs2; - MogDB=# DROP TABLESPACE startend_tbs3; - MogDB=# DROP TABLESPACE startend_tbs4; - ``` - -- Example 4: Create interval partitioned table **sales**. The table initially contains two partitions and the partition key is of the DATE type. Ranges of the two partitions are as follows: **time_id** < '2019-02-01 00:00:00' and '2019-02-01 00:00:00' ≤ **time_id** < '2019-02-02 00:00:00', respectively. - - ```sql - --Create table sales. - MogDB=# CREATE TABLE sales - (prod_id NUMBER(6), - cust_id NUMBER, - time_id DATE, - channel_id CHAR(1), - promo_id NUMBER(6), - quantity_sold NUMBER(3), - amount_sold NUMBER(10,2) - ) - PARTITION BY RANGE (time_id) - INTERVAL('1 day') - ( PARTITION p1 VALUES LESS THAN ('2019-02-01 00:00:00'), - PARTITION p2 VALUES LESS THAN ('2019-02-02 00:00:00') - ); - - -- Insert data into partition p1. - MogDB=# INSERT INTO sales VALUES(1, 12, '2019-01-10 00:00:00', 'a', 1, 1, 1); - - -- Insert data into partition p2. - MogDB=# INSERT INTO sales VALUES(1, 12, '2019-02-01 00:00:00', 'a', 1, 1, 1); - - -- View partition information. - MogDB=# SELECT t1.relname, partstrategy, boundaries FROM pg_partition t1, pg_class t2 WHERE t1.parentid = t2.oid AND t2.relname = 'sales' AND t1.parttype = 'p'; - relname | partstrategy | boundaries - ---------+--------------+------------------------- - p1 | r | {"2019-02-01 00:00:00"} - p2 | r | {"2019-02-02 00:00:00"} - (2 rows) - - -- If the data to be inserted does not match any partition, create a partition and insert the data into the new partition. - -- The range of the new partition is '2019-02-05 00:00:00' ≤ time_id < '2019-02-06 00:00:00'. - MogDB=# INSERT INTO sales VALUES(1, 12, '2019-02-05 00:00:00', 'a', 1, 1, 1); - - -- If the data to be inserted does not match any partition, create a partition and insert the data into the new partition. - -- The range of the new partition is '2019-02-03 00:00:00' ≤ time_id < '2019-02-04 00:00:00'. - MogDB=# INSERT INTO sales VALUES(1, 12, '2019-02-03 00:00:00', 'a', 1, 1, 1); - - -- View partition information. - MogDB=# SELECT t1.relname, partstrategy, boundaries FROM pg_partition t1, pg_class t2 WHERE t1.parentid = t2.oid AND t2.relname = 'sales' AND t1.parttype = 'p'; - relname | partstrategy | boundaries - ---------+--------------+------------------------- - sys_p1 | i | {"2019-02-06 00:00:00"} - sys_p2 | i | {"2019-02-04 00:00:00"} - p1 | r | {"2019-02-01 00:00:00"} - p2 | r | {"2019-02-02 00:00:00"} - (4 rows) - ``` - -- Example 5: Create list partitioned table **test_list**. 
The table initially contains four partitions and the partition key is of the INT type. The ranges of the four partitions are 2000, 3000, 4000, and 5000 respectively. - - ```sql - --Create the test_list table. - MogDB=# create table test_list (col1 int, col2 int) - partition by list(col1) - ( - partition p1 values (2000), - partition p2 values (3000), - partition p3 values (4000), - partition p4 values (5000) - ); - - -- Insert data. - MogDB=# INSERT INTO test_list VALUES(2000, 2000); - INSERT 0 1 - MogDB=# INSERT INTO test_list VALUES(3000, 3000); - INSERT 0 1 - - -- View partition information. - MogDB=# SELECT t1.relname, partstrategy, boundaries FROM pg_partition t1, pg_class t2 WHERE t1.parentid = t2.oid AND t2.relname = 'test_list' AND t1.parttype = 'p'; - relname | partstrategy | boundaries - ---------+--------------+------------ - p1 | l | {2000} - p2 | l | {3000} - p3 | l | {4000} - p4 | l | {5000} - (4 rows) - - -- The inserted data does not match the partition, and an error is reported. - MogDB=# INSERT INTO test_list VALUES(6000, 6000); - ERROR: inserted partition key does not map to any table partition - - -- Add a partition. - MogDB=# alter table test_list add partition p5 values (6000); - ALTER TABLE - MogDB=# SELECT t1.relname, partstrategy, boundaries FROM pg_partition t1, pg_class t2 WHERE t1.parentid = t2.oid AND t2.relname = 'test_list' AND t1.parttype = 'p'; - relname | partstrategy | boundaries - ---------+--------------+------------ - p5 | l | {6000} - p4 | l | {5000} - p1 | l | {2000} - p2 | l | {3000} - p3 | l | {4000} - (5 rows) - MogDB=# INSERT INTO test_list VALUES(6000, 6000); - INSERT 0 1 - - -- Exchange data between the partitioned table and ordinary table. - MogDB=# create table t1 (col1 int, col2 int); - CREATE TABLE - MogDB=# select * from test_list partition (p1); - col1 | col2 - ------+------ - 2000 | 2000 - (1 row) - MogDB=# alter table test_list exchange partition (p1) with table t1; - ALTER TABLE - MogDB=# select * from test_list partition (p1); - col1 | col2 - ------+------ - (0 rows) - MogDB=# select * from t1; - col1 | col2 - ------+------ - 2000 | 2000 - (1 row) - - -- Truncate the partition. - MogDB=# select * from test_list partition (p2); - col1 | col2 - ------+------ - 3000 | 3000 - (1 row) - MogDB=# alter table test_list truncate partition p2; - ALTER TABLE - MogDB=# select * from test_list partition (p2); - col1 | col2 - ------+------ - (0 rows) - - -- Delete a partition. - MogDB=# alter table test_list drop partition p5; - ALTER TABLE - MogDB=# SELECT t1.relname, partstrategy, boundaries FROM pg_partition t1, pg_class t2 WHERE t1.parentid = t2.oid AND t2.relname = 'test_list' AND t1.parttype = 'p'; - relname | partstrategy | boundaries - ---------+--------------+------------ - p4 | l | {5000} - p1 | l | {2000} - p2 | l | {3000} - p3 | l | {4000} - (4 rows) - - MogDB=# INSERT INTO test_list VALUES(6000, 6000); - ERROR: inserted partition key does not map to any table partition - - -- Delete a partitioned table. - MogDB=# drop table test_list; - ``` - -- Example 6: Create a hash partitioned table **test_hash**. The table initially contains two partitions and the partition key is of the INT type. - - ```sql - --Create the test_hash table. - MogDB=# create table test_hash (col1 int, col2 int) - partition by hash(col1) - ( - partition p1, - partition p2 - ); - - -- Insert data. 
- MogDB=# INSERT INTO test_hash VALUES(1, 1); - INSERT 0 1 - MogDB=# INSERT INTO test_hash VALUES(2, 2); - INSERT 0 1 - MogDB=# INSERT INTO test_hash VALUES(3, 3); - INSERT 0 1 - MogDB=# INSERT INTO test_hash VALUES(4, 4); - INSERT 0 1 - - -- View partition information. - MogDB=# SELECT t1.relname, partstrategy, boundaries FROM pg_partition t1, pg_class t2 WHERE t1.parentid = t2.oid AND t2.relname = 'test_hash' AND t1.parttype = 'p'; - relname | partstrategy | boundaries - ---------+--------------+------------ - p1 | h | {0} - p2 | h | {1} - (2 rows) - - -- View data. - MogDB=# select * from test_hash partition (p1); - col1 | col2 - ------+------ - 3 | 3 - 4 | 4 - (2 rows) - - MogDB=# select * from test_hash partition (p2); - col1 | col2 - ------+------ - 1 | 1 - 2 | 2 - (2 rows) - - -- Exchange data between the partitioned table and ordinary table. - MogDB=# create table t1 (col1 int, col2 int); - CREATE TABLE - MogDB=# alter table test_hash exchange partition (p1) with table t1; - ALTER TABLE - MogDB=# select * from test_hash partition (p1); - col1 | col2 - ------+------ - (0 rows) - MogDB=# select * from t1; - col1 | col2 - ------+------ - 3 | 3 - 4 | 4 - (2 rows) - - -- Truncate the partition. - MogDB=# alter table test_hash truncate partition p2; - ALTER TABLE - MogDB=# select * from test_hash partition (p2); - col1 | col2 - ------+------ - (0 rows) - - -- Delete a partitioned table. - MogDB=# drop table test_hash; - - - - --Examples of B-compatible REBUILD, REMOVE, CHECK, REPAIR, and OPTIMIZE syntax - --Create a partitioned table test_part. - CREATE TABLE IF NOT EXISTS test_part - ( - a int primary key not null default 5, - b int, - c int, - d int - ) - PARTITION BY RANGE(a) - ( - PARTITION p0 VALUES LESS THAN (100000), - PARTITION p1 VALUES LESS THAN (200000), - PARTITION p2 VALUES LESS THAN (300000) - ); - create unique index idx_c on test_part (c); - create index idx_b on test_part using btree(b) local; - alter table test_part add constraint uidx_d unique(d); - alter table test_part add constraint uidx_c unique using index idx_c; - --Insert data to a partitioned table. - insert into test_part (with RECURSIVE t_r(i,j,k,m) as(values(0,1,2,3) union all select i+1,j+2,k+3,m+4 from t_r where i < 250000) select * from t_r); - --Check partitioned table system information. - select relname from pg_partition where (parentid in (select oid from pg_class where relname = 'test_part')) and parttype = 'p' and oid != relfilenode order by relname; - --Select data from a partitioned table by index. - explain select * from test_part where ((99990 < c and c < 100000) or (219990 < c and c < 220000)); - select * from test_part where ((99990 < c and c < 100000) or (219990 < c and c < 220000)); - select * from test_part where ((99990 < d and d < 100000) or (219990 < d and d < 220000)); - select * from test_part where ((99990 < b and b < 100000) or (219990 < b and b < 220000)); - - --Check the REBUILD syntax. - ALTER TABLE test_part REBUILD PARTITION p0, p1; - --Check the system information and actual data of the partitioned table. 
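-    --(REBUILD PARTITION recreates the storage of the named partitions and rebuilds
-    -- their local indexes; the row data itself should be unchanged, so the checks
-    -- below should return the same results as before the rebuild.)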
- select relname from pg_partition where (parentid in (select oid from pg_class where relname = 'test_part')) and parttype = 'p' and oid != relfilenode order by relname; - explain select * from test_part where ((99990 < c and c < 100000) or (219990 < c and c < 220000)); - select * from test_part where ((99990 < c and c < 100000) or (219990 < c and c < 220000)); - select * from test_part where ((99990 < d and d < 100000) or (219990 < d and d < 220000)); - select * from test_part where ((99990 < b and b < 100000) or (219990 < b and b < 220000)); - - --Check the REBUILD PARTITION ALL syntax. - ALTER TABLE test_part REBUILD PARTITION all; - --Check the system information and actual data of the partitioned table. - select relname from pg_partition where (parentid in (select oid from pg_class where relname = 'test_part')) and parttype = 'p' and oid != relfilenode order by relname; - explain select * from test_part where ((99990 < c and c < 100000) or (219990 < c and c < 220000)); - select * from test_part where ((99990 < c and c < 100000) or (219990 < c and c < 220000)); - select * from test_part where ((99990 < d and d < 100000) or (219990 < d and d < 220000)); - select * from test_part where ((99990 < b and b < 100000) or (219990 < b and b < 220000)); - - --Check the REPAIR CHECK OPTIMIZE syntax. - ALTER TABLE test_part repair PARTITION p0,p1; - ALTER TABLE test_part check PARTITION p0,p1; - ALTER TABLE test_part optimize PARTITION p0,p1; - ALTER TABLE test_part repair PARTITION all; - ALTER TABLE test_part check PARTITION all; - ALTER TABLE test_part optimize PARTITION all; - - --Check the REMOVE PARTITIONING syntax. - select relname, boundaries from pg_partition where parentid in (select parentid from pg_partition where relname = 'test_part') order by relname; - select parttype,relname from pg_class where relname = 'test_part' and relfilenode != oid; - ALTER TABLE test_part remove PARTITIONING; - --Check the system information and actual data after partition information is removed from the partitioned table. - explain select * from test_part where ((99990 < c and c < 100000) or (219990 < c and c < 220000)); - select * from test_part where ((99990 < c and c < 100000) or (219990 < c and c < 220000)); - select relname, boundaries from pg_partition where parentid in (select parentid from pg_partition where relname = 'test_part') order by relname; - select parttype,relname from pg_class where relname = 'test_part' and relfilenode != oid; - - - --Examples of B-compatible TRUNCATE, ANALYZE, and EXCHANGE syntax - CREATE TABLE IF NOT EXISTS test_part1 - ( - a int, - b int - ) - PARTITION BY RANGE(a) - ( - PARTITION p0 VALUES LESS THAN (100), - PARTITION p1 VALUES LESS THAN (200), - PARTITION p2 VALUES LESS THAN (300) - ); - create table test_no_part1(a int, b int); - insert into test_part1 values(99,1),(199,1),(299,1); - select * from test_part1; - --Check the B-compatible TRUNCATE PARTITION syntax. - ALTER TABLE test_part1 truncate PARTITION p0, p1; - select * from test_part1; - insert into test_part1 (with RECURSIVE t_r(i,j) as(values(0,1) union all select i+1,j+2 from t_r where i < 20) select * from t_r); - select * from test_part1; - ALTER TABLE test_part1 truncate PARTITION all; - select * from test_part1; - --Check the MogDB TRUNCATE PARTITION syntax. - insert into test_part1 values(99,1),(199,1); - select * from test_part1; - ALTER TABLE test_part1 truncate PARTITION p0, truncate PARTITION p1; - select * from test_part1; - --Check the B-compatible EXCHANGE PARTITION syntax. 
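-    --(EXCHANGE PARTITION swaps the data between partition p2 and the ordinary table
-    -- test_no_part1; WITHOUT VALIDATION skips checking that the exchanged rows fall
-    -- within the partition boundary. Running the statement twice swaps the data back,
-    -- which the paired SELECT statements below demonstrate.)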
- insert into test_part1 values(99,1),(199,1),(299,1); - alter table test_part1 exchange partition p2 with table test_no_part1 without validation; - select * from test_part1; - select * from test_no_part1; - alter table test_part1 exchange partition p2 with table test_no_part1 without validation; - select * from test_part1; - select * from test_no_part1; - --Check the MogDB EXCHANGE PARTITION syntax. - alter table test_part1 exchange partition (p2) with table test_no_part1 without validation; - select * from test_part1; - select * from test_no_part1; - alter table test_part1 exchange partition (p2) with table test_no_part1 without validation; - select * from test_part1; - select * from test_no_part1; - --Check the B-compatible ANALYZE PARTITION syntax. - alter table test_part1 analyze partition p0,p1; - alter table test_part1 analyze partition all; - --Check the MogDB ANALYZE PARTITION syntax. - analyze test_part1 partition (p1); - - - --Examples of B-compatible ADD and DROP syntax - CREATE TABLE IF NOT EXISTS test_part2 - ( - a int, - b int - ) - PARTITION BY RANGE(a) - ( - PARTITION p0 VALUES LESS THAN (100), - PARTITION p1 VALUES LESS THAN (200), - PARTITION p2 VALUES LESS THAN (300), - PARTITION p3 VALUES LESS THAN (400) - ); - - CREATE TABLE IF NOT EXISTS test_subpart2 - ( - a int, - b int - ) - PARTITION BY RANGE(a) SUBPARTITION BY RANGE(b) - ( - PARTITION p0 VALUES LESS THAN (100) - ( - SUBPARTITION p0_0 VALUES LESS THAN (100), - SUBPARTITION p0_1 VALUES LESS THAN (200), - SUBPARTITION p0_2 VALUES LESS THAN (300) - ), - PARTITION p1 VALUES LESS THAN (200) - ( - SUBPARTITION p1_0 VALUES LESS THAN (100), - SUBPARTITION p1_1 VALUES LESS THAN (200), - SUBPARTITION p1_2 VALUES LESS THAN (300) - ), - PARTITION p2 VALUES LESS THAN (300) - ( - SUBPARTITION p2_0 VALUES LESS THAN (100), - SUBPARTITION p2_1 VALUES LESS THAN (200), - SUBPARTITION p2_2 VALUES LESS THAN (300) - ), - PARTITION p3 VALUES LESS THAN (400) - ( - SUBPARTITION p3_0 VALUES LESS THAN (100), - SUBPARTITION p3_1 VALUES LESS THAN (200), - SUBPARTITION p3_2 VALUES LESS THAN (300) - ) - ); - --test b_compatibility drop and add partition syntax - select relname, boundaries from pg_partition where parentid in (select parentid from pg_partition where relname = 'test_part2'); - ALTER TABLE test_part2 DROP PARTITION p3; - select relname, boundaries from pg_partition where parentid in (select parentid from pg_partition where relname = 'test_part2'); - ALTER TABLE test_part2 add PARTITION (PARTITION p3 VALUES LESS THAN (400),PARTITION p4 VALUES LESS THAN (500),PARTITION p5 VALUES LESS THAN (600)); - select relname, boundaries from pg_partition where parentid in (select parentid from pg_partition where relname = 'test_part2'); - ALTER TABLE test_part2 add PARTITION (PARTITION p6 VALUES LESS THAN (700),PARTITION p7 VALUES LESS THAN (800)); - ALTER TABLE test_part2 DROP PARTITION p4,p5,p6; - select relname, boundaries from pg_partition where parentid in (select parentid from pg_partition where relname = 'test_part2'); - ALTER TABLE test_part2 add PARTITION (PARTITION p4 VALUES LESS THAN (500)); - select relname, boundaries from pg_partition where parentid in (select oid from pg_partition where parentid in (select parentid from pg_partition where relname = 'test_subpart2')); - ALTER TABLE test_subpart2 DROP SUBPARTITION p0_0; - ALTER TABLE test_subpart2 DROP SUBPARTITION p0_2, p1_0, p1_2; - select relname, boundaries from pg_partition where parentid in (select oid from pg_partition where parentid in (select parentid from pg_partition 
where relname = 'test_subpart2')); - - --Examples of B-compatible REORGANIZE syntax - CREATE TABLE test_range_subpart - ( - a INT4 PRIMARY KEY, - b INT4 - ) - PARTITION BY RANGE (a) SUBPARTITION BY HASH (b) - ( - PARTITION p1 VALUES LESS THAN (200) - ( - SUBPARTITION s11, - SUBPARTITION s12, - SUBPARTITION s13, - SUBPARTITION s14 - ), - PARTITION p2 VALUES LESS THAN (500) - ( - SUBPARTITION s21, - SUBPARTITION s22 - ), - PARTITION p3 VALUES LESS THAN (800), - PARTITION p4 VALUES LESS THAN (1200) - ( - SUBPARTITION s41 - ) - ); - insert into test_range_subpart values(199,1),(499,1),(799,1),(1199,1); - --test test_range_subpart - alter table test_range_subpart reorganize partition p1,p2 into (partition m1 values less than(100),partition m2 values less than(500)(subpartition m21,subpartition m22)); - select pg_get_tabledef('test_range_subpart'); - select * from test_range_subpart subpartition(m22); - select * from test_range_subpart subpartition(m21); - select * from test_range_subpart partition(m1); - explain select /*+ indexscan(test_range_subpart test_range_subpart_pkey) */ * from test_range_subpart where a > 0; - select * from test_range_subpart; - - -- Create an index for a partitioned table. The default value of index in CREATE TABLE is local. Global/local cannot be specified. - CREATE TABLE test_partition_btree - ( - f1 INTEGER, - f2 INTEGER, - f3 INTEGER, - key part_btree_idx using btree(f1) - ) - PARTITION BY RANGE(f1) - ( - PARTITION P1 VALUES LESS THAN(2450815), - PARTITION P2 VALUES LESS THAN(2451179), - PARTITION P3 VALUES LESS THAN(2451544), - PARTITION P4 VALUES LESS THAN(MAXVALUE) - ); - - -- Create a composite index for partitioned tables. - CREATE TABLE test_partition_index - ( - f1 INTEGER, - f2 INTEGER, - f3 INTEGER, - key part_btree_idx2 using btree(f1 desc, f2 asc) - ) - PARTITION BY RANGE(f1) - ( - PARTITION P1 VALUES LESS THAN(2450815), - PARTITION P2 VALUES LESS THAN(2451179), - PARTITION P3 VALUES LESS THAN(2451544), - PARTITION P4 VALUES LESS THAN(MAXVALUE) - ); - - -- Create indexes for a column-store partitioned table. - CREATE TABLE test_partition_column - ( - f1 INTEGER, - f2 INTEGER, - f3 INTEGER, - key part_column(f1) - ) with (ORIENTATION = COLUMN) - PARTITION BY RANGE(f1) - ( - PARTITION P1 VALUES LESS THAN(2450815), - PARTITION P2 VALUES LESS THAN(2451179), - PARTITION P3 VALUES LESS THAN(2451544), - PARTITION P4 VALUES LESS THAN(MAXVALUE) - ); - - -- Create an expression index for a partitioned table. 
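-    -- (As with the other key/index clauses above, the expression index created here
-    -- is a local partitioned index; table_indexclause cannot create a global index.)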
-    CREATE TABLE test_partition_expr
-    (
-        f1  INTEGER,
-        f2  INTEGER,
-        f3  INTEGER,
-        key part_expr_idx using btree((abs(f1)+1))
-    )
-    PARTITION BY RANGE(f1)
-    (
-        PARTITION P1 VALUES LESS THAN(2450815),
-        PARTITION P2 VALUES LESS THAN(2451179),
-        PARTITION P3 VALUES LESS THAN(2451544),
-        PARTITION P4 VALUES LESS THAN(MAXVALUE)
-    );
-    ```
-
-## Helpful Links
-
-[ALTER TABLE PARTITION](../../../../../../reference-guide/sql-syntax/ALTER-TABLE-PARTITION.md), [DROP TABLE](../../../../../../reference-guide/sql-syntax/DROP-TABLE.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table.md
deleted file mode 100644
index 87aa912d..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-table.md
+++ /dev/null
@@ -1,198 +0,0 @@
----
-title: dolphin CREATE TABLE
-summary: dolphin CREATE TABLE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# CREATE TABLE
-
-## Function
-
-Creates an empty table in the current database. The table will be owned by the creator.
-
-## Precautions
-
-- This section describes only the new syntax of Dolphin. The original syntax of MogDB is not deleted or modified.
-
-## Syntax
-
-Create a table using LIKE.
-
-```
-CREATE [ [ GLOBAL | LOCAL ] [ TEMPORARY | TEMP ] | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name LIKE source_table [ like_option [...] ]
-```
-
-Create a table.
-
-```
-CREATE [ [ GLOBAL | LOCAL ] [ TEMPORARY | TEMP ] | UNLOGGED ] TABLE [ IF NOT EXISTS ] table_name
-    ({ column_name data_type [ compress_mode ] [ COLLATE collation ] [ column_constraint [ ... ] ]
-        | table_constraint
-        | table_indexclause
-        | LIKE source_table [ like_option [...] ] }
-        [, ... ])
-    [ AUTO_INCREMENT [ = ] value ]
-    [ WITH ( {storage_parameter = value} [, ... ] ) ]
-    [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]
-    [ COMPRESS | NOCOMPRESS ]
-    [ TABLESPACE tablespace_name ]
-    [ COMMENT {=| } 'text' ];
-    [ create_option ]
-
-Where create_option is:
-
-    [ WITH ( {storage_parameter = value} [, ... ] ) ]
-    [ ON COMMIT { PRESERVE ROWS | DELETE ROWS | DROP } ]
-    [ COMPRESS | NOCOMPRESS ]
-    [ TABLESPACE tablespace_name ]
-    [ COMPRESSION [=] compression_arg ]
-    [ ENGINE [=] engine_name ]
-    [ COLLATE [=] collation_name ]
-    [ [DEFAULT] { CHARSET | CHARACTER SET } [=] charset_name ]
-    [ ROW_FORMAT [=] row_format_name ]
-
-    Except for the WITH option, the same create_option can be specified multiple times; the last occurrence takes effect.
-```
-
-- table_indexclause:
-
-    ```
-    {INDEX | KEY} [index_name] [index_type] (key_part,...)[index_option]...
-    ```
-
-    This syntax does not support CREATE FOREIGN TABLE (such as MOT).
-
-- Values of index_type are as follows:
-
-    ```
-    USING {BTREE | HASH | GIN | GIST | PSORT | UBTREE}
-    ```
-
-- Values of key_part are as follows:
-
-    ```
-    {col_name[(length)] | (expr)} [ASC | DESC]
-    ```
-
-    **length** indicates the prefix length used for a prefix index.
-
-- The index_option parameter is as follows:
-
-    ```
-    index_option:{
-        COMMENT 'string'
-      | index_type
-    }
-    ```
-
-    COMMENT and index_type can appear in any order and any number of times, but for the same option only the last occurrence takes effect.
- -- The like_option is as follows: - - ``` - { INCLUDING | EXCLUDING } { DEFAULTS | GENERATED | CONSTRAINTS | INDEXES | STORAGE | COMMENTS | PARTITION | RELOPTIONS | ALL } - ``` - -## Parameter Description - -- **data_type** - - Specifies the data type of the column. - - For the enumeration type ENUM and character types such as CHAR, CHARACTER, VARCHAR, TEXT, you can use the keyword CHARSET or CHARACTER SET to specify the column character set when creating a table. Currently, it is used only for syntax and has no actual purpose. - -- **column_constraint** - - The ON UPDATE feature of MySQL is added to the column type constraint. The constraint is of the same type as the DEFAULT attribute. The ON UPDATE attribute is used to automatically update the timestamp column when the timestamp column of the UPDATE operation is set to the default value. - - ```sql - CREATE TABLE table_name(column_name timestamp ON UPDATE CURRENT_TIMESTAMP); - ``` - -- **COLLATE collation** - - Assigns a collation to the column (which must be of a collatable data type). If no collation is specified, the default collation is used. You can run the **select \* from pg_collation** command to query collation rules from the **pg_collation** system catalog. The default collation rule is the row starting with **default** in the query result. - - If a collation is not supported, the database issues a warning and sets the column as the default collation. - -- **{ [DEFAULT] CHARSET | CHARACTER SET } [=] charset_name** - - Selects the character set used by the table. Currently, it is used only for syntax and has no actual purpose. - -- **COLLATE [=] collation_name** - - Selects the collation used by a table. Currently, it is used only for syntax and has no actual purpose. - -- **ROW_FORMAT [=] row_format_name** - - Selects the row-store format used by a table. Currently, it is used only for syntax and has no actual purpose. - -## Examples - -```sql ---Create an index on a table. -MogDB=# CREATE TABLE tpcds.warehouse_t24 -( - W_WAREHOUSE_SK INTEGER NOT NULL, - W_WAREHOUSE_ID CHAR(16) NOT NULL, - W_WAREHOUSE_NAME VARCHAR(20) , - W_WAREHOUSE_SQ_FT INTEGER , - W_STREET_NUMBER CHAR(10) , - W_STREET_NAME VARCHAR(60) , - W_STREET_TYPE CHAR(15) , - W_SUITE_NUMBER CHAR(10) , - W_CITY VARCHAR(60) , - W_COUNTY VARCHAR(30) , - W_STATE CHAR(2) , - W_ZIP CHAR(10) , - W_COUNTRY VARCHAR(20) , - W_GMT_OFFSET DECIMAL(5,2) , - key (W_WAREHOUSE_SK) , - index idx_ID using btree (W_WAREHOUSE_ID) -); - ---Create composite indexes, expression indexes, and function indexes on tables. -MogDB=# CREATE TABLE tpcds.warehouse_t25 -( - W_WAREHOUSE_SK INTEGER NOT NULL, - W_WAREHOUSE_ID CHAR(16) NOT NULL, - W_WAREHOUSE_NAME VARCHAR(20) , - W_WAREHOUSE_SQ_FT INTEGER , - W_STREET_NUMBER CHAR(10) , - W_STREET_NAME VARCHAR(60) , - W_STREET_TYPE CHAR(15) , - W_SUITE_NUMBER CHAR(10) , - W_CITY VARCHAR(60) , - W_COUNTY VARCHAR(30) , - W_STATE CHAR(2) , - W_ZIP CHAR(10) , - W_COUNTRY VARCHAR(20) , - W_GMT_OFFSET DECIMAL(5,2) , - key using btree (W_WAREHOUSE_SK, W_WAREHOUSE_ID desc) , - index idx_SQ_FT using btree ((abs(W_WAREHOUSE_SQ_FT))) , - key idx_SK using btree ((abs(W_WAREHOUSE_SK)+1)) -); - ---The index\_option column is included. -MogDB=# create table test_option(a int, index idx_op using btree(a) comment 'idx comment'); ---Specify the character set for the column when creating a table. -MogDB=# CREATE TABLE t_column_charset(c text CHARSET test_charset); -WARNING: character set "test_charset" for type text is not supported yet. 
default value set -CREATE TABLE - ---Specify the character order for the table when creating the table. -MogDB=# CREATE TABLE t_table_collate(c text) COLLATE test_collation; -WARNING: COLLATE for TABLE is not supported for current version. skipped -CREATE TABLE - ---Specify the character set for the table when creating the table. -MogDB=# CREATE TABLE t_table_charset(c text) CHARSET test_charset; -WARNING: CHARSET for TABLE is not supported for current version. skipped -CREATE TABLE - ---Specify the row record format for the table when creating the table. -MogDB=# CREATE TABLE t_row_format(c text) ROW_FORMAT test_row_format; -WARNING: ROW_FORMAT for TABLE is not supported for current version. skipped -CREATE TABLE -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-tablespace.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-tablespace.md deleted file mode 100644 index 4ce82ac8..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-tablespace.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: dolphin CREATE TABLESPACE -summary: dolphin CREATE TABLESPACE -author: zhang cuiping -date: 2022-10-24 ---- - -# CREATE TABLESPACE - -## Function - -Creates a tablespace in a database. - -## Precautions - -- This section describes only the new syntax of Dolphin. The original syntax of MogDB is not deleted or modified. - -## Syntax - -``` -CREATE TABLESPACE tablespace_name - tablespace_details; -``` - -In the preceding information, tablespace_details is as follows: - -``` -[ OWNER user_name ] [RELATIVE] LOCATION 'directory' [ MAXSIZE 'space_size' ] [with_option_clause] [ ENGINE [=] engine_name ] -| ADD DATAFILE 'directory' [ ENGINE [=] engine_name ] -``` - -## Parameter Description - -- **ENGINE [=] engine_name** - - Specifies the storage engine. Currently, it is used only for syntax and has no actual purpose. - -## Examples - -```sql ---Run the ADD DATAFILE syntax to create a tablespace. -CREATE TABLESPACE t_tbspace ADD DATAFILE 'my_tablespace' ENGINE = test_engine; -CREATE TABLESPACE -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-trigger.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-trigger.md deleted file mode 100644 index 532ae018..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-trigger.md +++ /dev/null @@ -1,449 +0,0 @@ ---- -title: dolphin CREATE TRIGGER -summary: dolphin CREATE TRIGGER -author: Guo Huan -date: 2023-05-15 ---- - -# CREATE TRIGGER - -## Function - -- Creates a trigger. The trigger will be associated with the specified table or view and will execute the specified function under certain conditions. - -- New syntax for creating triggers using MySQL formatting compared to the original MogDB syntax. -- Added syntax for creating triggers using a single sql. 
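-
-As a first taste of the single-SQL form mentioned above, the sketch below creates a trigger whose whole body is one INSERT statement (the table names are invented for illustration; the formal syntax and full examples appear in the sections that follow):
-
-```sql
--- Requires a B-compatible database; t_src and t_log are hypothetical tables.
-CREATE TRIGGER t_src_log_trigger
-    AFTER INSERT ON t_src
-    FOR EACH ROW
-    INSERT INTO t_log VALUES(1);
-```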
-
-## Precautions
-
-- Currently, triggers can be created only on ordinary row-store tables, not on column-store tables, temporary tables, unlogged tables, etc.
-- If multiple triggers of the same type are defined for the same event, they are fired in alphabetical order of their names.
-- Triggers are typically used to keep associated data synchronized across multiple tables. They have a significant impact on SQL execution performance, so they are not recommended for synchronization scenarios with large data volumes and high performance requirements.
-- The user who creates a trigger must have the TRIGGER privilege on the specified table or be granted the CREATE ANY TRIGGER privilege.
-
-## Syntax
-
-- **Syntax for Oracle-style trigger creation**
-
-```
-CREATE [ CONSTRAINT ] TRIGGER trigger_name { BEFORE | AFTER | INSTEAD OF } { event [ OR ... ] }
-    ON table_name
-    [ FROM referenced_table_name ]
-    { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }
-    [ FOR [ EACH ] { ROW | STATEMENT } ]
-    [ WHEN ( condition ) ]
-    EXECUTE PROCEDURE function_name ( arguments );
-```
-
-- **Syntax for creating triggers in MySQL-compatible style**
-
-```
-CREATE [ CONSTRAINT ] [ DEFINER=user ] TRIGGER [ IF NOT EXISTS ] trigger_name { BEFORE | AFTER | INSTEAD OF } { event [ OR ... ] }
-    ON table_name
-    [ FROM referenced_table_name ]
-    { NOT DEFERRABLE | [ DEFERRABLE ] { INITIALLY IMMEDIATE | INITIALLY DEFERRED } }
-    [ FOR [ EACH ] { ROW | STATEMENT } ]
-    [ WHEN ( condition ) ]
-    [ trigger_order ]
-    trigger_body
-```
-
-Where event contains the following:
-
-```
-    INSERT
-    UPDATE [ OF column_name [, ... ] ]
-    DELETE
-    TRUNCATE
-```
-
-Where trigger_order is:
-
-```
-    { FOLLOWS|PRECEDES } other_trigger_name
-```
-
-## Parameter Description
-
-- **CONSTRAINT**
-
-  Optional. Specifying this parameter creates a constraint trigger, that is, a trigger used as a constraint. This is the same as a regular trigger except that SET CONSTRAINTS can be used to adjust the time at which the trigger fires. A constraint trigger must be an AFTER ROW trigger.
-
-- **DEFINER**
-
-  Optional. This parameter affects the permission check applied to objects referenced inside the trigger.
-
-- **IF NOT EXISTS**
-
-  Optional. This parameter prevents an error from being raised when a trigger with the same name already exists on the same table in the same schema.
-
-- **trigger\_name**
-
-  The name of the trigger. It cannot be schema-qualified, because a trigger automatically inherits the schema of the table it is defined on, and triggers on the same table cannot share a name. For constraint triggers, this name is also used when modifying the trigger behavior with [SET CONSTRAINTS](../../../../../../reference-guide/sql-syntax/SET-CONSTRAINTS.md).
-
-  Range of values: a string that conforms to the identifier naming convention and has a maximum length of 63 characters.
-
-- **BEFORE**
-
-  The trigger function is executed before the trigger event occurs.
-
-- **AFTER**
-
-  The trigger function is executed after the trigger event occurs. Constraint triggers can only be specified as AFTER.
-
-- **INSTEAD OF**
-
-  The trigger function is executed as a direct substitute for the trigger event.
-
-- **event**
-
-  The event that fires the trigger. Valid values: INSERT, UPDATE, DELETE, or TRUNCATE. Multiple trigger events can be specified at the same time by joining them with OR.
-
-  For the UPDATE event type, columns can be specified using the following syntax:
-
-  ```
-  UPDATE OF column_name1 [, column_name2 ... ]
-  ```
-
-  This means the trigger fires when any of these columns is a target column of the UPDATE statement; however, the INSTEAD OF UPDATE type does not support specifying column information.
-
-- **table\_name**
-
-  The name of the table on which the trigger is to be created.
-
-  Range of values: the name of a table that already exists in the database.
-
-- **referenced\_table\_name**
-
-  The name of another table referenced by the constraint. It can be specified only for constraint triggers and is commonly used for foreign key constraints.
-
-  Range of values: the name of a table that already exists in the database.
-
-- **DEFERRABLE | NOT DEFERRABLE**
-
-  Controls whether the constraint can be deferred; these keywords apply only to constraint triggers.
-
-  For details, please refer to [CREATE TABLE](../../../../../../reference-guide/sql-syntax/CREATE-TABLE.md).
-
-- **INITIALLY IMMEDIATE | INITIALLY DEFERRED**
-
-  If the constraint is deferrable, this clause declares the default time to check the constraint; it applies only to constraint triggers.
-
-  For details, please refer to [CREATE TABLE](../../../../../../reference-guide/sql-syntax/CREATE-TABLE.md).
-
-- **FOR EACH ROW | FOR EACH STATEMENT**
-
-  How often the trigger fires.
-
-  - FOR EACH ROW means the trigger fires once for each row affected by the trigger event.
-  - FOR EACH STATEMENT means the trigger fires only once per SQL statement.
-
-  If not specified, the default is FOR EACH STATEMENT. Constraint triggers can only be specified as FOR EACH ROW.
-
-- **condition**
-
-  A conditional expression that determines whether the trigger function is actually executed. When WHEN is specified, the function is called only if the condition returns true.
-
-  In FOR EACH ROW triggers, the WHEN condition can reference old or new row values by writing OLD.column\_name or NEW.column\_name, respectively. Naturally, INSERT triggers cannot reference OLD, and DELETE triggers cannot reference NEW.
-
-  INSTEAD OF triggers do not support WHEN conditions.
-
-  WHEN expressions cannot contain subqueries.
-
-  For constraint triggers, the evaluation of the WHEN condition is not deferred; it occurs immediately after the update operation is performed. If the condition does not return TRUE, the trigger is not queued for deferred execution.
-
-- **function\_name**
-
-  A user-defined function, which must be declared with no arguments and a return type of trigger, executed when the trigger fires.
-
-- **arguments**
-
-  An optional comma-separated list of arguments to be supplied to the function when the trigger is executed. The arguments are literal string constants; simple names and numeric constants can also be written here, but they will all be converted to strings. Check the description of the implementation language of the trigger function to see how these arguments can be accessed within the function.
-
-- **trigger\_order**
-
-  Optional. The {FOLLOWS|PRECEDES} clause of trigger_order controls the firing order of triggers.
-  B-compatibility mode allows multiple triggers to be defined on the same table for the same trigger event; by default they fire in the order in which they were created (the trigger created first fires first). The order can be adjusted with {FOLLOWS|PRECEDES}: FOLLOWS places the newly created trigger immediately after the named trigger, and PRECEDES places it immediately before the named trigger; the other triggers shift accordingly.
-
-- **trigger\_body**
-
-  Defines the action of the trigger: either a block of code written directly between BEGIN ... END, or a single SQL statement. The single-statement form currently supports: INSERT, UPDATE, DELETE, SET, and CALL.
-
-  When a delimiter is set and the MySQL-style syntax is used to create the trigger, trigger_body is parsed according to MySQL's formatting rules, and any DECLARE section must be written inside the BEGIN ... END block.
-
-  >![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
-  >
-  >About trigger types:
-  >
-  >- INSTEAD OF triggers must be labeled FOR EACH ROW and can only be defined on views.
-  >- BEFORE and AFTER triggers can only be labeled FOR EACH STATEMENT when acting on a view.
-  >- TRUNCATE type triggers are limited to FOR EACH STATEMENT.
-
-  **Table 1** Types of triggers supported on tables and views:
-
-  | Trigger timing | Trigger event        | Row-level   | Statement-level |
-  | :------------- | :------------------- | :---------- | :-------------- |
-  | BEFORE         | INSERT/UPDATE/DELETE | Table       | Table and views |
-  | BEFORE         | TRUNCATE             | not support | Table           |
-  | AFTER          | INSERT/UPDATE/DELETE | Table       | Table and views |
-  | AFTER          | TRUNCATE             | not support | Table           |
-  | INSTEAD OF     | INSERT/UPDATE/DELETE | Views       | not support     |
-  | INSTEAD OF     | TRUNCATE             | not support | not support     |
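-
-  As a quick illustration of the matrix above, an INSTEAD OF trigger must be row-level and can only be defined on a view. The sketch below is added for illustration and reuses the source table and trigger function created in the Examples section:
-
-  ```sql
-  CREATE VIEW v_trigger_src AS SELECT * FROM test_trigger_src_tbl;
-  CREATE TRIGGER instead_insert_trigger
-      INSTEAD OF INSERT ON v_trigger_src
-      FOR EACH ROW
-      EXECUTE PROCEDURE tri_insert_func();
-  ```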
-
-  **Table 2** PLPGSQL type trigger function special variables:
-
-  | Variable name   | Variable Meaning                                              |
-  | :-------------- | :----------------------------------------------------------- |
-  | NEW             | The new tuple values involved in INSERT and UPDATE operations; NULL for DELETE. |
-  | OLD             | The old tuple values involved in UPDATE and DELETE operations; NULL for INSERT. |
-  | TG_NAME         | Trigger name.                                                 |
-  | TG_WHEN         | Trigger timing (BEFORE/AFTER/INSTEAD OF).                     |
-  | TG_LEVEL        | Trigger frequency (ROW/STATEMENT).                            |
-  | TG_OP           | Trigger operation (INSERT/UPDATE/DELETE/TRUNCATE).            |
-  | TG_RELID        | OID of the table the trigger is on.                           |
-  | TG_RELNAME      | Name of the table the trigger is on (deprecated; now replaced by TG_TABLE_NAME). |
-  | TG_TABLE_NAME   | Name of the table the trigger is on.                          |
-  | TG_TABLE_SCHEMA | Schema of the table the trigger is on.                        |
-  | TG_NARGS        | Number of trigger function arguments.                         |
-  | TG_ARGV[]       | Trigger function argument list.                               |
-
-## Examples
-
-```sql
--- Create the source table and the destination table
-MogDB=# CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 INT);
-MogDB=# CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT, id3 INT);
-
--- Create the trigger functions
-MogDB=# CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS
-    $$
-    DECLARE
-    BEGIN
-        INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3);
-        RETURN NEW;
-    END
-    $$ LANGUAGE PLPGSQL;
-
-MogDB=# CREATE OR REPLACE FUNCTION tri_update_func() RETURNS TRIGGER AS
-    $$
-    DECLARE
-    BEGIN
-        UPDATE test_trigger_des_tbl SET id3 = NEW.id3 WHERE id1=OLD.id1;
-        RETURN OLD;
-    END
-    $$ LANGUAGE PLPGSQL;
-
-MogDB=# CREATE OR REPLACE FUNCTION tri_delete_func() RETURNS TRIGGER AS
-    $$
-    DECLARE
-    BEGIN
-        DELETE FROM test_trigger_des_tbl WHERE id1=OLD.id1;
-        RETURN OLD;
-    END
-    $$ LANGUAGE PLPGSQL;
-
--- Create an INSERT trigger
-MogDB=# CREATE TRIGGER insert_trigger
-    BEFORE INSERT ON test_trigger_src_tbl
-    FOR EACH ROW
-    EXECUTE PROCEDURE tri_insert_func();
-
--- Create an UPDATE trigger
-MogDB=# CREATE TRIGGER update_trigger
-    AFTER UPDATE ON test_trigger_src_tbl
-    FOR EACH ROW
-    EXECUTE PROCEDURE tri_update_func();
-
--- Create a DELETE trigger
-MogDB=# CREATE TRIGGER delete_trigger
-    BEFORE DELETE ON test_trigger_src_tbl
-    FOR EACH ROW
-    EXECUTE PROCEDURE tri_delete_func();
-
--- Execute the INSERT trigger event and check the trigger result
-MogDB=# INSERT INTO test_trigger_src_tbl VALUES(100,200,300);
-MogDB=# SELECT * FROM test_trigger_src_tbl;
-MogDB=# SELECT * FROM test_trigger_des_tbl;  -- Check whether the triggered action took effect.
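-
--- (Added sketch, not part of the original example) A trigger function that uses the
--- special variables from Table 2 (TG_OP, TG_TABLE_NAME) to record every operation;
--- the audit table is invented for illustration.
-MogDB=# CREATE TABLE test_trigger_audit(op text, tbl text, happened timestamp);
-MogDB=# CREATE OR REPLACE FUNCTION tri_audit_func() RETURNS TRIGGER AS
-    $$
-    DECLARE
-    BEGIN
-        INSERT INTO test_trigger_audit VALUES(TG_OP, TG_TABLE_NAME, now());
-        IF (TG_OP = 'DELETE') THEN
-            RETURN OLD;
-        END IF;
-        RETURN NEW;
-    END
-    $$ LANGUAGE PLPGSQL;
-MogDB=# CREATE TRIGGER audit_trigger
-    AFTER INSERT OR UPDATE OR DELETE ON test_trigger_src_tbl
-    FOR EACH ROW
-    EXECUTE PROCEDURE tri_audit_func();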
-
--- Execute the UPDATE trigger event and check the trigger result
-MogDB=# UPDATE test_trigger_src_tbl SET id3=400 WHERE id1=100;
-MogDB=# SELECT * FROM test_trigger_src_tbl;
-MogDB=# SELECT * FROM test_trigger_des_tbl;  -- Check whether the triggered action took effect.
-
--- Execute the DELETE trigger event and check the trigger result
-MogDB=# DELETE FROM test_trigger_src_tbl WHERE id1=100;
-MogDB=# SELECT * FROM test_trigger_src_tbl;
-MogDB=# SELECT * FROM test_trigger_des_tbl;  -- Check whether the triggered action took effect.
-
--- Rename the trigger
-MogDB=# ALTER TRIGGER delete_trigger ON test_trigger_src_tbl RENAME TO delete_trigger_renamed;
-
--- Disable the insert_trigger trigger
-MogDB=# ALTER TABLE test_trigger_src_tbl DISABLE TRIGGER insert_trigger;
-
--- Disable all triggers on the current table
-MogDB=# ALTER TABLE test_trigger_src_tbl DISABLE TRIGGER ALL;
-
--- Drop the triggers
-MogDB=# DROP TRIGGER insert_trigger ON test_trigger_src_tbl;
-MogDB=# DROP TRIGGER update_trigger ON test_trigger_src_tbl;
-MogDB=# DROP TRIGGER delete_trigger_renamed ON test_trigger_src_tbl;
--- Create a B-compatible database
-MogDB=# create database db_mysql dbcompatibility 'B';
--- Create the definer user for the trigger.
-MogDB=# create user test_user password 'Gauss@123';
--- Create the source tables and trigger tables.
-db_mysql=# create table test_mysql_trigger_src_tbl (id INT);
-db_mysql=# create table test_mysql_trigger_des_tbl (id INT);
-db_mysql=# create table animals (id INT, name CHAR(30));
-db_mysql=# create table food (id INT, foodtype VARCHAR(32), remark VARCHAR(32), time_flag TIMESTAMP);
--- Create a MySQL-compatible trigger with the DEFINER syntax.
-db_mysql=# create definer=test_user trigger trigger1
-    after insert on test_mysql_trigger_src_tbl
-    for each row
-    begin
-        insert into test_mysql_trigger_des_tbl values(1);
-    end;
-    /
--- Create a MySQL-compatible trigger with the trigger_order syntax.
-db_mysql=# create trigger animal_trigger1
-    after insert on animals
-    for each row
-    begin
-        insert into food(id, foodtype, remark, time_flag) values (1,'ice cream', 'sdsdsdsd', now());
-    end;
-    /
--- Create MySQL-compatible FOLLOWS triggers
-db_mysql=# create trigger animal_trigger2
-    after insert on animals
-    for each row
-    follows animal_trigger1
-    begin
-        insert into food(id, foodtype, remark, time_flag) values (2,'chocolate', 'sdsdsdsd', now());
-    end;
-    /
-db_mysql=# create trigger animal_trigger3
-    after insert on animals
-    for each row
-    follows animal_trigger1
-    begin
-        insert into food(id, foodtype, remark, time_flag) values (3,'cake', 'sdsdsdsd', now());
-    end;
-    /
-db_mysql=# create trigger animal_trigger4
-    after insert on animals
-    for each row
-    follows animal_trigger1
-    begin
-        insert into food(id, foodtype, remark, time_flag) values (4,'sausage', 'sdsdsdsd', now());
-    end;
-    /
--- Execute the insert trigger event and check the result.
-db_mysql=# insert into animals (id, name) values(1,'lion');
-db_mysql=# select * from animals;
-db_mysql=# select id, foodtype, remark from food;
--- Create MySQL-compatible PRECEDES triggers
-db_mysql=# create trigger animal_trigger5
-    after insert on animals
-    for each row
-    precedes animal_trigger3
-    begin
-        insert into food(id, foodtype, remark, time_flag) values (5,'milk', 'sdsds', now());
-    end;
-    /
-db_mysql=# create trigger animal_trigger6
-    after insert on animals
-    for each row
-    precedes animal_trigger2
-    begin
-        insert into food(id, foodtype, remark, time_flag) values (6,'strawberry', 'sdsds', now());
-    end;
-    /
--- Execute the insert trigger event and check the result.
-db_mysql=# insert into animals (id, name) values(2, 'dog');
-db_mysql=# select * from animals;
-db_mysql=# select id, foodtype, remark from food;
-
--- Create a MySQL-compatible trigger with the IF NOT EXISTS syntax.
-db_mysql=# create trigger if not exists animal_trigger1
-    after insert on animals
-    for each row
-    begin
-        insert into food(id, foodtype, remark, time_flag) values (1,'ice cream', 'sdsdsdsd', now());
-    end;
-    /
--- Create a trigger in MySQL format.
-db_mysql=# delimiter //
-
-db_mysql=# create trigger animal_d_trigger1
-    after insert on animals
-    for each row
-    begin
-        insert into food (id ,foodtype, remark, time_flag) values(1,'ice','avcs', now());
-    end;
-    //
-
-db_mysql=# delimiter ;
--- Create a MySQL-compatible trigger whose trigger_body is a single SQL statement.
-db_mysql=# create trigger animal_trigger_single
-    after insert on animals
-    for each row
-    insert into food(id, foodtype, remark, time_flag) values (1,'ice cream', 'sdsdsdsd', now());
-```
-
-## Helpful Links
-
-[ALTER TRIGGER](../../../../../../reference-guide/sql-syntax/ALTER-TRIGGER.md), [DROP TRIGGER](../../../../../../reference-guide/sql-syntax/DROP-TRIGGER.md), [ALTER TABLE](../../../../../../reference-guide/sql-syntax/ALTER-TABLE.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-view.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-view.md
deleted file mode 100644
index bc0d108d..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-create-view.md
+++ /dev/null
@@ -1,137 +0,0 @@
----
-title: dolphin CREATE VIEW
-summary: dolphin CREATE VIEW
-author: Guo Huan
-date: 2023-05-15
----
-
-# CREATE VIEW
-
-## Function
-
-Creates a view. Unlike a base table, a view is a virtual table: only the definition of the view is stored in the database, not the data it presents, which remains in the underlying base tables. If the data in the base tables changes, the data queried through the view changes accordingly. In this sense, a view is like a window through which users can see the data in the database that interests them, together with its changes.
-
-## Precautions
-
-Users granted the CREATE ANY TABLE privilege can create views in the public schema and in user schemas.
-The view name cannot conflict with a synonym that already exists in the same schema.
-
-The ability to specify the ALGORITHM option syntax is newly added.
-
-## Syntax
-
-```
-CREATE [ OR REPLACE ] [ALGORITHM = {UNDEFINED | MERGE | TEMPTABLE}] [DEFINER = user] [ TEMP | TEMPORARY ] VIEW view_name [ ( column_name [, ...]
) ] - [ WITH ( {view_option_name [= view_option_value]} [, ... ] ) ] - AS query - [ WITH [ CASCADED | LOCAL ] CHECK OPTION ]; -``` - ->![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: -> ->- Use WITH\(security\_barrier\) when creating a view to create a relatively secure view that prevents attackers from using the RAISE statement of the low-cost function to print out hidden base table data. ->- When the view is created, you are not allowed to use REPLACE to modify the column names in the view, nor are you allowed to delete columns. - -## Parameter Description - -- **OR REPLACE** - - When OR REPLACE exists in CREATE VIEW, it means that the view is replaced if it previously existed, but the new query cannot change the column definitions of the original query, including the order, column name, data type, type precision, etc., and only other columns can be added at the end of the list. - -- **ALGORITHM** - - Specify the algorithm, options: UNDEFINED, MERGE, TEMPTABLE. currently only do syntax compatibility, no actual function for the time bein - -- **DEFINER = user** - - Specifies user as the owner of the view. This option is only available in B-compatible mode. - -- **TEMP | TEMPORARY** - - Create a temporary view. - -- **view\_name** - - The name of the view to be created. Can be modified with a pattern. - - Range of values: string, conforming to the identifier naming convention. - -- **column\_name** - - Optional list of names to use as field names for the view. If not given, the field names are taken from the field names in the query. - - Range of values: string, conforming to the identifier naming convention. - -- **view\_option\_name \[= view\_option\_value\]** - - This clause specifies an optional parameter for the view. - - - **security\_barrier** - - This parameter should be used when the VIEW attempts to provide row-level security. - - Value range: Boolean type, TRUE, FALSE. - - - **check\_option** - - Specifies the checking options for this view. - - Range of values: LOCAL, CASCADED. - -- **query** - - SELECT or VALUES statements that provide rows and columns for the view. - -- **WITH [ CASCADED | LOCAL ] CHECK OPTION** - - This option controls the behavior of automatic view updates. INSERT and UPDATE of a view are checked to ensure that the new row satisfies the conditions defined by the view, i.e., that the new row is visible through the view. If the check is not passed, the modification is rejected. If this option is not added, then inserts and updates to a view are allowed to create rows that are not visible through the view. The following checking options are supported: - - - **LOCAL** - - Only conditions directly defined by the view itself are checked, unless the underlying view also defines CHECK OPTION, none of the conditions defined by them are checked. - - - **CASCADED** - - Checks the conditions defined for this view and all underlying views. If only CHECK OPTION is declared and not LOCAL and CASCADED, the default is CASCADED. - - Notes: - - 1. CHECK OPTION is supported only on views that can be auto-updated without INSTEAD OF triggers or INSTEAD rules. if an auto-updating view is defined on a view with INSTEAD OF triggers, CHECK OPTION can be used to check the conditions on the auto-updating view but the conditions on views with INSTEAD OF trigger will not be checked. 
If the view or any underlying relationship has an INSTEAD rule that causes the INSERT or UPDATE command to be rewritten, then all checking options will be ignored in the rewritten query, including any checking from the auto-updatable view defined on the relationship with the STEAD rule. - 2. The CHECK OPTION option is not supported for views based on MySQL foreign tables. - -## Automatically Updatable Views - -Simple views are auto-updatable, and the system allows INSERT, UPDATE, and DELETE statements to be executed on such views. A view is auto-updatable if it meets the following conditions. - -- The view has only one item in the FROM list and must either be a table or another auto-updatable view. -- The top level of the view definition cannot contain a view with a WITH, DISTINCT, GROUP BY, HAVING, LIMIT, OFFSET clause -- The top level of a view definition cannot contain views with aggregate operations (UNION, INTERSET, EXCEPT). -- The target list of a view cannot contain aggregate functions, window functions, or functions that return a collection. - -An auto-updatable view can have a mix of updatable columns as well as non-updatable columns. A column is updatable if it is a simple reference to an updatable column in the underlying relationship. Otherwise, the column is read-only and will report an error if an INSERT or UPDATE statement attempts to assign a value to it. - -If the view is automatically updatable, the system converts any INSERT, UPDATE, or DELETE statement on the view into a corresponding statement on the underlying relationship. - -If an auto-updatable view contains a WHERE condition, the condition restricts which rows of the underlying relationship can be modified by UPDATE and DELETE statements on the view. However, a row that is allowed to be modified by an UPDATE may make that row no longer satisfy the WHERE condition and therefore no longer be visible from the view. Similarly, an INSERT command may insert rows that do not satisfy the WHERE condition and therefore are not visible from the view. CHECK OPTION can be used to prevent INSERT and UPDATE commands from creating such rows that are not visible from the view. - -A more complex view that does not fulfill the above conditions is read-only by default, and the system does not allow INSERT, UPDATE, and DELETE statements to be executed on that view. The effect of an updatable view can be obtained by creating an INSTEAD OF trigger on that view, which must convert an attempted insert on that view into a legal action on another table; see [CREATE TRIGGER](../../../../../../reference-guide/sql-syntax/CREATE-TRIGGER.md). Another way is to create rules (see [CREATE RULE](../../../../../../reference-guide/sql-syntax/CREATE-RULE.md)). - -Note that the user performing an insert, update, or delete on a view must have the corresponding insert, update, or delete privilege on that view. In addition, the owner of the view must have the corresponding privileges on the underlying relationship, but the user performing the execution does not need any privileges on the underlying relationship. - -## Examples - -```sql --- Create a view consisting of the field spcname as pg_default. -MogDB=# CREATE VIEW myView AS - SELECT * FROM pg_tablespace WHERE spcname = 'pg_default'; - --- Check out the view. -MogDB=# SELECT * FROM myView ; - --- Delete myView. 
-MogDB=# DROP VIEW myView; -``` - -## Helpful Links - -[ALTER VIEW](../../../../../../reference-guide/sql-syntax/ALTER-VIEW.md), [DROP VIEW](../../../../../../reference-guide/sql-syntax/DROP-VIEW.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-describe-table.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-describe-table.md deleted file mode 100644 index a6a5ad24..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-describe-table.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: dolphin DESCRIBE -summary: dolphin DESCRIBE -author: zhang cuiping -date: 2022-10-24 ---- - -# DESCRIBE - -## Function - -Views the structure of a specified table. - -## Precautions - -- You need to specify the schema corresponding to the temporary table for query. -- All participating columns of a composite primary key index are displayed as PRI in the Key column. -- All participating columns of the composite unique index are displayed as UNI in the Key column. -- If a column is involved in the creation of multiple indexes, the Key column is displayed based on the first index created in the column. -- The generated column is displayed in Default. -- Table synonyms are not supported. - -## Syntax - -``` -{DESCRIBE | DESC} tbl_name -``` - -## Parameter Description - -- **{DESCRIBE | DESC}** - - ``` - The effect of using DESCRIBE is equivalent to that of using DESCRIBE. - ``` - -- **tbl_name** - - ``` - Table name. You can specify a table name or **schema\_name.table\_name**. - ``` - -## Examples - -```sql ---Create a test table. -MogDB=# CREATE TABLE test2 -MogDB-# ( -MogDB(# id int PRIMARY KEY -MogDB(# ); -NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test2_pkey" for table "test2" -CREATE TABLE -MogDB=# create table test -MogDB-# ( -MogDB(# a SERIAL, -MogDB(# b varchar(10), -MogDB(# c varchar(10), -MogDB(# d varchar(10), -MogDB(# e varchar(10), -MogDB(# f varchar(10), -MogDB(# g varchar(10) DEFAULT 'g', -MogDB(# h varchar(10) NOT NULL, -MogDB(# i int DEFAULT 0, -MogDB(# j int DEFAULT 0, -MogDB(# k int GENERATED ALWAYS AS (i + j) STORED, -MogDB(# l int DEFAULT 0, -MogDB(# m int CHECK (m < 50), -MogDB(# PRIMARY KEY (a, b), -MogDB(# FOREIGN KEY(l) REFERENCES test2(id) -MogDB(# ); -NOTICE: CREATE TABLE will create implicit sequence "test_a_seq" for serial column "test.a" -NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_pkey" for table "test" -CREATE TABLE -MogDB=# CREATE UNIQUE INDEX idx_c on test (c); -CREATE INDEX -MogDB=# CREATE UNIQUE INDEX idx_d_e on test (d, e); -CREATE INDEX -MogDB=# CREATE INDEX idx_f on test (f); -CREATE INDEX - ---View the structure of the test table. 
-MogDB=# desc test; - Field | Type | Null | Key | Default | Extra --------+-----------------------+------+-----+---------------------------------+------- - a | integer | NO | PRI | nextval('test_a_seq'::regclass) | - b | character varying(10) | NO | PRI | NULL | - c | character varying(10) | YES | UNI | NULL | - d | character varying(10) | YES | UNI | NULL | - e | character varying(10) | YES | UNI | NULL | - f | character varying(10) | YES | MUL | NULL | - g | character varying(10) | YES | | 'g'::character varying | - h | character varying(10) | NO | | NULL | - i | integer | YES | | 0 | - j | integer | YES | | 0 | - k | integer | YES | | (i + j) | - l | integer | YES | MUL | 0 | - m | integer | YES | | NULL | -(13 rows) - -MogDB=# desc public.test; - Field | Type | Null | Key | Default | Extra --------+-----------------------+------+-----+---------------------------------+------- - a | integer | NO | PRI | nextval('test_a_seq'::regclass) | - b | character varying(10) | NO | PRI | NULL | - c | character varying(10) | YES | UNI | NULL | - d | character varying(10) | YES | UNI | NULL | - e | character varying(10) | YES | UNI | NULL | - f | character varying(10) | YES | MUL | NULL | - g | character varying(10) | YES | | 'g'::character varying | - h | character varying(10) | NO | | NULL | - i | integer | YES | | 0 | - j | integer | YES | | 0 | - k | integer | YES | | (i + j) | - l | integer | YES | MUL | 0 | - m | integer | YES | | NULL | -(13 rows) - -MogDB=# describe public.test; - Field | Type | Null | Key | Default | Extra --------+-----------------------+------+-----+---------------------------------+------- - a | integer | NO | PRI | nextval('test_a_seq'::regclass) | - b | character varying(10) | NO | PRI | NULL | - c | character varying(10) | YES | UNI | NULL | - d | character varying(10) | YES | UNI | NULL | - e | character varying(10) | YES | UNI | NULL | - f | character varying(10) | YES | MUL | NULL | - g | character varying(10) | YES | | 'g'::character varying | - h | character varying(10) | NO | | NULL | - i | integer | YES | | 0 | - j | integer | YES | | 0 | - k | integer | YES | | (i + j) | - l | integer | YES | MUL | 0 | - m | integer | YES | | NULL | -(13 rows) -``` - -## Helpful Links - -N/A \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-do.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-do.md deleted file mode 100644 index 5c4b38d5..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-do.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: dolphin DO -summary: dolphin DO -author: Guo Huan -date: 2023-05-15 ---- - -# DO - -## Function - -Execute an anonymous block of code. - -A code block is viewed as a piece of function body with no arguments and a return value of type void. its parsing and execution occur at the same moment. - -Or, it executes an expression and does not return a result. - -## Precautions - -Compared to the original MogDB, dolphin's changes to the DO syntax are: - -The DO expr_list syntax is added to the original syntax to execute expressions without returning results. 
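-
-For example, both of the following statements are accepted under the new form (a quick sketch; the formal syntax and more examples follow below):
-
-```sql
-DO pg_sleep(1);
-DO 1+1, 2+2;
-```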
- -## Syntax - -``` -DO [ LANGUAGE lang_name ] code; -``` - -Or - -``` -DO expr[,expr...]; -``` - -## Parameter Description - -- **lang\_name** - - The name of the program language used to parse the code; if default, the default language is plpgsql. - -- **code** - - Program language code that can be executed. The program language must be specified as a string for this to work. - -- **expr** - - Expressions, multiple expressions are separated using commas, and expression support is referenced to [Expression](../../../../../../reference-guide/sql-reference/expressions/expressions.md)。 - -## Examples - -```sql --- Executes the expression and does not return a result. -MogDB=# DO 1; - -MogDB=# DO pg_sleep(1); - --- Execute multiple expressions without returning results. - -MogDB=# DO 1+2; -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-database.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-database.md deleted file mode 100644 index abea6802..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-database.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: dolphin DROP DATABASE -summary: dolphin DROP DATABASE -author: Guo Huan -date: 2023-05-15 ---- - -# DROP DATABASE - -## Function - -Delete a database or a schema. - -## Precautions - -Compared to the original MogDB, dolphin's changes to the DROP DATABASE syntax are: - -- Add DATABASE resolves to SCHEMA meaning. - -## Syntax - -``` -DROP DATABASE [ IF EXISTS ] database_name ; -``` - -## Parameter Description - -- **IF EXISTS** - - If the specified database does not exist, issue a NOTICE instead of throwing an error. - -- **database\_name** - - The name of the database to delete. - - Range of values: string, name of an existing database. - - >![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**: - > - >For B compatibility, if dolphin.b_compatibility_mode is on, the syntax is equivalent to the DROP SCHEMA syntax without dolphin; if dolphin.b_compatibility_mode is off, the syntax is the DROP DATABASE syntax without dolphin. - -## Examples - -See [Examples](dolphin-create-database.md#Examples) for CREATE DATABASE. - -## Helpful Links - -[CREATE DATABASE](dolphin-create-database.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-index.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-index.md deleted file mode 100644 index 39abac23..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-index.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: dolphin DROP INDEX -summary: dolphin DROP INDEX -author: Guo Huan -date: 2023-05-15 ---- - -# DROP INDEX - -## Function - -Delete the index. - -## Precautions - -Only the owner of the index or the user with INDEX privileges on the table where the index is located has the privilege to execute the DROP INDEX command, and the system administrator has this privilege by default. 
-
-For a global temporary table, once any session has initialized the global temporary table object (by creating the table or inserting data into it for the first time), other sessions cannot drop indexes on that table.
-
-The ALGORITHM option syntax is newly supported.
-
-## Syntax
-
-```
-DROP INDEX [ CONCURRENTLY ] [ IF EXISTS ]
-    index_name [, ...] [ CASCADE | RESTRICT ] [ALGORITHM [=] {DEFAULT | INPLACE | COPY}];
-```
-
-## Parameter Description
-
-- **CONCURRENTLY**
-
-  Deletes an index without locking. Deleting an index normally blocks other statements' access to the table the index depends on; with this keyword, the deletion proceeds without blocking.
-
-  This option allows only one index name to be specified, and the CASCADE option is not supported.
-
-  A normal DROP INDEX command can be executed inside a transaction, but DROP INDEX CONCURRENTLY cannot.
-
-- **IF EXISTS**
-
-  If the specified index does not exist, a NOTICE is issued instead of an error.
-
-- **index\_name**
-
-  The name of the index to delete.
-
-  Range of values: indexes that already exist.
-
-- **CASCADE | RESTRICT**
-
-  - CASCADE: allows cascading deletion of objects that depend on the index.
-  - RESTRICT (default): if any objects depend on the index, the index cannot be deleted.
-
-- **ALGORITHM**
-
-  Specifies the algorithm. Options: DEFAULT, INPLACE, COPY. Currently this option is supported for syntax compatibility only and has no actual effect.
-
-## Examples
-
-See the [Examples](../../../../../../reference-guide/sql-syntax/CREATE-INDEX.md#Example) of CREATE INDEX.
-
-## Helpful Links
-
-[ALTER INDEX](../../../../../../reference-guide/sql-syntax/ALTER-INDEX.md), [CREATE INDEX](../../../../../../reference-guide/sql-syntax/CREATE-INDEX.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-tablespace.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-tablespace.md
deleted file mode 100644
index cf7935c2..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-drop-tablespace.md
+++ /dev/null
@@ -1,50 +0,0 @@
----
-title: dolphin DROP TABLESPACE
-summary: dolphin DROP TABLESPACE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# DROP TABLESPACE
-
-## Function
-
-Deletes a tablespace.
-
-## Precautions
-
-Compared with the original MogDB, Dolphin modifies the DROP TABLESPACE syntax as follows:
-
-The ENGINE [=] engine_name option is added for syntax compatibility only.
-
-## Syntax
-
-```
-DROP TABLESPACE [ IF EXISTS ] tablespace_name [ENGINE [=] engine_name];
-```
-
-## Parameter Description
-
-- **IF EXISTS**
-
-  Sends a notice instead of an error if the specified tablespace does not exist.
-
-- **tablespace_name**
-
-  Specifies the name of the tablespace.
-
-  Value range: an existing tablespace name
-
-- **engine_name**
-
-  This parameter has no effect and is accepted for syntax compatibility only.
-
-  Value: any combination of characters
-
-## Examples
-
-For details, see [CREATE TABLESPACE](./dolphin-create-tablespace.md).
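-
-As an additional minimal sketch (assuming the tablespace from that example exists; ENGINE is accepted for syntax compatibility only):
-
-```sql
-DROP TABLESPACE IF EXISTS t_tbspace ENGINE = test_engine;
-DROP TABLESPACE
-```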
-
-## Helpful Links
-
-[ALTER TABLESPACE](./dolphin-alter-tablespace.md), [CREATE TABLESPACE](./dolphin-create-tablespace.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-execute.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-execute.md
deleted file mode 100644
index afacd2f1..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-execute.md
+++ /dev/null
@@ -1,63 +0,0 @@
----
-title: dolphin EXECUTE
-summary: dolphin EXECUTE
-author: Guo Huan
-date: 2023-05-15
----
-
-# EXECUTE
-
-## Function
-
-Executes a previously prepared statement. Because a prepared statement exists only for the duration of a session, it must have been created by a PREPARE statement earlier in the current session.
-
-## Precautions
-
-- If the PREPARE statement that created the prepared statement declared parameters, a compatible set of parameters must be passed to the EXECUTE statement; otherwise an error is generated.
-- Compared to the original MogDB, dolphin's modification to the PREPARE syntax is that it supports the EXECUTE USING syntax.
-
-## Syntax
-
-```
-EXECUTE name [ ( parameter [, ...] ) ];
-EXECUTE name USING parameter [, ...];
-```
-
-## Parameter Description
-
-- **name**
-
-  The name of the prepared statement to be executed.
-
-- **parameter**
-
-  The concrete value for a parameter of the prepared statement. It must be an expression that yields a value compatible with the data type of that parameter as determined when the prepared statement was created.
-
-## Examples
-
-```sql
--- Create the table reason.
-MogDB=# CREATE TABLE tpcds.reason (
-    CD_DEMO_SK          INTEGER        NOT NULL,
-    CD_GENDER           character(16),
-    CD_MARITAL_STATUS   character(100)
-)
-;
-
--- Insert data.
-MogDB=# INSERT INTO tpcds.reason VALUES(51, 'AAAAAAAADDAAAAAA', 'reason 51');
-
--- Create the table reason_t1.
-MogDB=# CREATE TABLE tpcds.reason_t1 AS TABLE tpcds.reason;
-
--- Create a prepared statement for an INSERT statement and then execute it.
-MogDB=# PREPARE insert_reason(integer,character(16),character(100)) AS INSERT INTO tpcds.reason_t1 VALUES($1,$2,$3);
-
-MogDB=# EXECUTE insert_reason(52, 'AAAAAAAADDAAAAAA', 'reason 52');
-
-MogDB=# EXECUTE insert_reason USING 52, 'AAAAAAAADDAAAAAA', 'reason 52';
-
--- Delete the tables reason and reason_t1.
-MogDB=# DROP TABLE tpcds.reason; -MogDB=# DROP TABLE tpcds.reason_t1; -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-explain.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-explain.md deleted file mode 100644 index f678e66b..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-explain.md +++ /dev/null @@ -1,304 +0,0 @@ ---- -title: dolphin EXPLAIN -summary: dolphin EXPLAIN -author: Guo Huan -date: 2023-05-15 ---- - -# EXPLAIN - -## Function - -EXPLAIN and [DESCRIBE](dolphin-describe-table.md) are synonyms for each other, and can be used to view the structure of a specified table, or to view the execution plan of a specified SQL. - -Please refer to [DESCRIBE](dolphin-describe-table.md) for the syntax of viewing a table structure, and the following content only describes the part of viewing the execution plan. - -The execution plan will show what kind of scanning method will be used for the table referenced by the SQL statement, e.g. simple sequential scanning, index scanning, etc. If multiple tables are referenced, the execution plan will also show the JOIN algorithm used. - -The most critical part of the execution plan is the statement's expected execution overhead, which is the plan generator estimates how long it will take to execute the statement. - -If the ANALYZE option is specified, the statement is executed, and then statistics are displayed based on the actual run results, including the total time overhead in milliseconds and the total number of rows actually returned within each plan node. This is useful for determining whether the plan generator's estimates are close to reality. - -## Precautions - -- The statement is executed when the ANALYZE option is specified. If the user wants to use EXPLAIN to analyze INSERT, UPDATE, DELETE, CREATE TABLE AS, or EXECUTE statements and does not want to alter the data (executing these statements affects the data), use the following method. - - ``` - START TRANSACTION; - EXPLAIN ANALYZE ...; - ROLLBACK; - ``` - -- Since the parameters DETAIL, NODES, and NUM_NODES are functions in distributed mode, their use is prohibited in standalone mode. If they are used, the following error is generated. - - ```sql - MogDB=# create table student(id int, name char(20)); - CREATE TABLE - MogDB=# explain (nodes true) insert into student values(5,'a'),(6,'b'); - ERROR: unrecognized EXPLAIN option "nodes" - MogDB=# explain (num_nodes true) insert into student values(5,'a'),(6,'b'); - ERROR: unrecognized EXPLAIN option "num_nodes" - ``` - -## Syntax - -- Displays the execution plan of the SQL statement, supports multiple options, and has no requirement for the order of the options. - - ``` - {EXPLAIN | DESCRIBE | DESC} [ ( option [, ...] ) ] statement; - ``` - - or - - ``` - {EXPLAIN | DESCRIBE | DESC} [FORMAT = format_name] statement; - ``` - - or - - ``` - {EXPLAIN | DESCRIBE | DESC} [EXTENDED] statement; - ``` - - where {EXPLAIN | DESCRIBE | DESC} means that using DESCRIBE, DESC and EXPLAIN are equivalent. 
- - The syntax of the option clause is: - - ``` - ANALYZE [ boolean ] | - ANALYSE [ boolean ] | - VERBOSE [ boolean ] | - COSTS [ boolean ] | - CPU [ boolean ] | - DETAIL [ boolean ] | (Not available) - NODES [ boolean ] | (Not available) - NUM_NODES [ boolean ] | (Not available) - BUFFERS [ boolean ] | - TIMING [ boolean ] | - PLAN [ boolean ] | - FORMAT { TEXT | XML | JSON | YAML } - ``` - -- Display the execution plan of the SQL statement and give the options in order. - - ``` - {EXPLAIN | DESCRIBE | DESC} { [ { ANALYZE | ANALYSE } ] [ VERBOSE ] | PERFORMANCE } statement; - ``` - -## Parameter Description - -- **statement** - - Specifies the SQL statement to be analyzed. - -- **ANALYZE boolean | ANALYSE boolean** - - Displays the actual runtime and other statistics. - - Takes a range of values: - - - TRUE (default): Displays the actual running time and other statistics. - - FALSE: Do not display. - -- **VERBOSE boolean** - - Displays additional information about the program. - - Range of values: - - - TRUE (default): additional information is displayed. - - FALSE: not displayed. - -- **COSTS boolean**. - - Includes the estimated total cost for each planning node, as well as the estimated number of rows and the width of each row. - - Takes a range of values: - - - TRUE (default value): the estimated total cost and width are displayed. - - FALSE: not displayed. - -- **CPU boolean**. - - Prints information about CPU usage. - - Range of values: - - - TRUE (default value): displays CPU utilization. - - FALSE: Not displayed. - -- **DETAIL boolean** (not available) - - Prints information on the database node. - - Takes a range of values: - - - TRUE (default value): prints information on the database node. - - FALSE: Do not print. - -- **NODES boolean** (not available) - - Prints information on the node on which the query was executed. - - Takes a range of values: - - - TRUE (default): prints information about the node on which the query was executed. - - FALSE: do not print. - -- **NUM_NODES boolean** (not available) - - Prints information about the number of nodes in execution. - - Takes a range of values: - - - TRUE (default value): prints information about the number of database nodes. - - FALSE: not printed. - -- **BUFFERS boolean**. - - Includes information about buffer usage. - - Takes a range of values: - - - TRUE: Displays the usage of the buffers. - - FALSE (default value): not displayed. - -- **TIMING boolean**. - - Includes information about the actual startup time and time spent on the output node. - - Takes a range of values: - - - TRUE (default value): information about the startup time and the time spent on the output node is displayed. - - FALSE: not displayed. - -- **PLAN**. - - Whether to store execution plans in plan_table. When this option is on, the execution plan is stored in PLAN_TABLE and is not printed to the current screen, so this option cannot be used in conjunction with other options when it is on. - - Range of values: - - - ON (default): stores the execution plan in PLAN_TABLE and does not print to the current screen. Successful execution returns EXPLAIN SUCCESS. - - OFF: does not store the execution plan, prints the execution plan to the current screen. - -- **FORMAT**. - - Specifies the output format. - - The range of values: TEXT, XML, JSON and YAML. - - Default: TEXT. - -- **PERFORMANCE** - - When this option is used, all relevant information from the execution is printed. - -- **format_name** - - Specifies the output format. 
- - Range of values: JSON or TRADITIONAL. - - Default: TRADITIONAL - -- **EXTENDED** - - Optional, makes no difference. - -## Examples - -```sql --- 1. First of all, create a compatibility for the B mode of the database, and switch to the database -MogDB=# create database MogDB with dbcompatibility 'B'; -CREATE DATABASE -MogDB=# \c MogDB -Non-SSL connection (SSL connection is recommended when requiring high-security) -You are now connected to database "MogDB" as user "omm". - --- 2. Create a table on the new database -MogDB=# create table test_t(c1 int, c2 varchar(30)); -CREATE TABLE - --- 3. View the SQL execution plan -MogDB=# explain select * from test_t; - QUERY PLAN ----------------------------------------------------------- - Seq Scan on test_t (cost=0.00..17.29 rows=729 width=82) -(1 row) - --- 4. You can specify the output format when viewing the plan --- Note: The json format is only supported if explain_perf_mode is normal. -MogDB=# SET explain_perf_mode=normal; -SET -MogDB=# explain (format json) select * from test_t; - QUERY PLAN ----------------------------------- - [ + - { + - "Plan": { + - "Node Type": "Seq Scan", + - "Relation Name": "test_t",+ - "Alias": "test_t", + - "Startup Cost": 0.00, + - "Total Cost": 17.29, + - "Plan Rows": 729, + - "Plan Width": 82 + - } + - } + - ] -(1 row) - -MogDB=# explain format=json select * from test_t; - QUERY PLAN ----------------------------------- - [ + - { + - "Plan": { + - "Node Type": "Seq Scan", + - "Relation Name": "test_t",+ - "Alias": "test_t", + - "Startup Cost": 0.00, + - "Total Cost": 17.29, + - "Plan Rows": 729, + - "Plan Width": 82 + - } + - } + - ] -(1 row) - --- 5. If the columns of the where clause in a query are indexed, different execution plans may be displayed when the conditions are different, or the amount of data, etc. is different -MogDB=# create index idx_test_t_c1 on test_t(c1); -CREATE INDEX -MogDB=# insert into test_t values(generate_series(1, 200), 'hello MogDB'); -INSERT 0 200 -MogDB=# explain select c1, c2 from test_t where c1=100; - QUERY PLAN ----------------------------------------------------------------------------- - Bitmap Heap Scan on test_t (cost=4.28..12.74 rows=4 width=82) - Recheck Cond: (c1 = 100) - -> Bitmap Index Scan on idx_test_t_c1 (cost=0.00..4.28 rows=4 width=0) - Index Cond: (c1 = 100) -(4 rows) - --- 6. You can specify whether or not to show overhead with the costs option. -MogDB=# explain (costs false) select * from test_t where c1=100; - QUERY PLAN ------------------------------------------- - Bitmap Heap Scan on test_t - Recheck Cond: (c1 = 100) - -> Bitmap Index Scan on idx_test_t_c1 - Index Cond: (c1 = 100) -(4 rows) - --- 7. In a B-compatible database, explain and desc (describe) are equivalent and can be used to view table structure information. 
-MogDB=# explain test_t;
- Field |         Type          | Null | Key | Default | Extra
--------+-----------------------+------+-----+---------+-------
- c1    | integer               | YES  | MUL | NULL    |
- c2    | character varying(30) | YES  |     | NULL    |
-(2 rows)
-```
-
-## Helpful Links
-
-[ANALYZE | ANALYSE](../../../../../../reference-guide/sql-syntax/ANALYZE-ANALYSE.md), [DESCRIBE](dolphin-describe-table.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-flush-binary-logs.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-flush-binary-logs.md
deleted file mode 100644
index d9fcdacf..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-flush-binary-logs.md
+++ /dev/null
@@ -1,36 +0,0 @@
----
-title: dolphin FLUSH BINARY LOGS
-summary: dolphin FLUSH BINARY LOGS
-author: Guo Huan
-date: 2023-05-15
----
-
-# FLUSH BINARY LOGS
-
-## Function
-
-Manually archives pg_xlog logs.
-
-## Precautions
-
-N/A
-
-## Syntax
-
-```
-FLUSH BINARY LOGS
-```
-
-## Parameter Description
-
-N/A
-
-## Examples
-
-```sql
-MogDB=# flush binary logs;
- pg_switch_xlog
-----------------
- 0/FE8DD60
-(1 row)
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-grant-revoke-proxy.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-grant-revoke-proxy.md
deleted file mode 100644
index fdd3476c..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-grant-revoke-proxy.md
+++ /dev/null
@@ -1,113 +0,0 @@
----
-title: dolphin GRANT/REVOKE PROXY
-summary: dolphin GRANT/REVOKE PROXY
-author: zhang cuiping
-date: 2022-10-24
----
-
-# GRANT/REVOKE PROXY
-
-## Function
-
-Grants or revokes proxy permissions.
-
-## Precautions
-
-N/A
-
-## Syntax
-
-```
-GRANT PROXY ON user
-    TO user [, user] ...
-    [WITH GRANT OPTION]
-
-REVOKE PROXY ON user
-    FROM user [, user] ...
-```
-
-## Parameter Description
-
-- **{PROXY}**
-
-  Syntax keyword.
-
-- **user**
-
-  User (role) name.
-
-## Examples
-
-```sql
---Create a simple table.
-MogDB=# CREATE SCHEMA tst_schema1;
-MogDB=# SET SEARCH_PATH TO tst_schema1;
-MogDB=# CREATE TABLE tst_t1
-(
-id int,
-name varchar(20)
-);
-INSERT INTO tst_t1 values(20220101, 'proxy_example');
-
---Create users.
-MogDB=# DROP ROLE if EXISTS test_proxy_u1;
-MogDB=# CREATE ROLE test_proxy_u1 IDENTIFIED BY 'test_proxy_u1@123';
-MogDB=# DROP ROLE if EXISTS test_proxy_u2;
-MogDB=# CREATE ROLE test_proxy_u2 IDENTIFIED BY 'test_proxy_u2@123';
-MogDB=# DROP ROLE if EXISTS test_proxy_u3;
-MogDB=# CREATE ROLE test_proxy_u3 IDENTIFIED BY 'test_proxy_u3@123';
-
---Grant schema and table permissions.
-MogDB=# GRANT ALL ON SCHEMA tst_schema1 TO test_proxy_u1;
-MogDB=# GRANT ALL ON SCHEMA tst_schema1 TO test_proxy_u2;
-MogDB=# GRANT ALL ON SCHEMA tst_schema1 TO test_proxy_u3;
-MogDB=# GRANT ALL ON tst_t1 to test_proxy_u1;
-
---Test permissions (no permissions).
-MogDB=# SET ROLE test_proxy_u2 PASSWORD 'test_proxy_u2@123';
-MogDB=> SELECT * FROM tst_schema1.tst_t1;
-ERROR: permission denied for relation tst_t1
-DETAIL: N/A
-
---Test permissions (with proxy permissions).
-MogDB=> RESET ROLE;
-MogDB=# GRANT PROXY ON test_proxy_u1 TO test_proxy_u2;
-MogDB=# SET ROLE test_proxy_u2 PASSWORD 'test_proxy_u2@123';
-MogDB=> SELECT * FROM tst_schema1.tst_t1;
-    id    |     name
-----------+---------------
- 20220101 | proxy_example
-
---Test permissions (cascading test: usr_1->usr_2->usr_3).
-MogDB=> RESET ROLE;
-MogDB=# GRANT PROXY ON test_proxy_u2 TO test_proxy_u3;
-MogDB=# SET ROLE test_proxy_u3 PASSWORD 'test_proxy_u3@123';
-MogDB=> SELECT * FROM tst_schema1.tst_t1;
-    id    |     name
-----------+---------------
- 20220101 | proxy_example
-
---Test permissions granted by the proxy (with grant option).
-MogDB=> RESET ROLE;
-RESET
-MogDB=# SET ROLE test_proxy_u2 PASSWORD 'test_proxy_u2@123';
-SET
-MogDB=> grant proxy on test_proxy_u1 to test_proxy_u3;
-ERROR: must have admin option on role "test_proxy_u1"
-MogDB=> RESET ROLE;
-MogDB=# grant proxy on test_proxy_u1 to test_proxy_u2 with grant option;
-MogDB=# SET ROLE test_proxy_u2 PASSWORD 'test_proxy_u2@123';
-MogDB=> grant proxy on test_proxy_u1 to test_proxy_u3;
-
---Test the proxy permission revoking.
-MogDB=> revoke proxy on test_proxy_u1 from test_proxy_u3;
-MogDB=> revoke proxy on test_proxy_u1 from test_proxy_u2;
-MogDB=> SET ROLE test_proxy_u3 password 'test_proxy_u3@123';
-MogDB=> SELECT * FROM tst_schema1.tst_t1;
-ERROR: permission denied for relation tst_t1
-DETAIL: N/A
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-grant.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-grant.md
deleted file mode 100644
index 8fbd9266..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-grant.md
+++ /dev/null
@@ -1,112 +0,0 @@
----
-title: dolphin GRANT
-summary: dolphin GRANT
-author: zhang cuiping
-date: 2022-10-24
----
-
-# GRANT
-
-## Function
-
-GRANT is used to grant permissions to one or more roles.
-
-## Precautions
-
-This section describes only the new syntax of Dolphin. The original syntax of MogDB is not deleted or modified. The ALTER ROUTINE, CREATE ROUTINE, CREATE TEMPORARY TABLES, CREATE USER, CREATE TABLESPACE, and INDEX permissions are added.
-
-## Syntax
-
-- The ALTER ROUTINE permission is added.
-
-The permission is basically the same as the ALTER permission on functions and procedures.
-
-The syntax after modification is described as follows:
-
-```
-GRANT { { EXECUTE | ALTER ROUTINE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
-ON {FUNCTION {function_name ( [ {[ argmode ] [ arg_name ] arg_type} [, ...] ] )} | PROCEDURE {proc_name ( [ {[ argmode ] [ arg_name ] arg_type} [, ...] ] )} [, ...] | ALL FUNCTIONS IN SCHEMA schema_name [, ...] | ALL PROCEDURE IN SCHEMA schema_name [, ...] | schema_name.*}
-TO { [ GROUP ] role_name | PUBLIC } [, ...]
-[ WITH GRANT OPTION ];
-```
-
-- The CREATE ROUTINE permission is added.
- -The permission is basically the same as that of CREATE ANY FUNCTION. - -The syntax after modification is described as follows: - -``` -GRANT { CREATE ANY TABLE | ALTER ANY TABLE | DROP ANY TABLE | SELECT ANY TABLE | INSERT ANY TABLE | UPDATE ANY TABLE | - DELETE ANY TABLE | CREATE ANY SEQUENCE | CREATE ANY INDEX | CREATE ANY FUNCTION | CREATE ROUTINE | EXECUTE ANY FUNCTION | - CREATE ANY PACKAGE | EXECUTE ANY PACKAGE | CREATE ANY TYPE } [, ...] - [ON *.*] - TO [ GROUP ] role_name [, ...] - [ WITH ADMIN OPTION ]; -``` - -- The CREATE TEMPORARY TABLES permission is added. - -The permission is basically the same as that of TEMPORARY. - -The syntax after modification is described as follows: - -``` -GRANT { { CREATE | CONNECT | CREATE TEMPORARY TABLES | TEMPORARY | TEMP | ALTER | DROP | COMMENT } [, ...] - | ALL [ PRIVILEGES ] } - ON { DATABASE database_name [, ...] | database_name.* } - TO { [ GROUP ] role_name | PUBLIC } [, ...] - [ WITH GRANT OPTION ]; -``` - -- The CREATE USER permission is added. - -It controls users' permission to create new users, which is basically the same as the CREATEROLE and NOCREATEROLE permissions of users. - -The new syntax is described as follows: - -``` -GRANT CREATE USER ON *.* TO ROLE_NAME; -``` - -- The CREATE TABLESPACE permission is added. - -It controls users' permission to create tablespaces. - -The new syntax is described as follows: - -``` -GRANT CREATE TABLESPACE ON *.* TO ROLE_NAME; -``` - -- The INDEX permission is added. - -The permission is basically the same as that of CREATE ANY INDEX. - -The syntax after modification is described as follows: - -``` -GRANT INDEX - ON *.* - TO [ GROUP ] role_name [, ...] - [ WITH ADMIN OPTION ]; -``` - -## Parameter Description - -N/A - -## Examples - -``` -GRANT ALTER ROUTINE ON FUNCTION TEST TO USER_TESTER; -GRANT CREATE ANY FUNCTION TO USER_TESTER; -GRANT CREATE TEMPORARY TABLES ON DATABASE DATABASE_TEST TO USER_TESTER; -GRANT CREATE USER ON *.* TO USER_TESTER; -GRANT CREATE TABLESPACE ON *.* TO USER_TESTER; -GRANT INDEX TO TEST_USER; -``` - -## Helpful Links - -[GRANT](../../../../../../reference-guide/sql-syntax/GRANT.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-insert.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-insert.md deleted file mode 100644 index d36b4ff9..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-insert.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: dolphin INSERT -summary: dolphin INSERT -author: zhang cuiping -date: 2022-10-24 ---- - -# INSERT - -## Function - -Inserts new rows into a table. - -## Precautions - -- This section describes only the new syntax of Dolphin. The original syntax of MogDB is not deleted or modified. -- The operation of inserting null values is added. The insertion effect varies according to the value of sql_mode. -- The set_clause_values clause is added. - -## Syntax - -``` -[ WITH [ RECURSIVE ] with_query [, ...] ] -INSERT [/*+ plan_hint */] [IGNORE] [INTO] table_name [partition_clause] [ AS alias ] [ ( column_name [, ...] ) ] - { DEFAULT VALUES - | [ VALUES | VALUE ] [{( { expression | DEFAULT } [, ...] ) }][, ...] 
-    | query
-    | set_clause_values }
-    [ ON DUPLICATE KEY UPDATE { NOTHING | { column_name = { expression | DEFAULT } } [, ...] [ WHERE condition ] }]
-    [ RETURNING {* | {output_expression [ [ AS ] output_name ] }[, ...]} ];
-```
-
-## Parameter Description
-
-- **IGNORE**
-
-  If an error occurs in a specified scenario when an INSERT statement containing the keyword IGNORE is executed, the error is degraded to a warning and the statement execution continues without affecting other data operations. Error degradation can be enabled in the following scenarios:
-
-  1. The non-null constraint is violated.
-
-     If the executed SQL statement violates the non-null constraint of the table, you can use this hint to degrade errors to warnings and use one of the following strategies based on the value of the GUC parameter **sql_ignore_strategy**:
-
-     - If **sql_ignore_strategy** is set to **ignore_null**, the INSERT operations on rows that violate non-null constraints are ignored and the remaining data operations are performed.
-
-     - If **sql_ignore_strategy** is set to **overwrite_null**, the null value that violates the constraint is overwritten by the default value of the target type, and the remaining data operations are performed.
-
-     > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** The GUC parameter sql_ignore_strategy is of the enumeration type. The options are ignore_null and overwrite_null.
-
-  2. The unique constraint is violated.
-
-     If the executed SQL statement violates the unique constraint of a table, you can use this hint to degrade errors to warnings, ignore the INSERT operation on the row that violates the constraint, and continue to perform the remaining data operations.
-
-  3. The partitioned table cannot match a valid partition.
-
-     When INSERT is performed on a partitioned table, if a row of data does not match a valid partition of the table, you can use this hint to degrade errors to warnings, ignore the row, and continue to perform operations on the remaining data.
-
-  4. Failed to convert the inserted value to the target column type.
-
-     During the execution of the INSERT statement, if a new value does not match the type of the target column, you can use this hint to degrade errors to warnings and continue the execution based on the new value type and the target column type:
-
-     - When the new value type and column type are both numeric:
-
-       If the new value is within the range of the column type, insert the value directly. If the new value is beyond the range of the column type, replace the value with the maximum or minimum value of the column type.
-
-     - When the new value type and column type are both character strings:
-
-       If the length of the new value is within the range specified by the column type, insert the value directly. If the length of the new value is beyond the range specified by the column type, the first n characters allowed by the column type are retained.
-
-     - When the new value type cannot be converted to the column type:
-
-       Insert the default value of the column type.
-
-  The IGNORE keyword does not support column store and cannot take effect in column-store tables.
-
-- **VALUES()**
-
-  When the GUC parameter **sql_mode** is set to **strict_all_tables**, NULL is inserted into all columns. Otherwise, if the corresponding column name has a default value, the default value is inserted. If the corresponding column name does not have a default value, check whether the corresponding column has the not_null constraint. If not, NULL is inserted.
If yes, the basic value of the type is inserted; for details about basic values, see the pg_type_basic_value view.
-
-- **set_clause_values**
-
-  It is equivalent to **insert into table_name set column_name = value, column_name = value, ...**, where set_clause_values stands for **set column_name = value** and multiple column values are separated by commas (,). This is an extended syntax of INSERT INTO that prevents write errors caused by a mismatched order of fields and values during INSERT INTO. A usage sketch follows the IGNORE examples below.
-
-## Examples
-
-### IGNORE
-
-To use the ignore_error hint, you need to create a database named **db_ignore** in B-compatible mode.
-
-```
-create database db_ignore dbcompatibility 'B';
-\c db_ignore
-```
-
-- **Ignore the non-null constraint.**
-
-  ```sql
-  db_ignore=# create table t_not_null(num int not null);
-  CREATE TABLE
-  -- The ignore strategy is used.
-  db_ignore=# set sql_ignore_strategy = 'ignore_null';
-  SET
-  db_ignore=# insert /*+ ignore_error */ into t_not_null values(null), (1);
-  WARNING: null value in column "num" violates not-null constraint
-  DETAIL: Failing row contains (null).
-  INSERT 0 1
-  db_ignore=# select * from t_not_null ;
-   num
-  -----
-     1
-  (1 row)
-
-  -- The overwrite strategy is used.
-  db_ignore=# delete from t_not_null;
-  db_ignore=# set sql_ignore_strategy = 'overwrite_null';
-  SET
-  db_ignore=# insert /*+ ignore_error */ into t_not_null values(null), (1);
-  WARNING: null value in column "num" violates not-null constraint
-  DETAIL: Failing row contains (null).
-  INSERT 0 2
-  db_ignore=# select * from t_not_null ;
-   num
-  -----
-     0
-     1
-  (2 rows)
-  ```
-
-- **Ignore the unique constraint.**
-
-  ```sql
-  db_ignore=# create table t_unique(num int unique);
-  NOTICE: CREATE TABLE / UNIQUE will create implicit index "t_unique_num_key" for table "t_unique"
-  CREATE TABLE
-  db_ignore=# insert into t_unique values(1);
-  INSERT 0 1
-  db_ignore=# insert /*+ ignore_error */ into t_unique values(1),(2);
-  WARNING: duplicate key value violates unique constraint in table "t_unique"
-  INSERT 0 1
-  db_ignore=# select * from t_unique;
-   num
-  -----
-     1
-     2
-  (2 rows)
-  ```
-
-- **Ignore the partitioned table that cannot match a valid partition.**
-
-  ```sql
-  db_ignore=# CREATE TABLE t_ignore
-  db_ignore-# (
-  db_ignore(# col1 integer NOT NULL,
-  db_ignore(# col2 character varying(60)
-  db_ignore(# ) WITH(segment = on) PARTITION BY RANGE (col1)
-  db_ignore-# (
-  db_ignore(#     PARTITION P1 VALUES LESS THAN(5000),
-  db_ignore(#     PARTITION P2 VALUES LESS THAN(10000),
-  db_ignore(#     PARTITION P3 VALUES LESS THAN(15000)
-  db_ignore(# );
-  CREATE TABLE
-  db_ignore=# insert /*+ ignore_error */ into t_ignore values(20000);
-  WARNING: inserted partition key does not map to any table partition
-  INSERT 0 0
-  db_ignore=# select * from t_ignore ;
-   col1 | col2
-  ------+------
-  (0 rows)
-  ```
-
-- **Failed to convert the inserted value to the target column type.**
-
-  ```sql
-  -- When the new value type and column type are both numeric:
-  db_ignore=# create table t_tinyint(num tinyint);
-  CREATE TABLE
-  db_ignore=# insert /*+ ignore_error */ into t_tinyint values(10000);
-  WARNING: tinyint out of range
-  CONTEXT: referenced column: num
-  INSERT 0 1
-  db_ignore=# select * from t_tinyint;
-   num
-  -----
-   255
-  (1 row)
-
-  -- When the new value type and column type are both character strings:
-  db_ignore=# create table t_varchar5(content varchar(5));
-  CREATE TABLE
-  db_ignore=# insert /*+ ignore_error */ into t_varchar5 values('abcdefghi');
-  WARNING: value too long for type character varying(5)
-  CONTEXT: referenced column: content
-  INSERT 0 1
-  db_ignore=# select * from t_varchar5 ;
-   content
-  ---------
-   abcde
-  (1 row)
-  ```
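-The set_clause_values form is not covered by the examples above; the following is a minimal sketch based on the description in Parameter Description (the table t_set and the output lines are illustrative, not from a real run):
-
-```sql
--- Hypothetical table for the set_clause_values sketch.
-db_ignore=# create table t_set(id int, name varchar(20));
-CREATE TABLE
--- Column/value pairs are written together, so their order cannot get out of sync.
-db_ignore=# insert into t_set set id = 1, name = 'a';
-INSERT 0 1
-```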
-
-```sql
---Create the table value_test.
-MogDB=# create table value_test(a int not null, b int default 3);
-
---Insert VALUES() into the table.
-MogDB=# insert into value_test values();
-
-ERROR: null value in column "a" violates not-null constraint
-
---Disable sql_mode and insert VALUES() into the table.
-MogDB=# set sql_mode = '';
-MogDB=# insert into value_test values();
-
---View table data.
-MogDB=# select * from value_test;
-
- a | b
----+---
- 0 | 3
-(1 row)
-
---Delete the table value_test.
-MogDB=# DROP TABLE value_test;
-```
-
-## Helpful Links
-
-[INSERT](../../../../../../reference-guide/sql-syntax/INSERT.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-kill.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-kill.md
deleted file mode 100644
index 18bcdc23..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-kill.md
+++ /dev/null
@@ -1,146 +0,0 @@
----
-title: dolphin KILL
-summary: dolphin KILL
-author: zhang cuiping
-date: 2022-10-24
----
-
-# KILL
-
-## Function
-
-Terminates a specified connection or the SQL statement being executed under the connection.
-
-## Precautions
-
-- The KILL syntax is valid in both non-thread pool mode and thread pool mode.
-- Generally, this statement is used together with the Id column in the query result of SHOW PROCESSLIST.
-- It can also be used together with SELECT sessionid from pg_stat_activity WHERE (filtering condition).
-
-## Syntax
-
-```
-KILL [CONNECTION | QUERY] processlist_id
-```
-
-## Parameter Description
-
-- **CONNECTION**
-
-  When the CONNECTION keyword is used to modify the KILL statement, the effect is the same as that of KILL processlist_id: the specified connection is terminated.
-
-- **QUERY**
-
-  When the QUERY keyword is used to modify the KILL statement, the SQL statement being executed by the specified connection is terminated, and the connection itself is not affected.
-
-- **processlist_id**
-
-  Connection ID.
-
-## Examples
-
-```sql
---Check the current connection.
-MogDB=# show processlist; - Id | Pid | QueryId | UniqueSqlId | User | Host | db | Command | - BackendStart | XactStart | Time | State | Info ------------------+-----------------+-------------------+-------------+-----------+------+----------+------------------------+--- -----------------------------+-------------------------------+--------+--------+---------------------------------------- - 139653370304256 | 139653370304256 | 0 | 0 | MogDB | | postgres | ApplyLauncher | 20 -22-06-21 16:46:19.656076+08 | | | | - 139653319255808 | 139653319255808 | 0 | 0 | MogDB | | postgres | Asp | 20 -22-06-21 16:46:19.728521+08 | | 1 | active | - 139653336483584 | 139653336483584 | 0 | 0 | MogDB | | postgres | PercentileJob | 20 -22-06-21 16:46:19.728527+08 | | 8 | active | - 139653302175488 | 139653302175488 | 0 | 0 | MogDB | | postgres | statement flush thread | 20 -22-06-21 16:46:19.728558+08 | | 508507 | idle | - 139653198239488 | 139653198239488 | 0 | 0 | MogDB | | postgres | WorkloadMonitor | 20 -22-06-21 16:46:19.750133+08 | | | | - 139653181298432 | 139653181298432 | 0 | 0 | MogDB | | postgres | WLMArbiter | 20 -22-06-21 16:46:19.750976+08 | | | | - 139653215110912 | 139653215110912 | 0 | 0 | MogDB | | postgres | workload | 20 -22-06-21 16:46:19.754504+08 | 2022-06-21 16:46:19.769585+08 | 508507 | active | WLM fetch collect info from data nodes - 139653421840128 | 139653421840128 | 0 | 0 | MogDB | | postgres | JobScheduler | 20 -22-06-27 10:00:54.754007+08 | | 0 | active | - 139653044328192 | 139653044328192 | 48976645947655327 | 1772643515 | MogDB | -1 | dolphin | gsql | 20 -22-06-27 14:00:53.163338+08 | 2022-06-27 14:01:26.794658+08 | 0 | active | show processlist; - 139653027546880 | 139653027546880 | 48976645947655326 | 1775585557 | MogDB | -1 | postgres | gsql | 20 -22-06-27 14:01:03.969962+08 | 2022-06-27 14:01:19.967521+08 | 7 | active | select pg_sleep(100); -(10 rows) - ---SQL statement for terminating the 139653027546880 connection -MogDB=# kill query 139653027546880; - result --------- - t -(1 row) - ---The connection status of 139653027546880 in the processlist is changed to idle. 
-MogDB=# show processlist; - Id | Pid | QueryId | UniqueSqlId | User | Host | db | Command | - BackendStart | XactStart | Time | State | Info ------------------+-----------------+-------------------+-------------+-----------+------+----------+------------------------+--- -----------------------------+-------------------------------+--------+--------+---------------------------------------- - 139653370304256 | 139653370304256 | 0 | 0 | MogDB | | postgres | ApplyLauncher | 20 -22-06-21 16:46:19.656076+08 | | | | - 139653319255808 | 139653319255808 | 0 | 0 | MogDB | | postgres | Asp | 20 -22-06-21 16:46:19.728521+08 | | 0 | active | - 139653336483584 | 139653336483584 | 0 | 0 | MogDB | | postgres | PercentileJob | 20 -22-06-21 16:46:19.728527+08 | | 5 | active | - 139653302175488 | 139653302175488 | 0 | 0 | MogDB | | postgres | statement flush thread | 20 -22-06-21 16:46:19.728558+08 | | 508573 | idle | - 139653198239488 | 139653198239488 | 0 | 0 | MogDB | | postgres | WorkloadMonitor | 20 -22-06-21 16:46:19.750133+08 | | | | - 139653181298432 | 139653181298432 | 0 | 0 | MogDB | | postgres | WLMArbiter | 20 -22-06-21 16:46:19.750976+08 | | | | - 139653215110912 | 139653215110912 | 0 | 0 | MogDB | | postgres | workload | 20 -22-06-21 16:46:19.754504+08 | 2022-06-21 16:46:19.769585+08 | 508573 | active | WLM fetch collect info from data nodes - 139653421840128 | 139653421840128 | 0 | 0 | MogDB | | postgres | JobScheduler | 20 -22-06-27 10:00:54.754007+08 | | 1 | active | - 139653044328192 | 139653044328192 | 48976645947655329 | 1772643515 | MogDB | -1 | dolphin | gsql | 20 -22-06-27 14:00:53.163338+08 | 2022-06-27 14:02:33.180256+08 | 0 | active | show processlist; - 139653027546880 | 139653027546880 | 0 | 0 | MogDB | -1 | postgres | gsql | 20 -22-06-27 14:01:03.969962+08 | | 11 | idle | select pg_sleep(100); -(10 rows) - ---Terminate the connection to 139653027546880. -MogDB=# kill 139653027546880; - result --------- - t -(1 row) - ---Alternatively, run the following command: - -MogDB=# kill connection 139653027546880; - result --------- - t -(1 row) - ---The connection does not exist in the processlist. 
-MogDB=# show processlist;
-       Id        |       Pid       |      QueryId      | UniqueSqlId |   User    | Host |    db    |        Command         |
-  BackendStart          |           XactStart           |  Time  | State  |                  Info
------------------+-----------------+-------------------+-------------+-----------+------+----------+------------------------+---
------------------------------+-------------------------------+--------+--------+----------------------------------------
- 139653370304256 | 139653370304256 |                 0 |           0 | MogDB     |      | postgres | ApplyLauncher          | 20
-22-06-21 16:46:19.656076+08 |                               |        |        |
- 139653319255808 | 139653319255808 |                 0 |           0 | MogDB     |      | postgres | Asp                    | 20
-22-06-21 16:46:19.728521+08 |                               |      1 | active |
- 139653336483584 | 139653336483584 |                 0 |           0 | MogDB     |      | postgres | PercentileJob          | 20
-22-06-21 16:46:19.728527+08 |                               |      7 | active |
- 139653302175488 | 139653302175488 |                 0 |           0 | MogDB     |      | postgres | statement flush thread | 20
-22-06-21 16:46:19.728558+08 |                               | 508696 | idle   |
- 139653198239488 | 139653198239488 |                 0 |           0 | MogDB     |      | postgres | WorkloadMonitor        | 20
-22-06-21 16:46:19.750133+08 |                               |        |        |
- 139653181298432 | 139653181298432 |                 0 |           0 | MogDB     |      | postgres | WLMArbiter             | 20
-22-06-21 16:46:19.750976+08 |                               |        |        |
- 139653215110912 | 139653215110912 |                 0 |           0 | MogDB     |      | postgres | workload               | 20
-22-06-21 16:46:19.754504+08 | 2022-06-21 16:46:19.769585+08 | 508696 | active | WLM fetch collect info from data nodes
- 139653421840128 | 139653421840128 |                 0 |           0 | MogDB     |      | postgres | JobScheduler           | 20
-22-06-27 10:00:54.754007+08 |                               |      1 | active |
- 139653044328192 | 139653044328192 | 48976645947655331 |  1772643515 | MogDB     | -1   | dolphin  | gsql                   | 20
-22-06-27 14:00:53.163338+08 | 2022-06-27 14:04:35.418518+08 |      0 | active | show processlist;
-(9 rows)
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-load-data.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-load-data.md
deleted file mode 100644
index 438d3dc5..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-load-data.md
+++ /dev/null
@@ -1,149 +0,0 @@
----
-title: dolphin LOAD DATA
-summary: dolphin LOAD DATA
-author: Guo Huan
-date: 2023-05-15
----
-
-# LOAD DATA
-
-## Function
-
-Copying data from a file to a table is accomplished with the LOAD DATA command.
-
-## Precautions
-
-- When the parameter enable\_copy\_server\_files is turned off, only the initial user is allowed to execute the LOAD DATA command. When the parameter enable\_copy\_server\_files is turned on, users with the SYSADMIN privilege or users who inherit the built-in role gs\_role\_copy\_files are allowed to execute it. However, operating on database configuration files, key files, certificate files, and audit logs is prohibited by default, to prevent users from viewing or modifying sensitive files beyond their authority.
-- Can only be used for tables, not for views.
-- Column-store tables and foreign tables are not supported.
-- Requires insert permission on the table being inserted into. The replace option also requires delete permission on the table.
-- If a list of fields is declared, LOAD will only copy the data of the declared fields between the file and the table.
If there are any fields in the table that are not in the field list, default values will be inserted for those fields.
-- The declared data source file must be accessible to the server.
-- If any row of the data file contains more or fewer fields than expected, strict mode (dolphin.sql_mode) throws an error; in loose mode, the missing fields are inserted as NULL, and if a field has a NOT NULL constraint, the base value of the type is inserted.
-- \\N represents NULL. To enter the actual data value \\N, use \\\\N.
-
-## Syntax
-
-```
- LOAD DATA
-    INFILE 'file_name'
-    [REPLACE | IGNORE]
-    INTO TABLE tbl_name
-    [CHARACTER SET charset_name]
-    [{FIELDS | COLUMNS}
-        [TERMINATED BY 'string']
-        [[OPTIONALLY] ENCLOSED BY 'char']
-        [ESCAPED BY 'char']
-    ]
-    [LINES
-        [STARTING BY 'string']
-        [TERMINATED BY 'string']
-    ]
-    [IGNORE number {LINES | ROWS}]
-    [(col_name_or_user_var
-        [, col_name_or_user_var] ...)]
-```
-
-## Parameter Description
-
-- **REPLACE**
-
-  Takes effect only when a primary key or unique key conflict occurs: the conflicting rows in the table are deleted first, and then the insertion continues.
-
-- **IGNORE**
-
-  Takes effect only when a primary key or unique key conflict occurs: the conflicting rows are skipped, and the remaining data is inserted.
-
-- **tbl\_name**
-
-  Name of the table (can have a schema modifier).
-
-  Range of values: the name of a table that already exists.
-
-- **col\_name**
-
-  Optional list of fields to be copied.
-
-  Range of values: if no field list is declared, all fields will be used.
-
-- **ESCAPED BY 'char'**
-
-  Used to specify the escape character, which can only be a single-byte character.
-
-  The default value is a double quotation mark. When it is the same as the ENCLOSED BY value, it is replaced with '\\0'.
-
-- **LINES TERMINATED BY 'string'**
-
-  Specifies the newline style of the data file.
-
-  Range of values: multi-character newlines are supported, but a newline cannot exceed 10 bytes. Common newlines include \\r, \\n, and \\r\\n (specifying 0x0D, 0x0A, and 0x0D0A has the same effect), as well as other characters or strings such as $ and \#.
-
-  >![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif)**Note**:
-  >
-  >- The LINES TERMINATED BY parameter cannot be the same as the delimiter or null parameter.
-  >- The LINES TERMINATED BY parameter cannot contain: .abcdefghijklmnopqrstuvwxyz0123456789.
-
-- **CHARACTER SET 'charset\_name'**
-
-  Specifies the name of the file encoding format.
-
-  Range of values: a valid encoding format.
-
-  Default value: the current encoding format.
-
-- **\[OPTIONALLY\] ENCLOSED BY 'char'**
-
-  Specifies the wrapper character; data fully enclosed by the wrapper is parsed as one column value. OPTIONALLY has no practical effect.
-
-  Default value: double quotation mark.
-
-  >![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **Note**:
-  >
-  >- The ENCLOSED BY parameter cannot be the same as the delimiter parameter.
-  >- The ENCLOSED BY parameter can only be a single-byte character.
-
-- **FIELDS \| COLUMNS TERMINATED BY 'string'**
-
-  The string that separates the individual fields in the file. The maximum length of the separator is no more than 10 bytes.
-
-  Default value: horizontal tab.
-
-- **IGNORE number \{LINES \| ROWS\}**
-
-  Specifies that the first number rows of the data file are skipped when the data is imported.
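-Before the full examples below, here is a compact sketch that combines the CHARACTER SET, ENCLOSED BY, and IGNORE options described above. The file path and its contents are hypothetical, and the target table is the load_t1 table created in the examples that follow:
-
-```sql
---- Skip one header line and strip double-quote wrappers from a UTF-8 CSV file.
-LOAD DATA INFILE '/home/omm/quoted.csv'
-INTO TABLE load_t1
-CHARACTER SET 'utf8'
-FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
-IGNORE 1 LINES;
-```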
-
-## Examples
-
-```sql
--- Create the load_t1 table.
-MogDB=# CREATE TABLE load_t1
-(
-    SM_SHIP_MODE_SK      INTEGER   NOT NULL,
-    SM_SHIP_MODE_ID      CHAR(16)  NOT NULL,
-    SM_TYPE              CHAR(30)          ,
-    SM_CODE              CHAR(10)          ,
-    SM_CARRIER           CHAR(20)          ,
-    SM_CONTRACT          CHAR(20)
-);
--- /home/omm/test.csv
-1,a,b,c,d,e
-,a,b,c,d,e
-3,\N,a,b,c,d
-\N,a,b,c,d,e
-
--- Copy the data from the file /home/omm/test.csv to the table load_t1.
-MogDB=# LOAD DATA INFILE '/home/omm/test.csv' INTO TABLE load_t1;
-
--- Copy data from the /home/omm/test.csv file to table load_t1 with the following parameters: field separator ',' (fields terminated by ','), line break '\n' (lines terminated by E'\n'), and skip the first two lines (IGNORE 2 LINES).
-MogDB=# LOAD DATA INFILE '/home/omm/test.csv' INTO TABLE load_t1 fields terminated by ',' lines terminated by E'\n' IGNORE 2 LINES;
-
-MogDB=# select * from load_t1;
- sm_ship_mode_sk | sm_ship_mode_id  |            sm_type             |  sm_code   |      sm_carrier      |     sm_contract
------------------+------------------+--------------------------------+------------+----------------------+----------------------
-               3 |                  | a                              | b          | c                    | d
-               0 | a                | b                              | c          | d                    | e
-(2 rows)
-
--- Delete table load_t1.
-MogDB=# DROP TABLE load_t1;
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-optimize-table.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-optimize-table.md
deleted file mode 100644
index a2ddfa17..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-optimize-table.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: dolphin OPTIMIZE TABLE
-summary: dolphin OPTIMIZE TABLE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# OPTIMIZE TABLE
-
-## Function
-
-Recreates the physical space of tables and indexes, releases recyclable space to the operating system, and updates statistics in related tables.
-
-## Precautions
-
-- The vacuum/owner or superuser permission on the table is required.
-- The B-compatible optimize operation supports multiple tables, while the MogDB optimize operation supports only a single table.
-- The B-compatible optimize operation is an online DDL operation; the main phase of the processing does not block reads and writes of the table. However, the MogDB optimize operation blocks reads and writes of the table. If the table contains a large amount of data, the table may be locked for a long time, so exercise caution when performing this operation.
-- The optimize operation is also blocked by other transactions or two-phase transactions.
-- Do not optimize multiple tables concurrently. If you need to optimize multiple tables concurrently, reduce the number of concurrent tables; generally, keep it below 3.
-- Before running the optimize command, ensure that the remaining space of the data directory is greater than the space occupied by the table. Otherwise, the operation may fail.
-- After a large amount of data is deleted from a table within a short period of time, do not perform optimize immediately. Wait for several seconds or perform other transactions first. Otherwise, tuples may be in the HEAPTUPLE_RECENTLY_DEAD state and cannot be reclaimed.
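-As a sketch of the concurrency guidance above, batch maintenance is safest when issued sequentially from a single session rather than in parallel (the table names are hypothetical):
-
-```sql
---- Running OPTIMIZE one table at a time keeps the number of concurrent
---- optimize jobs well below the suggested limit of three.
-OPTIMIZE TABLE sales_2023;
-OPTIMIZE TABLE sales_2024;
-```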
-
-## Syntax
-
-```
-OPTIMIZE [VERBOSE] [NO_WRITE_TO_BINLOG | LOCAL] TABLE tbl_name
-```
-
-## Parameter Description
-
-- **[VERBOSE]**
-
-  Displays the optimization processing details. You can retain the default value.
-
-- **[NO_WRITE_TO_BINLOG | LOCAL]**
-
-  This parameter is compatible only with the syntax and has no actual effect. You can use the default value.
-
-- **tbl_name**
-
-  Table name. You can specify a table name or **schema\_name.table\_name**.
-
-## Examples
-
-```sql
---Create a doc table.
-MogDB=# create table doc(id serial primary key, content varchar(255));
-NOTICE: CREATE TABLE will create implicit sequence "doc_id_seq" for serial column "doc.id"
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "doc_pkey" for table "doc"
-CREATE TABLE
-
---Insert 10,000 data records.
-MogDB=# insert into doc(content) select 'abcd1234' from generate_series(1,10000) as content;
-INSERT 0 10000
-
---Delete 9,000 data records.
-MogDB=# delete from doc where id <= 9000;
-DELETE 9000
-
---Optimize table.
-MogDB=# optimize table doc;
-VACUUM
-
---Optimize table (view processing details).
-MogDB=# optimize verbose table doc;
-INFO: vacuuming "public.doc"(primary pid=24692)
-INFO: "doc": found 9000 removable, 1000 nonremovable row versions in 55 pages(primary pid=24692)
-DETAIL: 0 dead row versions cannot be removed yet.
-CPU 0.00s/0.04u sec elapsed 0.04 sec.
-INFO: analyzing "public.doc"(primary pid=24692)
-INFO: ANALYZE INFO : "doc": scanned 6 of 6 pages, containing 1000 live rows and 0 dead rows; 1000 rows in sample, 1000 estimated total rows(primary pid=24692)
-VACUUM
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-prepare.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-prepare.md
deleted file mode 100644
index 5498efa1..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-prepare.md
+++ /dev/null
@@ -1,55 +0,0 @@
----
-title: dolphin PREPARE
-summary: dolphin PREPARE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# PREPARE
-
-## Function
-
-Creates a prepared statement.
-
-A prepared statement is a performance-optimizing object on the server. When **PREPARE** is executed, the specified query is parsed, analyzed, and rewritten. When **EXECUTE** is executed, the prepared statement is planned and executed. This avoids repetitive parsing and analysis. After the PREPARE statement is created, it exists throughout the database session. Once it is created (even if in a transaction block), it will not be deleted when a transaction is rolled back. It can only be deleted by explicitly invoking DEALLOCATE, or automatically when the session ends.
-
-## Precautions
-
-N/A
-
-## Syntax
-
-```
-PREPARE name [ ( data_type [, ...] ) ] AS statement;
-PREPARE name FROM statement;
-```
-
-## Parameter Description
-
-- **name**
-
-  Specifies the name of a prepared statement. It must be unique in the session.
-
-- **data_type**
-
-  Specifies the data type of the parameter.
-
-- **statement**
-
-  Specifies a **SELECT**, **INSERT**, **UPDATE**, **DELETE**, **MERGE INTO**, or **VALUES** statement.
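-Besides the PREPARE ... FROM form shown in the examples below, the parenthesized data_type form above declares parameter types explicitly. A minimal sketch (the statement name, table, and values are illustrative):
-
-```sql
---- $1 and $2 refer to the declared parameters in order.
-PREPARE ins_stmt (text, int) AS INSERT INTO test VALUES ($1, $2);
-EXECUTE ins_stmt('b', 20);
---- Prepared statements persist for the session unless explicitly deallocated.
-DEALLOCATE ins_stmt;
-```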
-
-## Examples
-
-```sql
-MogDB=# CREATE TABLE test(name text, age int);
-CREATE TABLE
-MogDB=# INSERT INTO test values('a',18);
-INSERT 0 1
-MogDB=# PREPARE stmt FROM SELECT * FROM test;
-PREPARE
-MogDB=# EXECUTE stmt;
- name | age
-------+-----
- a    |  18
-(1 row)
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-rename-table.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-rename-table.md
deleted file mode 100644
index cf403219..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-rename-table.md
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: dolphin RENAME TABLE
-summary: dolphin RENAME TABLE
-author: Guo Huan
-date: 2023-05-15
----
-
-# RENAME TABLE
-
-## Function
-
-Modifies a table name, including moving the table to another schema and renaming the table; the operation requires the permission to drop the table.
-
-```sql
-RENAME TABLE old_table to new_table;
-```
-
-## Precautions
-
-- This section contains only the syntax added by Dolphin; the original MogDB syntax has not been removed or modified.
-- When a single RENAME TABLE statement contains multiple rename commands, the syntax sorts the table names to be modified, locks them in that order, and then renames them from left to right. For RENAME TABLE a TO b, b TO c, if the intermediate table b does not exist, it is skipped without locking.
-- If a source table does not exist, or the target table name conflicts with an existing table, a corresponding error message is reported. When synonyms are involved, the original table cannot have a synonym dependency, and the target table cannot have a synonym with the same name.
-- When the table name or schema of the target table is modified, the system checks whether the current user has the required privileges on the table.
-
-## RENAME TABLE Syntax
-
-- Modify the table definition.
-
-  ```sql
-  RENAME TABLE old_schema.table_name TO new_schema.new_table_name [, old_schema.table_name TO new_schema.new_table_name ...];
-  ```
-
-## Parameter Description
-
-RENAME TABLE can rename one or more tables at the same time, but you must have the ALTER and DROP privileges on the old table, and the CREATE and INSERT privileges on the new table. You must also have permissions on old_schema and new_schema.
-
-- Modify a table name:
-
-  ```sql
-  RENAME TABLE old_table to new_table;
-  ```
-
-- When the old table and the new table are under the same schema, RENAME TABLE is equivalent to the following syntax:
-
-  ```sql
-  ALTER TABLE old_table RENAME TO new_table;
-  ```
-
-- RENAME TABLE supports modifying multiple table names in a single SQL statement; the execution order is from left to right.
-
-  ```sql
-  RENAME TABLE A TO B, B TO C, C TO A;
-  ```
-
-- RENAME TABLE involves table locking operations; tables are sorted by the old table's schema.table name and then locked in that order. Cross-schema modification of table names in MogDB is equivalent to cross-database modification of table names in MySQL.
-
-  ```sql
-  RENAME TABLE old_schema.old_table TO new_schema.new_table;
-  ```
-
-- old_table cannot be a synonym and cannot have synonym dependencies; new_table cannot be a synonym.
-
-- RENAME TABLE modifies the table name, and also modifies data types in the system catalog pg_type that have the same name as old_table, as well as dependencies in the system catalog pg_depend.
-- Temporary tables and global temporary tables are not supported.
-- Cross-schema modification of table names for views is not supported; only modification of table names under the same schema is supported.
-- After RENAME TABLE changes the table name, new_table does not inherit the privileges of old_table. You must reassign privileges to new_table as a superuser.
-- In the RENAME TABLE syntax, if old_table has triggers, its name cannot be changed across schemas.
-- old_table and new_table cannot be the same name.
-- In the RENAME TABLE syntax, if old_table does not specify a schema, the search_path is traversed until old_table is found; otherwise an error is reported that old_table does not exist. If new_table does not specify a schema, new_table is placed in the same schema as old_table.
-
-## Helpful Links
-
-[ALTER TABLE](dolphin-alter-table.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-rename-user.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-rename-user.md
deleted file mode 100644
index 5d8ba772..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-rename-user.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: dolphin RENAME USER
-summary: dolphin RENAME USER
-author: zhang cuiping
-date: 2022-10-24
----
-
-# RENAME USER
-
-## Function
-
-Changes user names in the database.
-
-## Precautions
-
-- RENAME USER changes a user name; only the names of existing users can be changed.
-- If multiple users are modified and one of the user names does not exist, or the statement fails for other reasons, the entire statement fails and all user names remain unchanged.
-- It is equivalent to ALTER USER… RENAME TO….
-
-## Syntax
-
-```
-RENAME USER
-    old_user1 TO new_user1,
-    old_user2 TO new_user2,
-    ...
-```
-
-## Parameter Description
-
-- **old_user** Old user name, which must exist.
-- **new_user** New user name.
-
-## Examples
-
-```
-rename user
-    user1 to user4,
-    user2 to user5,
-    user3 to user6;
-```
-
-## Helpful Links
-
-[ALTER USER](../../../../../../reference-guide/sql-syntax/ALTER-USER.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-revoke.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-revoke.md
deleted file mode 100644
index f03d1a56..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-revoke.md
+++ /dev/null
@@ -1,114 +0,0 @@
----
-title: dolphin REVOKE
-summary: dolphin REVOKE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# REVOKE
-
-## Function
-
-REVOKE is used to revoke permissions from one or more roles.
-
-## Precautions
-
-This section describes only the new syntax of Dolphin. The original syntax of MogDB is not deleted or modified.
The ALTER ROUTINE, CREATE ROUTINE, CREATE TEMPORARY TABLES, CREATE USER, CREATE TABLESPACE, and INDEX permissions are added.
-
-## Syntax
-
-- The ALTER ROUTINE permission is added.
-
-The permission is basically the same as the ALTER permission on functions and procedures.
-
-The syntax after modification is described as follows:
-
-```
-REVOKE { { EXECUTE | ALTER ROUTINE | ALTER | DROP | COMMENT } [, ...] | ALL [ PRIVILEGES ] }
-ON {FUNCTION {function_name ( [ {[ argmode ] [ arg_name ] arg_type} [, ...] ] )} | PROCEDURE {proc_name ( [ {[ argmode ] [ arg_name ] arg_type} [, ...] ] )} [, ...] | ALL FUNCTIONS IN SCHEMA schema_name [, ...] | ALL PROCEDURE IN SCHEMA schema_name [, ...] | schema_name.*}
-FROM { [ GROUP ] role_name | PUBLIC } [, ...]
-[ WITH GRANT OPTION ];
-```
-
-- The CREATE ROUTINE permission is added.
-
-The permission is basically the same as that of CREATE ANY FUNCTION.
-
-The syntax after modification is described as follows:
-
-```
-REVOKE { CREATE ANY TABLE | ALTER ANY TABLE | DROP ANY TABLE | SELECT ANY TABLE | INSERT ANY TABLE | UPDATE ANY TABLE |
-      DELETE ANY TABLE | CREATE ANY SEQUENCE | CREATE ANY INDEX | CREATE ANY FUNCTION | CREATE ROUTINE | EXECUTE ANY FUNCTION |
-      CREATE ANY PACKAGE | EXECUTE ANY PACKAGE | CREATE ANY TYPE } [, ...]
-      [ON *.*]
-      FROM [ GROUP ] role_name [, ...]
-      [ WITH ADMIN OPTION ];
-```
-
-- The CREATE TEMPORARY TABLES permission is added.
-
-The permission is basically the same as that of TEMPORARY.
-
-The syntax after modification is described as follows:
-
-```
-REVOKE { { CREATE | CONNECT | CREATE TEMPORARY TABLES | TEMPORARY | TEMP | ALTER | DROP | COMMENT } [, ...]
-      | ALL [ PRIVILEGES ] }
-      ON { DATABASE database_name [, ...] | database_name.* }
-      FROM { [ GROUP ] role_name | PUBLIC } [, ...]
-      [ WITH GRANT OPTION ];
-```
-
-- The CREATE USER permission is added.
-
-It controls users' permission to create new users, which is basically the same as the CREATEROLE and NOCREATEROLE permissions of users.
-
-The new syntax is described as follows:
-
-```
-REVOKE CREATE USER ON *.* FROM ROLE_NAME;
-```
-
-- The CREATE TABLESPACE permission is added.
-
-It controls users' permission to create tablespaces.
-
-The new syntax is described as follows:
-
-```
-REVOKE CREATE TABLESPACE ON *.* FROM ROLE_NAME;
-```
-
-- The INDEX permission is added.
-
-The permission is basically the same as that of CREATE ANY INDEX.
-
-The syntax after modification is described as follows:
-
-```
-REVOKE { CREATE ANY TABLE | ALTER ANY TABLE | DROP ANY TABLE | SELECT ANY TABLE | INSERT ANY TABLE | UPDATE ANY TABLE |
-      DELETE ANY TABLE | CREATE ANY SEQUENCE | CREATE ANY INDEX | INDEX | CREATE ANY FUNCTION | EXECUTE ANY FUNCTION |
-      CREATE ANY PACKAGE | EXECUTE ANY PACKAGE | CREATE ANY TYPE } [, ...]
-      { ON *.* }
-      FROM [ GROUP ] role_name [, ...]
-      [ WITH ADMIN OPTION ];
-```
-
-## Parameter Description
-
-N/A
-
-## Examples
-
-```sql
-REVOKE ALTER ROUTINE ON FUNCTION TEST FROM USER_TESTER;
-REVOKE CREATE ANY FUNCTION FROM USER_TESTER;
-REVOKE CREATE TEMPORARY TABLES ON DATABASE DATABASE_TEST FROM USER_TESTER;
-REVOKE CREATE USER ON *.* FROM USER_TESTER;
-REVOKE CREATE TABLESPACE ON *.* FROM USER_TESTER;
-REVOKE INDEX FROM TEST_USER;
-```
-
-## Helpful Links
-
-[REVOKE](../../../../../../reference-guide/sql-syntax/REVOKE.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-select-hint.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-select-hint.md
deleted file mode 100644
index 3ef4cf33..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-select-hint.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-title: dolphin SELECT HINT
-summary: dolphin SELECT HINT
-author: Guo Huan
-date: 2023-05-15
----
-
-# SELECT HINT
-
-## Function
-
-Sets query-optimization GUC parameters that take effect only within the current query execution. This section covers dolphin's syntax enhancements to select-hint. For details on kernel-supported hints, see [Hint-based Tuning](../../../../../../performance-tuning/sql-tuning/hint-based-tuning.md).
-
-## Syntax
-
-```
-set_var(param = value)
-```
-
-## Parameter Description
-
-- **param**
-
-  Parameter name.
-
-  The following parameters can currently be set through this hint:
-
-  - Boolean type:
-
-    enable\_bitmapscan, enable\_hashagg, enable\_hashjoin, enable\_indexscan, enable\_indexonlyscan, enable\_material, enable\_mergejoin, enable\_nestloop, enable\_index\_nestloop, enable\_seqscan, enable\_sort, enable\_tidscan, partition\_iterator\_elimination, partition\_page\_estimation, enable\_functional\_dependency, var\_eq\_const\_selectivity
-
-  - Integer type:
-
-    query\_dop
-
-  - Floating point type:
-
-    cost\_weight\_index, default\_limit\_rows, seq\_page\_cost, random\_page\_cost, cpu\_tuple\_cost, cpu\_index\_tuple\_cost, cpu\_operator\_cost, effective\_cache\_size
-
-  - Enumeration type:
-
-    try\_vector\_engine\_strategy
-
-  - String type:
-
-    dolphin.optimizer\_switch
-
-- **value**
-
-  The value of the parameter.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-select.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-select.md
deleted file mode 100644
index e9cb0834..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-select.md
+++ /dev/null
@@ -1,123 +0,0 @@
----
-title: dolphin SELECT
-summary: dolphin SELECT
-author: zhang cuiping
-date: 2022-10-24
----
-
-# SELECT
-
-## Function
-
-**SELECT** retrieves data from a table or view.
-
-Serving as an overlaid filter for a database table, **SELECT** filters required data from the table using SQL keywords.
-
-## Precautions
-
-- Compared with the MogDB SELECT syntax, the SOUNDS LIKE syntax under the WHERE clause is added.
-- A JOIN without ON/USING is added. The effect is the same as that of CROSS JOIN, as sketched below.
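-A minimal sketch of the ON/USING-free JOIN mentioned above (the tables t1 and t2 are hypothetical); it produces the Cartesian product of the two tables:
-
-```sql
---- Equivalent to: SELECT * FROM t1 CROSS JOIN t2;
-SELECT * FROM t1 JOIN t2;
-```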
-
-## Syntax
-
-- Querying data
-
-```
-[ WITH [ RECURSIVE ] with_query [, ...] ]
-SELECT [/*+ plan_hint */] [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]
-{ * | {expression [ [ AS ] output_name ]} [, ...] }
-[ FROM from_item [, ...] ]
-[ WHERE condition ]
-[ [ START WITH condition ] CONNECT BY [NOCYCLE] condition [ ORDER SIBLINGS BY expression ] ]
-[ GROUP BY grouping_element [, ...] ]
-[ HAVING condition [, ...] ]
-[ WINDOW {window_name AS ( window_definition )} [, ...] ]
-[ { UNION | INTERSECT | EXCEPT | MINUS } [ ALL | DISTINCT ] select ]
-[ ORDER BY {expression [ [ ASC | DESC | USING operator ] | nlssort_expression_clause ] [ NULLS { FIRST | LAST } ]} [, ...] ]
-[ LIMIT { [offset,] count | ALL } ]
-[ OFFSET start [ ROW | ROWS ] ]
-[ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
-[ {FOR { UPDATE | NO KEY UPDATE | SHARE | KEY SHARE } [ OF table_name [, ...] ] [ NOWAIT ]} [...] ];
-```
-
-- The **group** clause is as follows:
-
-  ```
-  ( )
-  | expression
-  | ( expression [, ...] )
-  | rollup_clause
-  | CUBE ( { expression | ( expression [, ...] ) } [, ...] )
-  | GROUPING SETS ( grouping_element [, ...] )
-  ```
-
-  The rollup_clause clause is as follows:
-
-  ```
-  ROLLUP ( { expression | ( expression [, ...] ) } [, ...] )
-  | { expression | ( expression [, ...] ) } WITH ROLLUP
-  ```
-
-- JOIN syntax
-
-```
-[JOIN | INNER JOIN] {ON join_condition | USING ( join_column [, ...] ) }
-```
-
-## Parameter Description
-
-- WHERE clause
-
-  1. SOUNDS LIKE is a form of condition. For example, **column_name sounds like 'character';** is equivalent to the Boolean comparison result of **soundex(column_name) = soundex('character')**. It is used to query data that matches the condition after soundex processing.
-
-  ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** For details about other parameters, see [SELECT](../../../../../../reference-guide/sql-syntax/SELECT.md).
-
-## Examples
-
---Example of SOUNDS LIKE: homophone column query
-
-```sql
-MogDB=# CREATE TABLE TEST(id int, name varchar);
-MogDB=# INSERT INTO TEST VALUES(1, 'too');
-MogDB=# SELECT * FROM TEST WHERE name SOUNDS LIKE 'two';
- id | name
-----+------
-  1 | too
-(1 row)
---Use ROLLUP in the SELECT GROUP BY clause.
-MogDB=# CREATE TABLESPACE t_tbspace ADD DATAFILE 'my_tablespace' ENGINE = test_engine; -CREATE TABLESPACE -MogDB=# CREATE TABLE t_with_rollup(id int, name varchar(20), area varchar(50), count int); -CREATE TABLE -MogDB=# INSERT INTO t_with_rollup values(1, 'a', 'A', 10); -INSERT 0 1 -MogDB=# INSERT INTO t_with_rollup values(2, 'b', 'B', 15); -INSERT 0 1 -MogDB=# INSERT INTO t_with_rollup values(2, 'b', 'B', 20); -INSERT 0 1 -MogDB=# INSERT INTO t_with_rollup values(3, 'c', 'C', 50); -INSERT 0 1 -MogDB=# INSERT INTO t_with_rollup values(3, 'c', 'C', 15); -INSERT 0 1 -MogDB=# SELECT name, sum(count) FROM t_with_rollup GROUP BY ROLLUP(name); - name | sum -------+----- - a | 10 - b | 35 - c | 65 - | 110 -(4 rows) - -MogDB=# SELECT name, sum(count) FROM t_with_rollup GROUP BY (name) WITH ROLLUP; - name | sum -------+----- - a | 10 - b | 35 - c | 65 - | 110 -(4 rows) -``` - -## Helpful Links - -[SELECT](../../../../../../reference-guide/sql-syntax/SELECT.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-set-charset.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-set-charset.md deleted file mode 100644 index 4934c83a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-set-charset.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: dolphin SET CHARSET -summary: dolphin SET CHARSET -author: zhang cuiping -date: 2022-10-24 ---- - -# SET CHARSET - -## Function - -Sets the character encoding type of the client. - -## Precautions - -- This statement is equivalent to **set client_encoding** in MogDB. -- Set this parameter based on the front-end service requirements. Ensure that the client code is the same as the server code to improve efficiency. -- It is compatible with all encoding types of PostgreSQL. - -## Syntax - -``` -SET {CHARACTER SET | CHARSET} {'charset_name' | DEFAULT} -``` - -## Parameter Description - -- **{CHARACTER SET | CHARSET}** - - The two are equivalent. - -- **{'charset_name' | DEFAULT}** - - charset\_name supports the character encoding types that can be set by MogDB, such as utf8 and gbk. If DEFAULT is specified, the character set is reset to the default one. - - charset\_name supports the following formats: - - 1. utf8 - 2. 'utf8' - 3. 
"utf8" - -## Examples - -```sql -MogDB=# show client_encoding; --[ RECORD 1 ]---+---- -client_encoding | GBK - -MogDB=# set charset gbk; -SET -db_show=# show client_encoding; --[ RECORD 1 ]---+---- -client_encoding | GBK - -MogDB=# set charset default; -SET -MogDB=# show client_encoding; --[ RECORD 1 ]---+----- -client_encoding | UTF8 - -MogDB=# set character set 'gbk'; -SET -MogDB=# show client_encoding; --[ RECORD 1 ]---+---- -client_encoding | GBK - -MogDB=# set character set default; -SET -MogDB=# show client_encoding; --[ RECORD 1 ]---+----- -client_encoding | UTF8 -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-set-password.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-set-password.md deleted file mode 100644 index 6e9b53d7..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-set-password.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: dolphin SET PASSWORD -summary: dolphin SET PASSWORD -author: zhang cuiping -date: 2022-10-24 ---- - -# SET PASSWORD - -## Function - -Change the user password. - -## Precautions - -- If no user is specified, the password of the current user is changed. -- An initial user can change the password of any user (including its own password). REPLACE does not need to be specified to verify the current password. -- A non-initial user cannot change the password of an initial user. -- The sysadmin user and users with the createrole permission can change the passwords of other users (non-initialization, non-sysadmin, and non-createrole users). REPLACE does not need to be specified to verify the current password. -- When the sysadmin user or a user with the createrole permission changes the password, REPLACE must be specified to verify the current password. - -## Syntax - -``` -SET PASSWORD [FOR user] = password_option [REPLACE 'current_auth_string'] - -password_option: { - 'auth_string' - | PASSWORD('auth_string') -} -``` - -## Parameter Description - -- **[FOR user]** - - **user** supports the following formats: - - 1. **user** (case insensitive). - 2. **'user'** (case sensitive). - 3. **“user”** (case sensitive) - 4. **'user'@'host'** (case sensitive). - 5. **current_user()/current_user**. - -- **auth_string** - - Password to be set. - -- **current_auth_string** - - Old password. - -## Examples - -```sql ---Changes the password of the specified user. -MogDB=# create user user1 with password 'abcd@123'; -CREATE ROLE -MogDB=# set password for user1 = 'abcd@124'; -ALTER ROLE - ---Changes the password of the current user. 
---Changes the password of the current user.
-MogDB=# set password = 'abcd@123';
-ALTER ROLE
-MogDB=# set password for current_user = 'abcd@123';
-ALTER ROLE
-MogDB=# set password for current_user() = 'abcd@123';
-ALTER ROLE
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-character-set.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-character-set.md
deleted file mode 100644
index caf06356..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-character-set.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-title: dolphin SHOW CHARACTER SET
-summary: dolphin SHOW CHARACTER SET
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW CHARACTER SET
-
-## Precautions
-
-N/A
-
-## Function
-
-Displays all supported character sets.
-
-## Syntax
-
-```
-SHOW {CHARACTER SET | CHARSET} [LIKE 'pattern' | WHERE expr]
-```
-
-## Parameter Description
-
-- **WHERE expr**
-
- Filter expressions.
-
-- **LIKE 'pattern'**
-
- The pattern regular expression matches the name of the character set.
-
-## Return Result Set
-
-| Field | Description | Note |
-| ----------------- | ---------------------------- | ----------- |
-| charset | Character set name | |
-| Description | Description of the character set | |
-| default collation | Default collation of the character set | The content of this field is empty |
-| maxlen | Maximum number of bytes required to store one character. | |
-
-## Examples
-
-```sql
-MogDB=# SHOW CHARACTER SET LIKE 'a%';
- charset | Description | default collation | maxlen
----------+-------------------+-------------------+--------
- abc | alias for WIN1258 | | 1
- alt | alias for WIN866 | | 1
-(2 rows)
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-collation.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-collation.md
deleted file mode 100644
index ce134671..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-collation.md
+++ /dev/null
@@ -1,62 +0,0 @@
----
-title: dolphin SHOW COLLATION
-summary: dolphin SHOW COLLATION
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW COLLATION
-
-## Precautions
-
-N/A
-
-## Function
-
-Displays all collations supported by the server.
-
-## Syntax
-
-```
-SHOW COLLATION [LIKE 'pattern' | WHERE expr]
-```
-
-## Parameter Description
-
-- **WHERE expr**
-
- Filter expressions.
-
-- **LIKE 'pattern'**
-
- The pattern regular expression matches the collation name.
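-
- For example, the WHERE form can filter on any column of the result set (an illustrative sketch added here; the rows returned depend on the collations installed on the server):
-
- ```sql
- MogDB=# SHOW COLLATION WHERE charset = 'utf8';
- ```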
-
-## Return Result Set
-
-| Field | Description | Note |
-| --------- | ---------------------------- | :---------- |
-| collation | Collation name | |
-| charset | Character set associated with the collation | |
-| id | Collation ID | This field corresponds to the OID of the corresponding row in the pg_collation table |
-| default | Whether it is the default collation of its character set | MogDB has no default collation, so the content of this field is empty |
-| compiled | Whether the collation is compiled | The value of this field is Yes |
-| sortlen | Memory size required for sorting in this collation | The value of this field is NULL |
-
-## Examples
-
-```sql
-MogDB=# SHOW COLLATION LIKE 'aa%';
- collation | charset | id | default | compiled | sortlen
-------------------+---------+-------+---------+----------+----------
- aa_DJ | utf8 | 13450 | | Yes |
- aa_DJ | latin1 | 13451 | | Yes |
- aa_DJ.iso88591 | latin1 | 13452 | | Yes |
- aa_DJ.utf8 | utf8 | 13453 | | Yes |
- aa_ER | utf8 | 13454 | | Yes |
- aa_ER.utf8 | utf8 | 13455 | | Yes |
- aa_ER.utf8@saaho | utf8 | 13456 | | Yes |
- aa_ER@saaho | utf8 | 13457 | | Yes |
- aa_ET | utf8 | 13458 | | Yes |
- aa_ET.utf8 | utf8 | 13459 | | Yes |
-(10 rows)
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-columns.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-columns.md
deleted file mode 100644
index 61d984f9..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-columns.md
+++ /dev/null
@@ -1,131 +0,0 @@
----
-title: dolphin SHOW COLUMNS
-summary: dolphin SHOW COLUMNS
-author: zhang cuiping
-date: 2022-10-24
----
-
-# SHOW COLUMNS
-
-## Function
-
-Views the column meta information of a specified table.
-
-## Precautions
-
-- To query a temporary table, you need to specify the schema that the temporary table belongs to.
-- All participating columns of a composite primary key index are displayed as PRI in the Key column.
-- All participating columns of a composite unique index are displayed as UNI in the Key column.
-- If a column is involved in the creation of multiple indexes, the Key column is displayed based on the first index created on the column.
-- Generated columns are displayed in the Default column.
-- If a table name contains schemaname or dbname and dbname is specified, only the specified dbname is matched.
-- The result displays only the columns on which the current query user has the SELECT permission.
-
-## Syntax
-
-```
-SHOW [FULL] {COLUMNS | FIELDS}
- {FROM | IN} tbl_name
- [{FROM | IN} db_name]
- [LIKE 'pattern' | WHERE expr]
-```
-
-## Parameter Description
-
-- **{COLUMNS | FIELDS}**
-
- The effect of using COLUMNS is equivalent to that of using FIELDS.
-
-- **tbl_name**
-
- Table name. You can specify a table name or **schema_name.table_name**.
-
-- **db_name**
-
- Database name (or schema name). This option is preferred when the database name (or schema name) is also specified in tbl_name.
-
-- **LIKE 'pattern'**
-
- The pattern matches the Field column of the displayed result.
- -## Examples - -```sql ---Create a simple table: -MogDB=# CREATE SCHEMA tst_schema1; - -MogDB=# SET SEARCH_PATH TO tst_schema1; - -MogDB=# CREATE TABLE tst_t1 -MogDB-# ( -MogDB(# id int primary key, -MogDB(# name varchar(20) NOT NULL, -MogDB(# addr text COLLATE "de_DE", -MogDB(# phone text COLLATE "es_ES", -MogDB(# addr_code text -MogDB(# ); -MogDB=# COMMENT ON COLUMN tst_t1.id IS 'identity'; - ---View the column meta information of a table. -MogDB=# SHOW COLUMNS FROM tst_t1; - Field | Type | Null | Key | Default | Extra ------------+-----------------------+------+-----+---------+------- - id | integer | NO | PRI | NULL | - name | character varying(20) | NO | | NULL | - addr | text | YES | | NULL | - phone | text | YES | | NULL | - addr_code | text | YES | | NULL | - -MogDB=# show FULL COLUMNS FROM tst_t1; - Field | Type | Collation | Null | Key | Default | Extra | Privileges | Comment ------------+-----------------------+-----------+------+-----+---------+-------+-----------------------------------------+---------- - id | integer | NULL | NO | PRI | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | identity - name | character varying(20) | NULL | NO | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - addr | text | de_DE | YES | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - phone | text | es_ES | YES | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - addr_code | text | NULL | YES | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - - MogDB=# show FULL COLUMNS FROM tst_schema1.tst_t1; - Field | Type | Collation | Null | Key | Default | Extra | Privileges | Comment ------------+-----------------------+-----------+------+-----+---------+-------+-----------------------------------------+---------- - id | integer | NULL | NO | PRI | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | identity - name | character varying(20) | NULL | NO | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - addr | text | de_DE | YES | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - phone | text | es_ES | YES | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - addr_code | text | NULL | YES | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - ---Fuzzy match and filtering -MogDB=# show full columns from tst_t1 like '%addr%'; - Field | Type | Collation | Null | Key | Default | Extra | Privileges | Comment ------------+------+-----------+------+-----+---------+-------+-----------------------------------------+--------- - addr | text | de_DE | YES | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - addr_code | text | NULL | YES | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - -MogDB=# show full columns from tst_t1 where Type='text'; - Field | Type | Collation | Null | Key | Default | Extra | Privileges | Comment ------------+------+-----------+------+-----+---------+-------+-----------------------------------------+--------- - addr | text | de_DE | YES | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - phone | text | es_ES | YES | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - addr_code | text | NULL | YES | | NULL | | UPDATE,SELECT,REFERENCES,INSERT,COMMENT | - ---Display permission filtering -MogDB=# CREATE USER tst_u1 PASSWORD 'tst_u1@123'; -MogDB=# SET ROLE tst_u1 PASSWORD 'tst_u1@123'; -MogDB=> SET SEARCH_PATH TO tst_schema1; - -MogDB=> show full columns from tst_t1; - Field | Type | Collation | Null | Key | Default | Extra | Privileges | Comment --------+------+-----------+------+-----+---------+-------+------------+--------- -(0 rows) - 
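---Note (added): tst_u1 has no SELECT privilege on tst_t1 yet, so the result above is empty.
---The GRANT below makes only the addr and phone columns visible to tst_u1.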
-MogDB=# RESET ROLE;
-MogDB=# GRANT SELECT (addr, phone) on tst_t1 to tst_u1;
-MogDB=# SET ROLE tst_u1 PASSWORD 'tst_u1@123';
-
-MogDB=> SET SEARCH_PATH TO tst_schema1;
-MogDB=> show full columns from tst_t1;
- Field | Type | Collation | Null | Key | Default | Extra | Privileges | Comment
-----------+------+-----------+------+-----+---------+-------+------------------+---------
- addr | text | de_DE | YES | | NULL | | SELECT |
- phone | text | es_ES | YES | | NULL | | SELECT |
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-database.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-database.md
deleted file mode 100644
index 28c665b4..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-database.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: dolphin SHOW CREATE DATABASE
-summary: dolphin SHOW CREATE DATABASE
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW CREATE DATABASE
-
-## Function
-
-Displays the CREATE DATABASE statement that creates the named database.
-
-If the statement contains an IF NOT EXISTS clause, the output also contains such a clause. SHOW CREATE SCHEMA is a synonym for SHOW CREATE DATABASE.
-
-## Precautions
-
-Database and schema are equivalent in a B-compatible database, so when statements are assembled in MogDB, they are assembled as CREATE SCHEMA.
-
-In MogDB, CREATE SCHEMA supports the WITH BLOCKCHAIN clause, so the schema's information in the system catalog is also used to determine whether to splice this clause into the assembled statement.
-
-## Syntax
-
-```
-SHOW CREATE {DATABASE | SCHEMA} [IF NOT EXISTS] db_name
-```
-
-## Parameter Description
-
-- **db_name**
-
- The target database (schema) name.
-
-## Examples
-
-```sql
--- Query Database Creation Statements
-MogDB=# show create database test_get_database;
- Database | Create Database
--------------------+----------------------------------------------------
- test_get_database | CREATE SCHEMA test_get_database AUTHORIZATION omm
-(1 row)
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-function.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-function.md
deleted file mode 100644
index 30404a9b..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-function.md
+++ /dev/null
@@ -1,63 +0,0 @@
----
-title: dolphin SHOW CREATE FUNCTION
-summary: dolphin SHOW CREATE FUNCTION
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW CREATE FUNCTION
-
-## Function
-
-It returns the exact string that can be used to recreate the named function. A similar statement, SHOW CREATE PROCEDURE, displays information about stored procedures.
-
-To use either statement, you must have global SELECT privileges.
-
-## Precautions
-
-sql_mode is the session value for the query. 
A B-compatible database shows the sql_mode that was bound when the routine was created; MogDB shows the session value because it does not bind sql_mode to the routine at creation time.
-
-character_set_client is the session value of the client_encoding system variable when the routine was created.
-
-collation_connection is the value specified by lc_collate when the database was created.
-
-Database Collation is the value specified by lc_collate when the database was created.
-
-## Syntax
-
-```
-SHOW CREATE FUNCTION func_name
-```
-
-## Parameter Description
-
-- **func_name**
-
- The function name.
-
-## Examples
-
-```sql
--- Creating a function
-MogDB=# CREATE FUNCTION functest_A_1(text, date) RETURNS bool LANGUAGE 'sql'
- AS 'SELECT $1 = ''abcd'' AND $2 > ''2001-01-01''';
-CREATE FUNCTION
--- Query the function creation statement
-MogDB=# show create function functest_A_1;
- Function | Create Function | sql_mode | character_set_client | collation_connection
-| Database Collation
---------------+------------------------------------------------------------------+-------------------------------------+----------------------+----------------------+--------------------
- functest_a_1 | CREATE OR REPLACE FUNCTION public.functest_a_1(text, date) +| sql_mode_strict,sql_mode_full_group | UTF8 | en_US.UTF-8
-| en_US.UTF-8
- | RETURNS boolean +| | |
-|
- | LANGUAGE sql +| | |
-|
- | NOT FENCED NOT SHIPPABLE +| | |
-|
- | AS $function$SELECT $1 = 'abcd' AND $2 > '2001-01-01'$function$;+| | |
-|
- | | | |
-|
-(1 row)
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-procedure.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-procedure.md
deleted file mode 100644
index ceb52574..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-procedure.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-title: dolphin SHOW CREATE PROCEDURE
-summary: dolphin SHOW CREATE PROCEDURE
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW CREATE PROCEDURE
-
-## Function
-
-It returns the exact string that can be used to recreate the named procedure. A similar statement, SHOW CREATE FUNCTION, displays information about stored functions.
-To use either statement, you must have global SELECT privileges.
-
-## Precautions
-
-sql_mode is the session value for the query. A B-compatible database shows the sql_mode that was bound when the routine was created; MogDB shows the session value because it does not bind sql_mode to the routine at creation time.
-
-character_set_client is the session value of the client_encoding system variable when the routine was created.
-
-collation_connection is the value specified by lc_collate when the database was created.
-
-Database Collation is the value specified by lc_collate when the database was created.
-
-## Syntax
-
-```
-SHOW CREATE PROCEDURE proc_name
-```
-
-## Parameter Description
-
-- **proc_name**
-
- Stored procedure name.
-
-## Examples
-
-```sql
--- Create a stored procedure.
-MogDB=# create procedure test_procedure_test(int,int)
-MogDB-# SHIPPABLE IMMUTABLE
-MogDB-# as
-MogDB$# begin
-MogDB$# select $1 + $2;
-MogDB$# end;
-MogDB$# /
-CREATE PROCEDURE
--- Query stored procedure creation statements.
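--- Note (added): per the precautions above, sql_mode in the output below is the current
--- session value; MogDB does not bind sql_mode to the routine when creating it.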
-MogDB=# show create procedure test_procedure_test;
- Procedure | Create Procedure | sql_mode | character_set_client | collation_connection | Database Collation
----------------------+-----------------------------------------------------------------+-------------------------------------+----------------------+----------------------+--------------------
- test_procedure_test | CREATE OR REPLACE PROCEDURE public.test_procedure_test(int,int)+| sql_mode_strict,sql_mode_full_group | UTF8 | en_US.UTF-8
- | en_US.UTF-8
- | IMMUTABLE SHIPPABLE +| | |
- |
- | AS DECLARE +| | |
- |
- | begin +| | |
- |
- | select $1 + $2; +| | |
- |
- | end; +| | |
- |
- | / +| | |
- |
- | | | |
- |
-(1 row)
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-table.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-table.md
deleted file mode 100644
index 2b3e8271..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-table.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: dolphin SHOW CREATE TABLE
-summary: dolphin SHOW CREATE TABLE
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW CREATE TABLE
-
-## Function
-
-Displays the CREATE TABLE statement that creates the named table. This syntax can also be used to query view creation statements.
-
-## Precautions
-
-- This syntax does not support querying temporary tables.
-- A field's character set and character order are inherited from the table, and SHOW CREATE TABLE displays the character set and character order of both the fields and the table in full. If the table has no default character set or character order: when b_format_behavior_compat_options = 'default_collation', the character set and character order are inherited from the current database's character set and its corresponding default character order; when b_format_behavior_compat_options is not set, nothing is inherited from the table and no character set or character order is displayed.
-
-## Syntax
-
-```
-show create table tbl_name;
-```
-
-## Parameter Description
-
-- **tbl_name**
-
- Table name.
-
-## Examples
-
-```sql
-MogDB=# CREATE TABLE t1 (c1 INT PRIMARY KEY);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "t1_pkey" for table "t1"
-CREATE TABLE
-MogDB=# show create table t1;
- Table | Create Table
--------+---------------------------------------------------------
- t1 | SET search_path = public; +
- | CREATE TABLE t1 ( +
- | c1 integer NOT NULL +
- | ) +
- | WITH (orientation=row, compression=no); +
- | ALTER TABLE t1 ADD CONSTRAINT t1_pkey PRIMARY KEY (c1);
-(1 row)
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-trigger.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-trigger.md
deleted file mode 100644
index aeee90aa..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-trigger.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-title: dolphin SHOW CREATE TRIGGER
-summary: dolphin SHOW CREATE TRIGGER
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW CREATE TRIGGER
-
-## Function
-
-It returns the exact string that can be used to recreate the named trigger.
-
-## Precautions
-
-sql_mode is the session value at the time of querying. A B-compatible database shows the sql_mode that was bound when the routine was created; MogDB shows the session value because it does not bind sql_mode to the routine at creation time.
-
-character_set_client is the session value of the client_encoding system variable when the routine was created.
-
-collation_connection is the value specified by lc_collate when the database was created.
-
-Database Collation is the value specified by lc_collate when the database was created.
-
-## Syntax
-
-```
-SHOW CREATE TRIGGER trigger_name
-```
-
-## Parameter Description
-
-- **trigger_name**
-
- The trigger name.
-
-## Examples
-
-```sql
--- Query trigger creation statement.
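--- Note (added): this assumes a trigger named before_ins_stmt_trig was created on
--- main_table beforehand; the example itself does not create it.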
-MogDB=# show create trigger before_ins_stmt_trig;
- Trigger | sql_mode | SQL Original Statement
- | character_set_client | collation_connection | Database Collation
-----------------------+-------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+----------------------+----------------------+--------------------
- before_ins_stmt_trig | sql_mode_strict,sql_mode_full_group | CREATE TRIGGER before_ins_stmt_trig BEFORE INSERT ON main_table FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('before_ins_stmt') | UTF8 | en_US.UTF-8 | en_US.UTF-8
-(1 row)
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-view.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-view.md
deleted file mode 100644
index f0741ae2..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-create-view.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-title: dolphin SHOW CREATE VIEW
-summary: dolphin SHOW CREATE VIEW
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW CREATE VIEW
-
-## Function
-
-It returns the exact string that can be used to recreate the named view.
-
-## Precautions
-
-character_set_client is the session value of the client_encoding system variable when the routine was created.
-
-collation_connection is the value specified by lc_collate when the database was created.
-
-## Syntax
-
-```
-SHOW CREATE VIEW view_name
-```
-
-## Parameter Description
-
-- **view_name**
-
- The view name.
-
-## Examples
-
-```sql
--- Create a view
-MogDB=# create view tt19v as
-MogDB-# select 'foo'::text = any(array['abc','def','foo']::text[]) c1,
-MogDB-# 'foo'::text = any((select array['abc','def','foo']::text[])::text[]) c2;
-CREATE VIEW
--- Query view creation statement.
-MogDB=# show create view tt19v;
- View | Create View
- | character_set_client | collation_connection
--------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------+----------------------
- tt19v | CREATE OR REPLACE VIEW public.tt19v AS
- +| UTF8 | en_US.UTF-8
- | SELECT ('foo'::text = ANY (ARRAY['abc'::text, 'def'::text, 'foo'::text])) AS c1, ('foo'::text = ANY ((SELECT ARRAY['abc'::text, 'def'::text, 'foo'::text] AS "array")::text[])) AS c2; | |
-(1 row)
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-databases.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-databases.md
deleted file mode 100644
index 807c0aa5..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-databases.md
+++ /dev/null
@@ -1,94 +0,0 @@
----
-title: dolphin SHOW DATABASES
-summary: dolphin SHOW DATABASES
-author: zhang cuiping
-date: 2022-10-24
----
-
-# SHOW DATABASES
-
-## Function
-
-Lists all schemas or queries schemas by condition.
-
-## Precautions
-
-- The B-compatible SHOW DATABASES command is used to query databases, and the MogDB SHOW DATABASES command is used to query schemas.
-- Schemas are displayed by name.
-
-## Syntax
-
-```
-SHOW {DATABASES | SCHEMAS} [LIKE 'pattern' | WHERE expr]
-```
-
-## Parameter Description
-
-- **{DATABASES | SCHEMAS}**
-
- ```
- The two are equivalent.
- ```
-
-- **[LIKE 'pattern' | WHERE expr]**
-
- ```
- The **pattern** supports the LIKE syntax, which can be the full name or part of schema\_name for fuzzy query. The expr supports any expression. The common usage is **show databases where database = 'name'**.
- ```
-
-## Examples
-
-```sql
---View all schemas in the current database.
-MogDB=# create schema a1;
-CREATE SCHEMA
-
-MogDB=# show databases;
- Database
--------------------- 
- a1
- blockchain
- cstore
- db4ai
- dbe_perf
- dbe_pldebugger
- dbe_pldeveloper
- information_schema
- pg_catalog
- pg_toast
- pkg_service
- public
- snapshot
- sqladvisor
-(14 rows)
-
---Query schemas by condition.
-MogDB=# create schema abb1;
-CREATE SCHEMA
-MogDB=# create schema abb2;
-CREATE SCHEMA
-MogDB=# create schema abb3;
-CREATE SCHEMA
-MogDB=# show databases like '%bb%';
- Database
----------- 
- abb1
- abb2
- abb3
-(3 rows)
-
-MogDB=# show databases like 'a%';
- Database
----------- 
- a1
- abb1
- abb2
- abb3
-(4 rows)
-
-MogDB=# show schemas where database = 'a1';
- Database
----------- 
- a1
-(1 row)
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-function-status.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-function-status.md
deleted file mode 100644
index 9a8b0d67..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-function-status.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-title: dolphin SHOW FUNCTION STATUS
-summary: dolphin SHOW FUNCTION STATUS
-author: zhang cuiping
-date: 2022-10-24
----
-
-# SHOW FUNCTION STATUS
-
-## Precautions
-
-N/A
-
-## Function
-
-Displays information about stored functions.
-
-## Syntax
-
-```
-SHOW FUNCTION STATUS [LIKE 'pattern' | WHERE expr]
-```
-
-## Parameter Description
-
-- **WHERE expr**
-
- Filter expressions.
-
-- **LIKE 'pattern'**
-
- The pattern regular expression matches the function name.
-
-## Return Result Set
-
-| Column | Description | Remarks |
-| :------------------- | :------------------------------------------- | :------------------- |
-| Db | Schema name. | Displayed by schema. |
-| Name | Function name. | |
-| Type | Type | FUNCTION/PROCEDURE |
-| Definer | User | |
-| Modified | Modification time. | |
-| Created | Creation time. | |
-| Security_type | Security type. | |
-| Comment | Comments. | |
-| character_set_client | Character set of the client during creation. | The value is empty. |
-| collation_connection | Sorting rule of the client during creation. | The value is empty. |
-| Database Collation | Database collation. | |
-
-## Examples
-
-```sql
--- Create a function.
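--- Note (added): in the output below, Db is the schema that owns the function, and
--- Definer, Modified, and Created reflect the creating user and time on that system.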
-MogDB=# CREATE FUNCTION func_add_sql(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -CREATE FUNCTION -MogDB=# show function status like 'func_add_s%'; - Db | Name | Type | Definer | Modified | Created | Security_type | Comment | character_set_client | collation_connection | Database Collation ---------+--------------+----------+---------+-------------------------------+-------------------------------+---------------+---------+----------------------+----------------------+-------------------- - public | func_add_sql | FUNCTION | wyc | 2022-09-24 14:42:29.427382+08 | 2022-09-24 14:42:29.427382+08 | INVOKER | | | | en_US.UTF-8 -(1 row) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-grants.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-grants.md deleted file mode 100644 index f9f6356a..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-grants.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: dolphin SHOW GRANTS -summary: dolphin SHOW GRANTS -author: zhang cuiping -date: 2022-10-24 ---- - -# SHOW GRANTS - -## Function - -Displays user permission information in the MogDB. - -## Precautions - -- If no user is specified, the permission information of the current user is displayed. - -## Syntax - -``` -SHOW GRANTS [FOR user] -``` - -## Parameter Description - -- **user** - - Username. If this parameter is not specified, the permission information of the user who executes the statement is displayed. - -## Examples - -```sql -MogDB=# show grants for test; - Grants -------------------------------------------------------------------------------------------- - GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLE test TO test - GRANT SELECT ON TABLE test TO test - ALTER ROLE test WITH LOGIN -(3 rows) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-index.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-index.md deleted file mode 100644 index acb7c895..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-index.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: dolphin SHOW INDEX -summary: dolphin SHOW INDEX -author: zhang cuiping -date: 2022-10-24 ---- - -# SHOW INDEX - -## Function - -Displays the index information of a table. - -## Precautions - -- If schema_name is not specified, tables in the current schema are queried. -- If the specified table is in schema_name.table_name format and schema_name is specified, the schema of schema_name is used. - -## Syntax - -``` -SHOW { INDEX | INDEXES | KEYS } - { FROM | IN } table_name - [{FROM | IN} schema_name ] - [ WHERE expr ] -``` - -## Parameter Description - -- **table_name** - - ``` - Table name. You can specify a table name or **schema\_name.table\_name**. - ``` - -- **schema_name** - - ``` - Schema name. This parameter is optional. 
If this parameter is not specified, the current schema is queried. - ``` - -## Output Column Description - -| Column | Description | -| :------------ | :----------------------------------------------------------- | -| Table | Name of the table to which the index belongs | -| Non_unique | Whether the index is a non-unique index | -| Key_name | Index name | -| Seq_in_index | Sequence number of the index column in the index | -| Column_name | Column name of the index column | -| Collation | The value can be **A** (ascending order by default), **D** (descending order), or **NULL** (indexes cannot be sorted). | -| Cardinality | Calculated based on pg_statistic.stadistinct and pg_class.reltuples:
stadistinct > 0: stadistinct
stadistinct = 0: NULL
stadistinct < 0: reltuples * stadistinct * -1 |
-| Sub_part | Index prefix. If the column is only partially indexed, the value is the number of index characters. If the entire column is indexed, the value is NULL. Currently, the prefix index is not supported. The value is NULL. |
-| Packed | How to pack the key value. Specify pack_keys when creating a table. Otherwise, NULL is returned. Not supported currently. The value is NULL. |
-| Null | If the value may contain NULL, the value is **YES**. Otherwise, the value is **''**. |
-| Index_type | Index method, such as Btree and HASH. |
-| Comment | If the value of **indisusable** in the pg_index table is **true**, **disabled** is displayed. If the value of **indisusable** in the pg_index table is **false**, **''** is displayed. |
-| Index_comment | Comment specified by COMMENT when an index is created |
-
-## Examples
-
-```sql
---Create an index and a table.
-MogDB=# CREATE SCHEMA tst_schema;
-MogDB=# SET SEARCH_PATH TO tst_schema;
-
-MogDB=# CREATE TABLE tst_t1
-MogDB-# (
-MogDB(# id int primary key,
-MogDB(# name varchar(20) NOT NULL
-MogDB(# );
-MogDB=# CREATE INDEX tst_t1_name_ind on tst_t1(name);
-
---View the index of a table.
-MogDB=# show index from tst_t1 ;
- table | non_unique | key_name | seq_in_index | column_name | collation | cardinality | sub_part | packed | null | index_type | comment | index_comment
---------+------------+-----------------+--------------+-------------+-----------+-------------+----------+--------+------+------------+---------+---------------
- tst_t1 | t | tst_t1_name_ind | 1 | name | A | | | | | btree | |
- tst_t1 | f | tst_t1_pkey | 1 | id | A | | | | | btree | |
-(2 rows)
-```
-
-## Helpful Links
-
-N/A
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-master-status.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-master-status.md
deleted file mode 100644
index b062b7e8..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-master-status.md
+++ /dev/null
@@ -1,55 +0,0 @@
----
-title: dolphin SHOW MASTER STATUS
-summary: dolphin SHOW MASTER STATUS
-author: zhang cuiping
-date: 2022-10-24
----
-
-# SHOW MASTER STATUS
-
-## Function
-
-Views the progress of WAL (Xlog).
-
-## Precautions
-
-- This statement can also be executed in a non-primary database.
-- When the command is executed on the primary database, the results of Xlog_Lsn and pg_current_xlog_location are the same. When the command is executed on a non-primary database, the results of Xlog_Lsn and pg_last_xlog_replay_location are the same.
-- The primary database uses this statement to query the real-time progress of Xlog writing.
-- The standby database uses this statement to query the real-time progress of the current Xlog replay.
-
-## Syntax
-
-```
-SHOW MASTER STATUS
-```
-
-## Parameter Description
-
-- **Xlog_File_Name**
-
- ```
- Name of the Xlog file that is being processed.
- ```
-
-- **Xlog_File_Offset**
-
- ```
- Offset position of the Xlog file that is being processed.
- ```
-
-- **Xlog_Lsn**
-
- ```
- LSN of the current Xlog.
- ``` - -## Examples - -```sql -MogDB=# show master status; - Xlog_File_Name | Xlog_File_Offset | Xlog_Lsn ---------------------------+------------------+------------ - 000000010000000000000010 | 7142672 | 0/106CFD10 -(1 row) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-plugins.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-plugins.md deleted file mode 100644 index 531ac4f4..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-plugins.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: dolphin SHOW PLUGINS -summary: dolphin SHOW PLUGINS -author: zhang cuiping -date: 2022-10-24 ---- - -# SHOW PLUGINS - -## Function - -View the plug-in list in the current database. - -## Precautions - -N/A - -## Syntax - -``` -SHOW PLUGINS -``` - -## Parameter Description - -N/A - -## Examples - -```sql ---View the plug-in list. -MogDB=# SHOW PLUGINS; - Name | Status | Type | Library | License | Comment ------------------+----------+------+---------+---------+---------------------------------------------------- - roach_api_stub | DISABLED | | NULL | | roach api stub - file_fdw | ACTIVE | | NULL | | foreign-data wrapper for flat file access - security_plugin | ACTIVE | | NULL | | provides security functionality - hdfs_fdw | ACTIVE | | NULL | | foreign-data wrapper for flat file access - plpgsql | ACTIVE | | NULL | | PL/pgSQL procedural language - dolphin | ACTIVE | | NULL | | sql engine - dist_fdw | ACTIVE | | NULL | | foreign-data wrapper for distfs access - postgres_fdw | DISABLED | | NULL | | foreign-data wrapper for remote PostgreSQL servers - hstore | ACTIVE | | NULL | | data type for storing sets of (key, value) pairs - log_fdw | ACTIVE | | NULL | | Foreign Data Wrapper for accessing logging data - ---Update the plug-in status. 
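---Note (added): dropping an extension marks it DISABLED in the plug-in list, and
---re-creating it returns its status to ACTIVE, as the two queries below show.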
-MogDB=# drop extension hstore;
-MogDB=# SHOW PLUGINS;
- Name | Status | Type | Library | License | Comment
------------------+----------+------+---------+---------+----------------------------------------------------
- roach_api_stub | DISABLED | | NULL | | roach api stub
- file_fdw | ACTIVE | | NULL | | foreign-data wrapper for flat file access
- security_plugin | ACTIVE | | NULL | | provides security functionality
- hdfs_fdw | ACTIVE | | NULL | | foreign-data wrapper for flat file access
- plpgsql | ACTIVE | | NULL | | PL/pgSQL procedural language
- dolphin | ACTIVE | | NULL | | sql engine
- dist_fdw | ACTIVE | | NULL | | foreign-data wrapper for distfs access
- postgres_fdw | DISABLED | | NULL | | foreign-data wrapper for remote PostgreSQL servers
- hstore | DISABLED | | NULL | | data type for storing sets of (key, value) pairs
- log_fdw | ACTIVE | | NULL | | Foreign Data Wrapper for accessing logging data
-
-MogDB=# CREATE extension hstore;
-MogDB=# show plugins;
- Name | Status | Type | Library | License | Comment
------------------+----------+------+---------+---------+----------------------------------------------------
- roach_api_stub | DISABLED | | NULL | | roach api stub
- file_fdw | ACTIVE | | NULL | | foreign-data wrapper for flat file access
- security_plugin | ACTIVE | | NULL | | provides security functionality
- hdfs_fdw | ACTIVE | | NULL | | foreign-data wrapper for flat file access
- plpgsql | ACTIVE | | NULL | | PL/pgSQL procedural language
- dolphin | ACTIVE | | NULL | | sql engine
- dist_fdw | ACTIVE | | NULL | | foreign-data wrapper for distfs access
- postgres_fdw | DISABLED | | NULL | | foreign-data wrapper for remote PostgreSQL servers
- hstore | ACTIVE | | NULL | | data type for storing sets of (key, value) pairs
- log_fdw | ACTIVE | | NULL | | Foreign Data Wrapper for accessing logging data
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-privileges.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-privileges.md
deleted file mode 100644
index 1113e597..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-privileges.md
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: dolphin SHOW PRIVILEGES
-summary: dolphin SHOW PRIVILEGES
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW PRIVILEGES
-
-## Function
-
-Displays a list of permission information in the current database.
-
-## Precautions
-
-N/A
-
-## Syntax
-
-```
-SHOW PRIVILEGES
-```
-
-## Parameter Description
-
-N/A
-
-## Examples
-
-```sql
--- Displays a list of permission information in the current database
-MogDB=# show privileges;
- Privileges | Context | Comment
-----------------------+------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------
- Alter | Large object,Sequence,Database,Foreign Server,Function,Node group,Schema,Tablespace,Type,Directory,Package | To alter the 'objects'
- Alter any index | Index | To alter any index
- Alter any sequence | Sequence | To alter any sequence
- Alter any table | Table | To alter any table
- Alter any trigger | Trigger | To alter any trigger
- Alter any type | Type | To alter any type
- Comment | Table | To comment 
on table - Compute | Node group | To compute on node group - Connect | Database | To connect database - Create | Database,Schema,Tablespace,Node group | To create database,schema,tablespace,node group - Create any function | Function | To create any function - Create any index | Index | To create any index - Create any package | Package | To create any package - Create any sequence | Sequence | To create any sequence - Create any synonym | Synonym | To create any synonym - Create any table | Table | To create any table - Create any trigger | Trigger | To create any trigger - Create any type | Type | To create any type - Delete | Table | To delete table - Delete any table | Table | To delete any table - Drop any sequence | Sequence | To drop any sequence - Drop any synonym | Synonym | To drop any synonym - Drop any table | Table | To drop any table - Drop any trigger | Trigger | To drop any trigger - Drop any type | Type | To drop any type - Execute | Function,Procedure,Package | To execute function, procedure,Package - Execute any function | Function | To execute any function - Execute any package | Package | To execute any package - Index | Table | To create index on table - Insert | Table | To insert into table - Insert any table | Table | To insert any table - References | Table | To have references on table - Select | Large object,Sequence,Table | To select on large object,sequence and table - Select any sequence | Sequence | To select any sequence - Select any table | Table | To select on any table - Temporary | Database | To create temporary table in database - Temp | Database | To create temporary table in database - Truncate | Table | To truncate table - Update | Large object,Sequence,Table | To update large object,Sequence,Table - Update any table | Table | To update any table - Usage | Domain,Foreign data wrapper,Foreign server,Language,Schema,Sequence,Type | To use domain,fdw,foreign server,language,schema,sequence and type - Vacuum | Table | To vacuum table -(42 rows) -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-procedure-status.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-procedure-status.md deleted file mode 100644 index 7f5e09e0..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-procedure-status.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: dolphin SHOW PROCEDURE STATUS -summary: dolphin SHOW PROCEDURE STATUS -author: zhang cuiping -date: 2022-10-24 ---- - -# SHOW PROCEDURE STATUS - -## Precautions - -N/A - -## Function - -Displays information about stored procedures. - -## Syntax - -``` -SHOW PROCEDURE STATUS [LIKE 'pattern' | WHERE expr] -``` - -## Parameter Description - -For details, see [SHOW FUNCTION STATUS](./dolphin-show-function-status.md). - -## Display Content - -For details, see [SHOW FUNCTION STATUS](./dolphin-show-function-status.md). - -## Examples - -```sql -MogDB=# --Create a stored procedure. -MogDB=# create or replace procedure proc1() as declare genre_rec record; -- Declare record type. -MogDB$# begin -MogDB$# for genre_rec in (select e1.ename from public.emp1 e1 join public.emp1mot e1m on e1.mgr = e1m.mgr) -MogDB$# loop -MogDB$# raise notice ' %', genre_rec."ename"; -- Printing. 
-MogDB$# end loop;
-MogDB$# end;
-MogDB$# /
-CREATE PROCEDURE
- MogDB=# --View information.
-MogDB=# show procedure status like 'proc%';
- Db | Name | Type | Definer | Modified | Created | Security_type | Comment | character_set_client | collation_connection |Database Collation
---------+-------+-----------+---------+-------------------------------+-------------------------------+---------------+---------+----------------------+----------------------+--------------------
- public | proc1 | PROCEDURE | wyc | 2022-09-24 14:46:40.868293+08 | 2022-09-24 14:46:40.868293+08 | INVOKER | | | |en_US.UTF-8
-(1 row)
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-processlist.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-processlist.md
deleted file mode 100644
index 73f1518b..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-processlist.md
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: dolphin SHOW PROCESSLIST
-summary: dolphin SHOW PROCESSLIST
-author: zhang cuiping
-date: 2022-10-24
----
-
-# SHOW PROCESSLIST
-
-## Function
-
-Queries information about current external connections (or internal threads).
-
-## Precautions
-
-- The Id column corresponds to sessionid in the pg_stat_activity view.
-- The Info column records the last SQL statement executed by the connection, which is different from that in B-compatible databases (B-compatible databases display the SQL statement that is being executed). However, you can check whether the SQL statement is being executed based on the State column. If the State column is active, the SQL statement corresponding to the Info column is being executed.
-
-## Syntax
-
-```
-SHOW [FULL] PROCESSLIST
-```
-
-## Parameter Description
-
-- **FULL**
-
- ```
- If the FULL option is not used, the Info column displays only SQL statements whose length does not exceed 100 characters.
-
- If the FULL option is used, the Info column can display SQL statements whose length does not exceed 1024 characters. If the length of an SQL statement exceeds 1024 characters, the part beyond 1024 characters is truncated. 
- ``` - -## Examples - -```sql ---Method 1 -MogDB=# show processlist; - Id | Pid | QueryId | UniqueSqlId | User | Host | db | Command | - BackendStart | XactStart | Time | State | Info ------------------+-----------------+-------------------+-------------+-----------+------+----------+------------------------+--- -----------------------------+-------------------------------+--------+--------+---------------------------------------- - 139653370304256 | 139653370304256 | 0 | 0 | MogDB | | postgres | ApplyLauncher | 20 -22-06-21 16:46:19.656076+08 | | | | - 139653319255808 | 139653319255808 | 0 | 0 | MogDB | | postgres | Asp | 20 -22-06-21 16:46:19.728521+08 | | 1 | active | - 139653336483584 | 139653336483584 | 0 | 0 | MogDB | | postgres | PercentileJob | 20 -22-06-21 16:46:19.728527+08 | | 8 | active | - 139653302175488 | 139653302175488 | 0 | 0 | MogDB | | postgres | statement flush thread | 20 -22-06-21 16:46:19.728558+08 | | 508507 | idle | - 139653198239488 | 139653198239488 | 0 | 0 | MogDB | | postgres | WorkloadMonitor | 20 -22-06-21 16:46:19.750133+08 | | | | - 139653181298432 | 139653181298432 | 0 | 0 | MogDB | | postgres | WLMArbiter | 20 -22-06-21 16:46:19.750976+08 | | | | - 139653215110912 | 139653215110912 | 0 | 0 | MogDB | | postgres | workload | 20 -22-06-21 16:46:19.754504+08 | 2022-06-21 16:46:19.769585+08 | 508507 | active | WLM fetch collect info from data nodes - 139653421840128 | 139653421840128 | 0 | 0 | MogDB | | postgres | JobScheduler | 20 -22-06-27 10:00:54.754007+08 | | 0 | active | - 139653044328192 | 139653044328192 | 48976645947655327 | 1772643515 | MogDB | -1 | dolphin | gsql | 20 -22-06-27 14:00:53.163338+08 | 2022-06-27 14:01:26.794658+08 | 0 | active | show processlist; - 139653027546880 | 139653027546880 | 48976645947655326 | 1775585557 | MogDB | -1 | postgres | gsql | 20 -22-06-27 14:01:03.969962+08 | 2022-06-27 14:01:19.967521+08 | 7 | active | select pg_sleep(100); -(10 rows) - ---Method 2 -MogDB=# show full processlist; - Id | Pid | QueryId | UniqueSqlId | User | Host | db | Command | - BackendStart | XactStart | Time | State | Info ------------------+-----------------+-------------------+-------------+-----------+------+----------+------------------------+--- -----------------------------+-------------------------------+--------+--------+---------------------------------------- - 139653370304256 | 139653370304256 | 0 | 0 | MogDB | | postgres | ApplyLauncher | 20 -22-06-21 16:46:19.656076+08 | | | | - 139653319255808 | 139653319255808 | 0 | 0 | MogDB | | postgres | Asp | 20 -22-06-21 16:46:19.728521+08 | | 1 | active | - 139653336483584 | 139653336483584 | 0 | 0 | MogDB | | postgres | PercentileJob | 20 -22-06-21 16:46:19.728527+08 | | 8 | active | - 139653302175488 | 139653302175488 | 0 | 0 | MogDB | | postgres | statement flush thread | 20 -22-06-21 16:46:19.728558+08 | | 508507 | idle | - 139653198239488 | 139653198239488 | 0 | 0 | MogDB | | postgres | WorkloadMonitor | 20 -22-06-21 16:46:19.750133+08 | | | | - 139653181298432 | 139653181298432 | 0 | 0 | MogDB | | postgres | WLMArbiter | 20 -22-06-21 16:46:19.750976+08 | | | | - 139653215110912 | 139653215110912 | 0 | 0 | MogDB | | postgres | workload | 20 -22-06-21 16:46:19.754504+08 | 2022-06-21 16:46:19.769585+08 | 508507 | active | WLM fetch collect info from data nodes - 139653421840128 | 139653421840128 | 0 | 0 | MogDB | | postgres | JobScheduler | 20 -22-06-27 10:00:54.754007+08 | | 0 | active | - 139653044328192 | 139653044328192 | 48976645947655327 | 1772643515 | MogDB | -1 | dolphin | gsql 
| 20 -22-06-27 14:00:53.163338+08 | 2022-06-27 14:01:26.794658+08 | 0 | active | show processlist; - 139653027546880 | 139653027546880 | 48976645947655326 | 1775585557 | MogDB | -1 | postgres | gsql | 20 -22-06-27 14:01:03.969962+08 | 2022-06-27 14:01:19.967521+08 | 7 | active | select pg_sleep(100); -(10 rows) -``` - -## Helpful Links - -N/A \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-slave-hosts.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-slave-hosts.md deleted file mode 100644 index 41d40aa5..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-slave-hosts.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: dolphin SHOW SLAVE HOSTS -summary: dolphin SHOW SLAVE HOSTS -author: zhang cuiping -date: 2022-10-24 ---- - -# SHOW SLAVE HOSTS - -## Function - -Views WAL (Xlog) synchronization status information, such as the locations where the sender sends logs and where the receiver receives logs. - -## Precautions - -- This command is valid in the primary database. -- The displayed result is the same as that in the **select \* from pg_stat_replication**. - -## Syntax - -``` -SHOW SLAVE HOSTS -``` - -## Parameter Description - -- **pid** - - ``` - PID of the thread. - ``` - -- **usesysid** - - ``` - User system ID. - ``` - -- **usename** - - ``` - Username. - ``` - -- **application_name** - - ``` - Program name. - ``` - -- **client_addr** - - ``` - Client address. - ``` - -- **client_port** - - ``` - Port of the client. - ``` - -- **backend_start** - - ``` - Start time of the program. - ``` - -- **state** - - ``` - Log replication state. - Catch-up state - Consistent streaming state - ``` - -- **sender_sent_location** - - ``` - Location where the sender sends logs. - ``` - -- **receiver_write_location** - - ``` - Location where the receiver writes logs. - ``` - -- **receiver_flush_location** - - ``` - Location where the receiver flushes logs. - ``` - -- **receiver_replay_location** - - ``` - Location where the receiver replays logs. - ``` - -- **sync_priority** - - ``` - Priority of synchronous duplication (**0** indicates asynchronization.) - ``` - -- **sync_state** - - ``` - Synchronization state. 
- Asynchronous replication - Synchronous replication - Potential synchronization - ``` - -## Examples - -```sql -MogDB=# show slave hosts; --[ RECORD 1 ]------------+---------------------------------- -pid | 140395615176448 -usesysid | 10 -usename | opengauss -application_name | WalSender to Standby[walreceiver] -client_addr | 127.0.0.1 -client_hostname | -client_port | 43174 -backend_start | 2022-08-23 18:41:12.398717+08 -state | Streaming -sender_sent_location | 0/1098BB08 -receiver_write_location | 0/1098BB08 -receiver_flush_location | 0/1098BB08 -receiver_replay_location | 0/1098BB08 -sync_priority | 1 -sync_state | Sync -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-status.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-status.md deleted file mode 100644 index add491f6..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-status.md +++ /dev/null @@ -1,499 +0,0 @@ ---- -title: dolphin SHOW STATUS -summary: dolphin SHOW STATUS -author: Guo Huan -date: 2023-05-15 ---- - -# SHOW STATUS - -## Function - -SHOW STATUS displays data for all values of the current statistical status of the system. This statement does not require any privileges. It requires only the ability to connect to the server. - -## Precautions - -N/A - -## Syntax - -```sql -SHOW [GLOBAL | SESSION] STATUS - [LIKE 'pattern' | WHERE expr] -``` - -## Examples - -```sql -MogDB=# show status; - Variable_name | Value ---------------------------------------+------------------------------- - apply_counter | 0 - apply_total_dur | 0 - avgiotim | 11 - bg_commit_counter | 14 - bg_resp_avg | 2380520 - bg_resp_max | 7032370 - bg_resp_min | 160 - bg_resp_total | 33327281 - bg_rollback_counter | 0 - blk_read_time | 0 - blks_hit | 475019 - blks_read | 1296 - blk_write_time | 0 - buffers_alloc | 0 - buffers_backend | 0 - buffers_backend_fsync | 0 - buffers_checkpoint | 0 - buffers_clean | 0 - checkpoints_req | 4 - checkpoints_timed | 27 - checkpoint_sync_time | 6376 - checkpoint_write_time | 5 - ckpt_clog_flush_num | 0 - ckpt_csnlog_flush_num | 0 - ckpt_multixact_flush_num | 0 - ckpt_predicate_flush_num | 0 - ckpt_redo_point | 0/28DDB58 - ckpt_twophase_flush_num | 0 - commit_counter | 5 - confl_bufferpin | 0 - confl_deadlock | 0 - conflicts | 0 - confl_lock | 0 - confl_snapshot | 0 - confl_tablespace | 0 - curr_dwn | 0 - current_xlog_insert_lsn | 0/28DDF28 - curr_start_page | 1661 - curr_time | 1 - datid | 16384 - datname | tt - dcl_count | 0 - ddl_count | 0 - deadlocks | 0 - delete_count | 0 - dml_count | 8 - file_id | 0 - file_reset_num | 0 - file_trunc_num | 0 - global_instance_time_count | 10 - high_threshold_pages | 0 - high_threshold_writes | 0 - insert_count | 0 - last_replayed_read_ptr | 0 - local_max_ptr | 0 - locks_count | 43 - login_counter | 3 - logout_counter | 2 - low_threshold_pages | 17 - low_threshold_writes | 2 - lstiotim | 10 - maxiowtm | 32 - maxwritten_clean | 0 - miniotim | 3 - min_recovery_point | 0 - node_name | single_node1 - numbackends | 1 - os_runtime_count | 19 - os_threads_count | 23 - p80 | 0 - p95 | 0 - pgwr_actual_flush_total_num | 7 - pgwr_last_flush_num | 7 - phyblkrd | 501 - phyblkwrt | 7 - phyblkwrt | 6 - phyrds | 501 - phywrts | 6 - phywrts | 7 - 
primary_flush_ptr | 42852136 - process_pending_counter | 0 - process_pending_total_dur | 0 - queue_head_page_rec_lsn | 0/28DDB58 - queue_rec_lsn | 0/28DDB58 - read_data_io_counter | 0 - read_data_io_total_dur | 0 - read_ptr | 42851160 - read_xlog_io_counter | 2 - read_xlog_io_total_dur | 471 - recovery_done_ptr | 0 - redo_done_time | 0 - redo_start_ptr | 42851160 - redo_start_time | 732333220674305 - remain_dirty_page_num | 3 - resp_avg | 8090 - resp_max | 35491 - resp_min | 140 - resp_total | 40449 - rollback_counter | 0 - select_count | 5 - single_node1-backend_used_memory | 1 - single_node1-cstore_used_memory | 0 - single_node1-dynamic_peak_memory | 562 - single_node1-dynamic_peak_shrctx | 180 - single_node1-dynamic_used_memory | 561 - single_node1-dynamic_used_shrctx | 180 - single_node1-gpu_dynamic_peak_memory | 0 - single_node1-gpu_dynamic_used_memory | 0 - single_node1-gpu_max_dynamic_memory | 0 - single_node1-max_backend_memory | 348 - single_node1-max_cstore_memory | 512 - single_node1-max_dynamic_memory | 8142 - single_node1-max_process_memory | 12288 - single_node1-max_sctpcomm_memory | 0 - single_node1-max_shared_memory | 3285 - single_node1-other_used_memory | 0 - single_node1-pooler_conn_memory | 0 - single_node1-pooler_freeconn_memory | 0 - single_node1-process_used_memory | 800 - single_node1-sctpcomm_peak_memory | 0 - single_node1-sctpcomm_used_memory | 0 - single_node1-shared_used_memory | 215 - single_node1-storage_compress_memory | 0 - single_node1-udf_reserved_memory | 0 - speed | 0 - stats_reset | 2023-03-16 17:34:43.424584+08 - stats_reset | 2023-03-16 17:34:25.277803+08 - summary_file_iostat_count | 62 - temp_bytes | 0 - temp_files | 0 - total_pages | 17 - total_writes | 2 - tup_deleted | 61 - tup_fetched | 64396 - tup_inserted | 9906 - tup_returned | 55952 - tup_updated | 305 - update_count | 0 - user_id | 10 - user_name | hlv - wait_events_count | 401 - worker_info | no redo worker - workload | default_pool - write_data_io_counter | 0 - write_data_io_total_dur | 0 - writetim | 75 - xact_commit | 1235 - xact_rollback | 28 -(148 rows) - -MogDB=# show global status; - Variable_name | Value ---------------------------------------+------------------------------- - apply_counter | 0 - apply_total_dur | 0 - avgiotim | 10 - bg_commit_counter | 168 - bg_resp_avg | 53200083 - bg_resp_max | 122185319 - bg_resp_min | 79 - bg_resp_total | 8937613869 - bg_rollback_counter | 2 - blk_read_time | 0 - blks_hit | 504050 - blks_read | 1444 - blk_write_time | 0 - buffers_alloc | 0 - buffers_backend | 0 - buffers_backend_fsync | 0 - buffers_checkpoint | 0 - buffers_clean | 0 - checkpoints_req | 4 - checkpoints_timed | 29 - checkpoint_sync_time | 6794 - checkpoint_write_time | 5 - ckpt_clog_flush_num | 1 - ckpt_csnlog_flush_num | 0 - ckpt_multixact_flush_num | 1 - ckpt_predicate_flush_num | 0 - ckpt_redo_point | 0/28DE060 - ckpt_twophase_flush_num | 0 - commit_counter | 10 - confl_bufferpin | 0 - confl_deadlock | 0 - conflicts | 0 - confl_lock | 0 - confl_snapshot | 0 - confl_tablespace | 0 - curr_dwn | 0 - current_xlog_insert_lsn | 0/28DE180 - curr_start_page | 1684 - curr_time | 1 - datid | 16384 - datname | tt - dcl_count | 0 - ddl_count | 0 - deadlocks | 0 - delete_count | 0 - dml_count | 21 - file_id | 0 - file_reset_num | 0 - file_trunc_num | 2 - global_instance_time_count | 10 - high_threshold_pages | 0 - high_threshold_writes | 0 - insert_count | 0 - last_replayed_read_ptr | 0 - local_max_ptr | 0 - locks_count | 44 - login_counter | 9 - logout_counter | 8 - low_threshold_pages | 
27 - low_threshold_writes | 4 - lstiotim | 8 - maxiowtm | 32 - maxwritten_clean | 0 - miniotim | 3 - min_recovery_point | 0 - node_name | single_node1 - numbackends | 1 - os_runtime_count | 19 - os_threads_count | 23 - p80 | 0 - p95 | 0 - pgwr_actual_flush_total_num | 10 - pgwr_last_flush_num | 3 - phyblkrd | 520 - phyblkwrt | 10 - phyblkwrt | 8 - phyrds | 520 - phywrts | 8 - phywrts | 9 - primary_flush_ptr | 42852736 - process_pending_counter | 0 - process_pending_total_dur | 0 - queue_head_page_rec_lsn | 0/0 - queue_rec_lsn | 0/28DE060 - read_data_io_counter | 0 - read_data_io_total_dur | 0 - read_ptr | 42851160 - read_xlog_io_counter | 2 - read_xlog_io_total_dur | 471 - recovery_done_ptr | 0 - redo_done_time | 0 - redo_start_ptr | 42851160 - redo_start_time | 732333220674305 - remain_dirty_page_num | 0 - resp_avg | 47907 - resp_max | 254914 - resp_min | 116 - resp_total | 479066 - rollback_counter | 0 - select_count | 15 - single_node1-backend_used_memory | 1 - single_node1-cstore_used_memory | 0 - single_node1-dynamic_peak_memory | 571 - single_node1-dynamic_peak_shrctx | 181 - single_node1-dynamic_used_memory | 558 - single_node1-dynamic_used_shrctx | 181 - single_node1-gpu_dynamic_peak_memory | 0 - single_node1-gpu_dynamic_used_memory | 0 - single_node1-gpu_max_dynamic_memory | 0 - single_node1-max_backend_memory | 348 - single_node1-max_cstore_memory | 512 - single_node1-max_dynamic_memory | 8142 - single_node1-max_process_memory | 12288 - single_node1-max_sctpcomm_memory | 0 - single_node1-max_shared_memory | 3285 - single_node1-other_used_memory | 0 - single_node1-pooler_conn_memory | 0 - single_node1-pooler_freeconn_memory | 0 - single_node1-process_used_memory | 806 - single_node1-sctpcomm_peak_memory | 0 - single_node1-sctpcomm_used_memory | 0 - single_node1-shared_used_memory | 220 - single_node1-storage_compress_memory | 0 - single_node1-udf_reserved_memory | 0 - speed | 0 - stats_reset | 2023-03-16 17:34:25.277803+08 - stats_reset | 2023-03-16 17:34:43.424584+08 - summary_file_iostat_count | 65 - temp_bytes | 0 - temp_files | 0 - total_pages | 27 - total_writes | 4 - tup_deleted | 61 - tup_fetched | 68794 - tup_inserted | 9906 - tup_returned | 59299 - tup_updated | 305 - update_count | 0 - user_id | 10 - user_name | hlv - wait_events_count | 401 - worker_info | no redo worker - workload | default_pool - write_data_io_counter | 0 - write_data_io_total_dur | 0 - writetim | 93 - xact_commit | 1324 - xact_rollback | 30 -(148 rows) - -MogDB=# show session status; - Variable_name | Value ---------------------------------------+------------------------------- - apply_counter | 0 - apply_total_dur | 0 - avgiotim | 10 - bg_commit_counter | 168 - bg_resp_avg | 53200083 - bg_resp_max | 122185319 - bg_resp_min | 79 - bg_resp_total | 8937613869 - bg_rollback_counter | 2 - blk_read_time | 0 - blks_hit | 504050 - blks_read | 1444 - blk_write_time | 0 - buffers_alloc | 0 - buffers_backend | 0 - buffers_backend_fsync | 0 - buffers_checkpoint | 0 - buffers_clean | 0 - checkpoints_req | 4 - checkpoints_timed | 29 - checkpoint_sync_time | 6794 - checkpoint_write_time | 5 - ckpt_clog_flush_num | 1 - ckpt_csnlog_flush_num | 0 - ckpt_multixact_flush_num | 1 - ckpt_predicate_flush_num | 0 - ckpt_redo_point | 0/28DE060 - ckpt_twophase_flush_num | 0 - commit_counter | 10 - confl_bufferpin | 0 - confl_deadlock | 0 - conflicts | 0 - confl_lock | 0 - confl_snapshot | 0 - confl_tablespace | 0 - curr_dwn | 0 - current_xlog_insert_lsn | 0/28DE180 - curr_start_page | 1684 - curr_time | 1 - datid | 16384 - 
datname | tt - dcl_count | 0 - ddl_count | 0 - deadlocks | 0 - delete_count | 0 - dml_count | 21 - file_id | 0 - file_reset_num | 0 - file_trunc_num | 2 - global_instance_time_count | 10 - high_threshold_pages | 0 - high_threshold_writes | 0 - insert_count | 0 - last_replayed_read_ptr | 0 - local_max_ptr | 0 - locks_count | 44 - login_counter | 9 - logout_counter | 8 - low_threshold_pages | 27 - low_threshold_writes | 4 - lstiotim | 8 - maxiowtm | 32 - maxwritten_clean | 0 - miniotim | 3 - min_recovery_point | 0 - node_name | single_node1 - numbackends | 1 - os_runtime_count | 19 - os_threads_count | 23 - p80 | 0 - p95 | 0 - pgwr_actual_flush_total_num | 10 - pgwr_last_flush_num | 3 - phyblkrd | 520 - phyblkwrt | 10 - phyblkwrt | 8 - phyrds | 520 - phywrts | 8 - phywrts | 9 - primary_flush_ptr | 42852736 - process_pending_counter | 0 - process_pending_total_dur | 0 - queue_head_page_rec_lsn | 0/0 - queue_rec_lsn | 0/28DE060 - read_data_io_counter | 0 - read_data_io_total_dur | 0 - read_ptr | 42851160 - read_xlog_io_counter | 2 - read_xlog_io_total_dur | 471 - recovery_done_ptr | 0 - redo_done_time | 0 - redo_start_ptr | 42851160 - redo_start_time | 732333220674305 - remain_dirty_page_num | 0 - resp_avg | 47907 - resp_max | 254914 - resp_min | 116 - resp_total | 479066 - rollback_counter | 0 - select_count | 15 - single_node1-backend_used_memory | 1 - single_node1-cstore_used_memory | 0 - single_node1-dynamic_peak_memory | 571 - single_node1-dynamic_peak_shrctx | 181 - single_node1-dynamic_used_memory | 558 - single_node1-dynamic_used_shrctx | 181 - single_node1-gpu_dynamic_peak_memory | 0 - single_node1-gpu_dynamic_used_memory | 0 - single_node1-gpu_max_dynamic_memory | 0 - single_node1-max_backend_memory | 348 - single_node1-max_cstore_memory | 512 - single_node1-max_dynamic_memory | 8142 - single_node1-max_process_memory | 12288 - single_node1-max_sctpcomm_memory | 0 - single_node1-max_shared_memory | 3285 - single_node1-other_used_memory | 0 - single_node1-pooler_conn_memory | 0 - single_node1-pooler_freeconn_memory | 0 - single_node1-process_used_memory | 806 - single_node1-sctpcomm_peak_memory | 0 - single_node1-sctpcomm_used_memory | 0 - single_node1-shared_used_memory | 220 - single_node1-storage_compress_memory | 0 - single_node1-udf_reserved_memory | 0 - speed | 0 - stats_reset | 2023-03-16 17:34:25.277803+08 - stats_reset | 2023-03-16 17:34:43.424584+08 - summary_file_iostat_count | 65 - temp_bytes | 0 - temp_files | 0 - total_pages | 27 - total_writes | 4 - tup_deleted | 61 - tup_fetched | 68794 - tup_inserted | 9906 - tup_returned | 59299 - tup_updated | 305 - update_count | 0 - user_id | 10 - user_name | hlv - wait_events_count | 401 - worker_info | no redo worker - workload | default_pool - write_data_io_counter | 0 - write_data_io_total_dur | 0 - writetim | 93 - xact_commit | 1324 - xact_rollback | 30 -(148 rows) - -MogDB=# show status like 'xact%'; - Variable_name | Value ----------------+------- - xact_commit | 1390 - xact_rollback | 31 -(2 rows) - -MogDB=# show status where variable_name = 'ckpt_redo_point'; - Variable_name | Value ------------------+----------- - ckpt_redo_point | 0/28DE2A0 -(1 row) -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-table-status.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-table-status.md deleted file mode 100644 index 
bb72b6b2..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-table-status.md
+++ /dev/null
@@ -1,109 +0,0 @@
----
-title: dolphin SHOW TABLE STATUS
-summary: dolphin SHOW TABLE STATUS
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW TABLE STATUS
-
-## Function
-
-Views the status of tables in the current database (or schema).
-
-## Precautions
-
-If db_name is not specified, the status of tables in the current database (or schema) is queried.
-
-## Syntax
-
-```
-SHOW TABLE STATUS
-    [{FROM | IN} db_name]
-    [LIKE 'pattern' | WHERE expr]
-```
-
-## Parameter Description
-
-- **db_name**
-
-  Database name (or schema). This parameter is optional. If it is not specified, the current database or schema is queried.
-
-- **LIKE 'pattern'**
-
-  The pattern matches the first column (column name: 'Name [`pattern`]') in the displayed result.
-
-## Output Field Description
-
-| Field | Description |
-| --------------- | ------------------------------------------------------------ |
-| Name | Table name |
-| Engine | Storage engine type. Value range: USTORE (the table uses the Inplace-Update storage engine) and ASTORE (the table uses the Append-Only storage engine). |
-| Version | The default value is NULL |
-| Row_format | Storage method. Value range: ROW (table data is stored in rows) and COLUMN (table data is stored in columns). |
-| Rows | Number of rows |
-| Avg_row_length | The default value is NULL |
-| Data_length | Data size, obtained from pg_relation_size(oid) |
-| Max_data_length | The default value is NULL |
-| Index_length | Index size, obtained from pg_indexes_size(oid) |
-| Data_free | The default value is NULL |
-| Auto_increment | Last value of the sequence when the primary key is a sequence |
-| Create_time | Creation time |
-| Update_time | Update time |
-| Check_time | The default value is NULL |
-| Collation | Collation of the table |
-| Checksum | The default value is NULL |
-| Create_options | Options for creating a table |
-| Comment | Comment |
-
-## Examples
-
-```sql
-MogDB=# CREATE SCHEMA tst_schema;
-MogDB=#
-MogDB=# SET SEARCH_PATH TO tst_schema;
-MogDB=#
-MogDB=# CREATE TABLE tst_t1
-MogDB-# (
-MogDB(# id serial primary key,
-MogDB(# name varchar(20),
-MogDB(# phone text
-MogDB(# )WITH(ORIENTATION=ROW, STORAGE_TYPE=USTORE);
-MogDB=#
-MogDB=# COMMENT ON TABLE tst_t1 IS 'this is comment';
-MogDB=#
-MogDB=# CREATE VIEW tst_v1 AS SELECT * FROM tst_t1;
-MogDB=#
-MogDB=# CREATE TABLE tst_t2
-MogDB-# (
-MogDB(# id serial primary key,
-MogDB(# name varchar(20),
-MogDB(# phone text
-MogDB(# )WITH(ORIENTATION=COLUMN);
-MogDB=#
-
--- Viewing Table Status
-MogDB=# show table status;
-  Name  | Engine | Version | Row_format | Rows | Avg_row_length | Data_length | Max_data_length | Index_length | Data_free | Auto_increment | Create_time | Update_time | Check_time | Collation | Checksum | Create_options | Comment
---------+--------+---------+------------+------+----------------+-------------+-----------------+--------------+-----------+----------------+---------------------+---------------------+------------+-------------+----------+------------------------------------------------------+-----------------
- tst_t1 | USTORE | | ROW | 0 | 0 | 0 | 0 | 57344 | 0 | 1 | 2022-10-18 09:04:24 | 2022-10-18 09:04:24 | | en_US.UTF-8 | | {orientation=row,storage_type=ustore,compression=no} | this is comment
- tst_t2 | ASTORE | | COLUMN
| 0 | 0 | 24576 | 0 | 8192 | 0 | 1 | 2022-10-18 09:04:24 | 2022-10-18 09:04:24 | | en_US.UTF-8 | | {orientation=column,compression=low} |
- tst_v1 | | | | 0 | 0 | 0 | 0 | 0 | 0 | | 2022-10-18 09:04:24 | 2022-10-18 09:04:24 | | en_US.UTF-8 | | |
-(3 rows)
-
--- like Fuzzy Matching
-MogDB=# show table status in tst_schema like '%tst_t%';
-  Name  | Engine | Version | Row_format | Rows | Avg_row_length | Data_length | Max_data_length | Index_length | Data_free | Auto_increment | Create_time | Update_time | Check_time | Collation | Checksum | Create_options | Comment
---------+--------+---------+------------+------+----------------+-------------+-----------------+--------------+-----------+----------------+---------------------+---------------------+------------+-------------+----------+------------------------------------------------------+-----------------
- tst_t1 | USTORE | | ROW | 0 | 0 | 0 | 0 | 57344 | 0 | 1 | 2022-10-18 09:04:24 | 2022-10-18 09:04:24 | | en_US.UTF-8 | | {orientation=row,storage_type=ustore,compression=no} | this is comment
- tst_t2 | ASTORE | | COLUMN | 0 | 0 | 24576 | 0 | 8192 | 0 | 1 | 2022-10-18 09:04:24 | 2022-10-18 09:04:24 | | en_US.UTF-8 | | {orientation=column,compression=low} |
-(2 rows)
-
--- where Conditional Filtering
-MogDB=# show table status from tst_schema where Engine='ASTORE';
-  Name  | Engine | Version | Row_format | Rows | Avg_row_length | Data_length | Max_data_length | Index_length | Data_free | Auto_increment | Create_time | Update_time | Check_time | Collation | Checksum | Create_options | Comment
---------+--------+---------+------------+------+----------------+-------------+-----------------+--------------+-----------+----------------+---------------------+---------------------+------------+-------------+----------+--------------------------------------+---------
- tst_t2 | ASTORE | | COLUMN | 0 | 0 | 24576 | 0 | 8192 | 0 | 1 | 2022-10-18 09:04:24 | 2022-10-18 09:04:24 | | en_US.UTF-8 | | {orientation=column,compression=low} |
-(1 row)
-
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-tables.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-tables.md
deleted file mode 100644
index ab023aa8..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-tables.md
+++ /dev/null
@@ -1,87 +0,0 @@
----
-title: dolphin SHOW TABLES
-summary: dolphin SHOW TABLES
-author: zhang cuiping
-date: 2022-10-24
----
-
-# SHOW TABLES
-
-## Function
-
-Views the list of tables in the current database or schema.
-
-## Precautions
-
-- If db\_name is not specified, the list of tables in the current database (or schema) is queried.
-
-## Syntax
-
-```
-SHOW [FULL] TABLES
-    [{FROM | IN} db_name]
-    [LIKE 'pattern' | WHERE expr]
-```
-
-## Parameter Description
-
-- **db_name**
-
-  Specifies the database name (or schema). This parameter is optional. If it is not specified, the current database or schema is queried.
-
-- **LIKE 'pattern'**
-
-  The pattern matches the first column (column name: 'Tables_in_dbname [`pattern`]') in the displayed result.
-
-## Examples
-
-```sql
---Create a simple table:
-MogDB=# CREATE SCHEMA tst_schema;
-MogDB=# SET SEARCH_PATH TO tst_schema;
-
-MogDB=# CREATE TABLE tst_t1
-MogDB-# (
-MogDB(# id int primary key,
-MogDB(# name varchar(20) NOT NULL,
-MogDB(# addr text COLLATE "de_DE",
-MogDB(# phone text COLLATE "es_ES",
-MogDB(# addr_code text
-MogDB(# );
-MogDB=# CREATE VIEW tst_v1 AS SELECT * FROM tst_t1;
-MogDB=# CREATE TABLE t_t2(id int);
-
---View the list of tables in the database (or schema).
-MogDB=# show tables;
- Tables_in_tst_schema
-----------------------
- tst_t1
- tst_v1
- t_t2
-
-MogDB=# show full tables;
- Tables_in_tst_schema | Table_type
-----------------------+------------
- tst_t1 | BASE TABLE
- tst_v1 | VIEW
- t_t2 | BASE TABLE
-
-MogDB=# show full tables in tst_schema;
- Tables_in_tst_schema | Table_type
-----------------------+------------
- tst_t1 | BASE TABLE
- tst_v1 | VIEW
- t_t2 | BASE TABLE
-
---Fuzzy match and filtering
-MogDB=# show full tables like '%tst%';
- Tables_in_tst_schema (%tst%) | Table_type
-------------------------------+------------
- tst_t1 | BASE TABLE
- tst_v1 | VIEW
-
-MogDB=# show full tables where Table_type='VIEW';
- Tables_in_tst_schema | Table_type
-----------------------+------------
- tst_v1 | VIEW
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-triggers.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-triggers.md
deleted file mode 100644
index 35686ff7..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-triggers.md
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: dolphin SHOW TRIGGERS
-summary: dolphin SHOW TRIGGERS
-author: zhang cuiping
-date: 2022-10-24
----
-
-# SHOW TRIGGERS
-
-## Function
-
-Displays information about triggers.
-
-## Precautions
-
-N/A
-
-## Syntax
-
-```
-SHOW TRIGGERS [{FROM | IN} db_name] [LIKE 'pattern' | WHERE expr]
-```
-
-## Parameter Description
-
-- **db_name**
-
-  Database name (or schema).
-
-- **WHERE expr**
-
-  Filter expression.
-
-- **LIKE 'pattern'**
-
-  The pattern regular expression matches the trigger name.
-
-## Return Result Set
-
-| Column | Description | Remarks |
-| :------------------- | :----------------------------------------------------- | :---------------------------- |
-| Trigger | Trigger name. | |
-| Event | Trigger event (INSERT, DELETE, UPDATE, or TRUNCATE). | |
-| Table | Table on which the trigger is defined. | |
-| Statement | Trigger content. | |
-| Timing | Trigger timing (BEFORE or AFTER). | |
-| Created | Time when the trigger was created. | This column is left blank. |
-| sql_mode | SQL mode when the trigger was created. | This column is left blank. |
-| Definer | Definer. | |
-| character_set_client | Character set of the client during creation. | This column is left blank. |
-| collation_connection | Collation of the client connection during creation. | This column is left blank. |
-| Database Collation | Database collation. | |
-
-## Examples
-
-```sql
-MogDB=# --Create a trigger table and trigger function.
-MogDB=# CREATE TABLE test_trigger_src_tbl(id1 INT, id2 INT, id3 INT);
-CREATE TABLE
-MogDB=# CREATE TABLE test_trigger_des_tbl(id1 INT, id2 INT, id3 INT);
-CREATE TABLE
-MogDB=# CREATE OR REPLACE FUNCTION tri_insert_func() RETURNS TRIGGER AS $$ DECLARE BEGIN INSERT INTO test_trigger_des_tbl VALUES(NEW.id1, NEW.id2, NEW.id3); RETURN NEW; END $$ LANGUAGE PLPGSQL;
-CREATE FUNCTION
-MogDB=# --Create a trigger.
-MogDB=# CREATE TRIGGER insert_trigger BEFORE INSERT ON test_trigger_src_tbl FOR EACH ROW EXECUTE PROCEDURE tri_insert_func();
-CREATE TRIGGER
-MogDB=# --View information.
-MogDB=# show triggers;
- Trigger | Event | Table | Statement | Timing | Created | sql_mode | Definer | character_set_client | collation_connection | Database Collation
-----------------+--------+----------------------+-------------------------------------+--------+---------+----------+---------+----------------------+----------------------+--------------------
- insert_trigger | INSERT | test_trigger_src_tbl | EXECUTE PROCEDURE tri_insert_func() | BEFORE | | | wyc | | | en_US.UTF-8
-(1 row)
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-variables.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-variables.md
deleted file mode 100644
index 6973fdd6..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-variables.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-title: dolphin SHOW VARIABLES
-summary: dolphin SHOW VARIABLES
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW VARIABLES
-
-## Function
-
-SHOW VARIABLES displays the values of system variables. This statement does not require any privileges; it only requires the ability to connect to the server. The LIKE and WHERE clauses can be appended for further matching.
-
-SHOW VARIABLES accepts the optional GLOBAL or SESSION variable scope modifiers:
-
-- With the GLOBAL modifier, the statement displays global system variable values. These are the values used to initialize the corresponding session variables for new connections to MogDB. If a variable has no global value, no value is displayed.
-- With the SESSION modifier, the statement displays the system variable values that are in effect for the current connection.
-- If no modifier is present, the default is SESSION.
-
-## Precautions
-
-N/A
-
-## Syntax
-
-```
-SHOW [GLOBAL | SESSION] VARIABLES [LIKE 'pattern' | WHERE expr];
-```
-
-## Parameter Description
-
-- **[GLOBAL | SESSION]**
-
-  **GLOBAL** queries the default (global) value of the GUC parameter; **SESSION** queries its session value (see the sketch following this list).
-
-- **[LIKE 'pattern' | WHERE expr]**
-
-  Matching pattern or filter expression.
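As a quick sketch of the scope modifiers (a hypothetical session; the parameter and values are only illustrative), a session-level SET changes what the SESSION scope reports, while the GLOBAL scope keeps returning the value used for new connections:

```sql
-- Hypothetical session: after a session-level SET, the two scopes may differ.
MogDB=# set vacuum_cost_limit = 500;
SET
MogDB=# show session variables like 'vacuum_cost_limit';  -- expected to report the session value, 500
MogDB=# show global variables like 'vacuum_cost_limit';   -- expected to report the default for new connections, 200
```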
-
-## Examples
-
-```sql
--- Query guc parameters beginning with v
-MogDB=# show variables like 'v%';
-       Variable_name        |   Value
-----------------------------+------------
- vacuum_cost_delay          | 0
- vacuum_cost_limit          | 200
- vacuum_cost_page_dirty     | 20
- vacuum_cost_page_hit       | 1
- vacuum_cost_page_miss      | 10
- vacuum_defer_cleanup_age   | 0
- vacuum_freeze_min_age      | 2000000000
- vacuum_freeze_table_age    | 4000000000
- vacuum_gtt_defer_check_age | 10000
- var_eq_const_selectivity   | off
- version_retention_age      | 0
-(11 rows)
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-warnings.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-warnings.md
deleted file mode 100644
index 65cc01a5..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-show-warnings.md
+++ /dev/null
@@ -1,175 +0,0 @@
----
-title: dolphin SHOW WARNINGS/ERRORS
-summary: dolphin SHOW WARNINGS/ERRORS
-author: Guo Huan
-date: 2023-05-15
----
-
-# SHOW WARNINGS/ERRORS
-
-## Function
-
-Displays the SQL statements executed in the current session and the resulting messages (including errors, warnings, and notes).
-
-## Precautions
-
-- The added system parameter, sql_note, is a switch that sets whether show warnings displays Note-level messages.
-- The Code field is the error code of the message. Its numeric value corresponds to a macro definition in ERRCODE. The status macros of the messages are generated by MAKE_SQLSTATE(ch1, ch2, ch3, ch4, ch5): it subtracts '0' from the ASCII code of each of the characters ch1 to ch5, takes the low six bits of each result to obtain res1 to res5, and concatenates the five values from low to high into a 30-bit binary number (res5 res4 res3 res2 res1); converted to decimal, this is the error code number. Different error code numbers correspond to different status macros. A worked computation follows the parameter description below.
-
-## Syntax
-
-```sql
-SHOW WARNINGS [LIMIT [offset,] row_count]
-SHOW COUNT(*) WARNINGS
-
-SHOW ERRORS [LIMIT [offset,] row_count]
-SHOW COUNT(*) ERRORS
-```
-
-## Parameter Description
-
-- **row_count**
-
-  Limits the number of rows of warning/error messages displayed for the last SQL statement.
-
-- **offset**
-
-  Message row from which the display starts.
-
-- **added system parameter**
-
-  [sql_note](../../guc-parameters.md) is a switch that sets whether show warnings displays Note-level messages.
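To make the Code computation concrete, here is a small sketch (an illustrative addition, not part of the original examples) that recomputes the code for SQLSTATE '42P07', the duplicate-table error whose code 117571716 appears in the examples below:

```sql
-- Recompute the error code for SQLSTATE '42P07' (relation already exists):
-- subtract '0' (ASCII 48) from each character, then pack the five 6-bit
-- values from low to high into one integer.
MogDB=# SELECT (ascii('4') - 48)
             | ((ascii('2') - 48) << 6)
             | ((ascii('P') - 48) << 12)
             | ((ascii('0') - 48) << 18)
             | ((ascii('7') - 48) << 24) AS code;
   code
-----------
 117571716
(1 row)
```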
- -## Return Result Set - -| Field Name | Type | Description | -| -------------------- | ----------------------- | -------------------------------- | -| Level | character type | Level of message (Note/Warning/Error) | -| Code | integer type | Error code corresponding to the message status | -| Message | character type | Message content | - -## Examples - -```sql -MogDB=# show sql_note; - sql_note ----------- - on -(1 row) - -MogDB=# create table test(id int, name varchar default 11); -CREATE TABLE -MogDB=# create table test(id int, name varchar default 11); -ERROR: relation "test" already exists in schema "public" -DETAIL: creating new table with existing name in the same schema -MogDB=# show warnings limit 1; - level | code | message --------+-----------+--------------------------------------------------- - Error | 117571716 | relation "test" already exists in schema "public" -(1 row) - -MogDB=# show count(*) warnings; - count -------- - 1 -(1 row) - -MogDB=# CREATE OR REPLACE FUNCTION TEST_FUNC(tempdata char) RETURNS VOID AS $$ -MogDB$# BEGIN -MogDB$# raise info'TEST CHAR VALUE IS %',tempdata; -MogDB$# END; -MogDB$# $$ LANGUAGE plpgsql; -CREATE FUNCTION -MogDB=# select TEST_FUNC('abc'::clob); -INFO: TEST CHAR VALUE IS abc -CONTEXT: referenced column: test_func - test_func ------------ - -(1 row) - -MogDB=# show warnings; - level | code | message --------+------+------------------------ - Note | 0 | TEST CHAR VALUE IS abc -(1 row) - -MogDB=# set sql_note=false; -SET -MogDB=# select TEST_FUNC('abc'::clob); -INFO: TEST CHAR VALUE IS abc -CONTEXT: referenced column: test_func - test_func ------------ - -(1 row) - -MogDB=# show warnings; - level | code | message --------+------+--------- -(0 rows) - -MogDB=# SELECT pg_advisory_unlock(1), pg_advisory_unlock_shared(2), pg_advisory_unlock(1, 1), pg_advisory_unlock_shared(2, 2); -WARNING: you don't own a lock of type ExclusiveLock -CONTEXT: referenced column: pg_advisory_unlock -WARNING: you don't own a lock of type ShareLock -CONTEXT: referenced column: pg_advisory_unlock_shared -WARNING: you don't own a lock of type ExclusiveLock -CONTEXT: referenced column: pg_advisory_unlock -WARNING: you don't own a lock of type ShareLock -CONTEXT: referenced column: pg_advisory_unlock_shared - pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock_shared ---------------------+---------------------------+--------------------+--------------------------- - f | f | f | f -(1 row) - -MogDB=# show warnings; - level | code | message ----------+------+-------------------------------------------- - Warning | 64 | you don't own a lock of type ExclusiveLock - Warning | 64 | you don't own a lock of type ShareLock - Warning | 64 | you don't own a lock of type ExclusiveLock - Warning | 64 | you don't own a lock of type ShareLock -(4 rows) - -MogDB=# show warnings limit 2, 4; - level | code | message ----------+------+-------------------------------------------- - Warning | 64 | you don't own a lock of type ExclusiveLock - Warning | 64 | you don't own a lock of type ShareLock -(2 rows) - --- Use sql_note to control the switch for storing note information. 
-CREATE OR REPLACE FUNCTION TEST_FUNC(tempdata char) RETURNS VOID AS $$ -BEGIN - raise info'TEST CHAR VALUE IS %',tempdata; -END; -$$ LANGUAGE plpgsql; -select TEST_FUNC('abc'::clob); -INFO: TEST CHAR VALUE IS abc -CONTEXT: referenced column: test_func - test_func ------------ - -(1 row) - -show warnings; - level | code | message --------+------+------------------------ - Note | 0 | TEST CHAR VALUE IS abc -(1 row) - -set sql_note=false; -select TEST_FUNC('abc'::clob); -INFO: TEST CHAR VALUE IS abc -CONTEXT: referenced column: test_func - test_func ------------ - -(1 row) - -show warnings; - level | code | message --------+------+--------- -(0 rows) -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-sql-syntax.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-sql-syntax.md deleted file mode 100644 index 1568aa48..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-sql-syntax.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: dolphin SQL Syntax -summary: dolphin SQL Syntax -author: Guo Huan -date: 2023-05-19 ---- - -# SQL Syntax - -+ **[ALTER DATABASE](dolphin-alter-database.md)** -+ **[ALTER FUNCTION](dolphin-alter-function.md)** -+ **[ALTER PROCEDURE](dolphin-alter-procedure.md)** -+ **[ALTER SERVER](dolphin-alter-server.md)** -+ **[ALTER TABLE](dolphin-alter-table.md)** -+ **[ALTER TABLE PARTITION](dolphin-alter-table-partition.md)** -+ **[ALTER TABLESPACE](dolphin-alter-tablespace.md)** -+ **[ALTER VIEW](dolphin-alter-view.md)** -+ **[ANALYZE | ANALYSE](dolphin-analyze-analyse.md)** -+ **[AST](dolphin-ast.md)** -+ **[CHECKSUM TABLE](dolphin-checksum-table.md)** -+ **[CREATE DATABASE](dolphin-create-database.md)** -+ **[CREATE FUNCTION](dolphin-create-function.md)** -+ **[CREATE INDEX](dolphin-create-index.md)** -+ **[CREATE PROCEDURE](dolphin-create-procedure.md)** -+ **[CREATE SERVER](dolphin-create-server.md)** -+ **[CREATE TABLE](dolphin-create-table.md)** -+ **[CREATE TABLE AS](dolphin-create-table-as.md)** -+ **[CREATE TABLE PARTITION](dolphin-create-table-partition.md)** -+ **[CREATE TABLESPACE](dolphin-create-tablespace.md)** -+ **[CREATE TRIGGER](dolphin-create-trigger.md)** -+ **[CREATE VIEW](dolphin-create-view.md)** -+ **[DESCRIBE TABLE](dolphin-describe-table.md)** -+ **[DO](dolphin-do.md)** -+ **[DROP DATABASE](dolphin-drop-database.md)** -+ **[DROP INDEX](dolphin-drop-index.md)** -+ **[DROP TABLESPACE](dolphin-drop-tablespace.md)** -+ **[EXECUTE](dolphin-execute.md)** -+ **[EXPLAIN](dolphin-explain.md)** -+ **[FLUSH BINARY LOGS](dolphin-flush-binary-logs.md)** -+ **[GRANT](dolphin-grant.md)** -+ **[GRANT/REVOKE PROXY](dolphin-grant-revoke-proxy.md)** -+ **[INSERT](dolphin-insert.md)** -+ **[KILL](dolphin-kill.md)** -+ **[LOAD DATA](dolphin-load-data.md)** -+ **[OPTIMIZE TABLE](dolphin-optimize-table.md)** -+ **[PREPARE](dolphin-prepare.md)** -+ **[RENAME TABLE](dolphin-rename-table.md)** -+ **[RENAME USER](dolphin-rename-user.md)** -+ **[REVOKE](dolphin-revoke.md)** -+ **[SELECT](dolphin-select.md)** -+ **[SELECT HINT](dolphin-select-hint.md)** -+ **[SET CHARSET](dolphin-set-charset.md)** -+ **[SET PASSWORD](dolphin-set-password.md)** -+ **[SHOW CHARACTER SET](dolphin-show-character-set.md)** -+ **[SHOW COLLATION](dolphin-show-collation.md)** -+ **[SHOW 
COLUMNS](dolphin-show-columns.md)**
-+ **[SHOW CREATE DATABASE](dolphin-show-create-database.md)**
-+ **[SHOW CREATE FUNCTION](dolphin-show-create-function.md)**
-+ **[SHOW CREATE PROCEDURE](dolphin-show-create-procedure.md)**
-+ **[SHOW CREATE TABLE](dolphin-show-create-table.md)**
-+ **[SHOW CREATE TRIGGER](dolphin-show-create-trigger.md)**
-+ **[SHOW CREATE VIEW](dolphin-show-create-view.md)**
-+ **[SHOW DATABASES](dolphin-show-databases.md)**
-+ **[SHOW FUNCTION STATUS](dolphin-show-function-status.md)**
-+ **[SHOW GRANTS](dolphin-show-grants.md)**
-+ **[SHOW INDEX](dolphin-show-index.md)**
-+ **[SHOW MASTER STATUS](dolphin-show-master-status.md)**
-+ **[SHOW PLUGINS](dolphin-show-plugins.md)**
-+ **[SHOW PRIVILEGES](dolphin-show-privileges.md)**
-+ **[SHOW PROCEDURE STATUS](dolphin-show-procedure-status.md)**
-+ **[SHOW PROCESSLIST](dolphin-show-processlist.md)**
-+ **[SHOW SLAVE HOSTS](dolphin-show-slave-hosts.md)**
-+ **[SHOW STATUS](dolphin-show-status.md)**
-+ **[SHOW TABLES](dolphin-show-tables.md)**
-+ **[SHOW TABLE STATUS](dolphin-show-table-status.md)**
-+ **[SHOW TRIGGERS](dolphin-show-triggers.md)**
-+ **[SHOW VARIABLES](dolphin-show-variables.md)**
-+ **[SHOW WARNINGS/ERRORS](dolphin-show-warnings.md)**
-+ **[UPDATE](dolphin-update.md)**
-+ **[USE db_name](dolphin-use-db_name.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-update.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-update.md
deleted file mode 100644
index e5df3bef..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-update.md
+++ /dev/null
@@ -1,248 +0,0 @@
----
-title: dolphin UPDATE
-summary: dolphin UPDATE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# UPDATE
-
-## Function
-
-Updates data in a table. Changes the values of the specified columns in all rows that satisfy the condition. The WHERE clause specifies the conditions. The SET clause specifies the columns to be modified; columns not specified in the SET clause retain their previous values.
-
-## Precautions
-
-- This section describes only the new syntax of Dolphin. The UPDATE syntax of MogDB is not modified. For details about the UPDATE syntax of the original MogDB, see section [UPDATE](../../../../../../reference-guide/sql-syntax/UPDATE.md).
-
-## Syntax
-
-```
-Update a single table:
-[ WITH [ RECURSIVE ] with_query [, ...] ]
-UPDATE [/*+ plan_hint */] [IGNORE] [ ONLY ] table_name [ partition_clause ] [ * ] [ [ AS ] alias ]
-SET {column_name = { expression | DEFAULT }
-    |( column_name [, ...] ) = {( { expression | DEFAULT } [, ...] ) |sub_query }}[, ...]
-    [ FROM from_list] [ WHERE condition ]
-    [ ORDER BY {expression [ [ ASC | DESC | USING operator ]
-    [ LIMIT { count } ]
-    [ RETURNING {*
-    | {output_expression [ [ AS ] output_name ]} [, ...] }];
-
-Update multiple tables:
-[ WITH [ RECURSIVE ] with_query [, ...] ]
-UPDATE [/*+ plan_hint */] [IGNORE] table_list
-SET {column_name = { expression | DEFAULT }
-    |( column_name [, ...] ) = {( { expression | DEFAULT } [, ...] ) |sub_query }}[, ...]
-    [ FROM from_list] [ WHERE condition ];
-
-where sub_query can be:
-SELECT [ ALL | DISTINCT [ ON ( expression [, ...] ) ] ]
-{ * | {expression [ [ AS ] output_name ]} [, ...] }
-[ FROM from_item [, ...]
] -[ WHERE condition ] -[ GROUP BY grouping_element [, ...] ] -[ HAVING condition [, ...] ] -[ ORDER BY {expression [ [ ASC | DESC | USING operator ] | nlssort_expression_clause ] [ NULLS { FIRST | LAST } ]} [, ...] ] -[ LIMIT { [offset,] count | ALL } ] -``` - -## Parameter Description - -- **IGNORE** - - If an error occurs in a specified scenario when an UPDATE statement containing the keyword IGNORE is executed, the error is degraded to warning and the statement execution continues without affecting other data operations. Error degradation can be enabled in the following scenarios: - - 1. The non-null constraint is violated. - - If the executed SQL statement violates the non-null constraint of the table, you can use this hint to degrade errors to warnings and use one of the following strategies based on the value of the GUC parameter **sql_ignore_strategy**: - - - If **sql_ignore_strategy** is set to **ignore_null**, the UPDATE operations on rows that violate non-null constraints are ignored and remaining data operations are performed. - - - If **sql_ignore_strategy** is set to **overwrite_null**, the null value that violates the constraint is overwritten by the default value of the target type, and the remaining data operations are performed. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** >The GUC parameter sql_ignore_strategy is of the enumeration type. The options are ignore_null and overwrite_null. - - 2. The unique constraint is violated. - - If the executed SQL statement violates the unique constraint of a table, you can use this hint to degrade errors to warnings, ignore the UPDATE operation on the row that violates the constraint, and continue to perform the remaining data operations. - - 3. The partitioned table cannot match a valid partition. - - When UPDATE is performed on a partitioned table, if a row of data does not match a valid partition of the table, you can use this hint to degrade errors to warnings, ignore the row, and continue to perform operations on the remaining data. - - 4. Failed to convert the updated value to the target column type. - - During the execution of the UPDATE statement, if the new value does not match the type of the target column, you can use this hint to degrade errors to warnings and continue the execution based on the new value type and the target column type: - - - When the new value type and column type are both numeric: - - If the new value is within the range of the column type, update the value directly. If the new value is beyond the range of the column type, replace the value with the maximum or minimum value of the column type. - - - When the new value type and column type are both character strings: - - If the length of the new value is within the range specified by the column type, update the value directly. If the length of the new value is beyond the range specified by the column type, the first n characters of the column type are retained. - - - When the new value type cannot be converted to the column type: - - Update to the default value of the column type. - - The IGNORE keyword does not support column store and cannot take effect in column-store tables. - -## Example - -### IGNORE - -To use the ignore_error hints, you need to create a database named **db_ignore** in B-compatible mode. 
- -``` -create database db_ignore dbcompatibility 'B'; -\c db_ignore -``` - -- **Ignore the non-null constraint.** - -```sql -db_ignore=# create table t_not_null(num int not null); -CREATE TABLE --- The ignore strategy is used. -db_ignore=# set sql_ignore_strategy = 'ignore_null'; -SET -db_ignore=# insert into t_not_null values (1); -INSERT 0 1 -db_ignore=# select * from t_not_null ; - num ------ - 1 -(1 row) - -db_ignore=# update /*+ ignore_error */ t_not_null set num = null where num = 1; -WARNING: null value in column "num" violates not-null constraint -DETAIL: Failing row contains (null). -UPDATE 0 -db_ignore=# select * from t_not_null ; - num ------ - 1 -(1 row) - - --- The overwrite strategy is used. -db_ignore=# delete from t_not_null; -db_ignore=# set sql_ignore_strategy = 'overwrite_null'; -SET -db_ignore=# insert into t_not_null values (1); -WARNING: null value in column "num" violates not-null constraint -DETAIL: Failing row contains (null). -INSERT 0 1 -db_ignore=# select * from t_not_null ; - num ------ - 1 -(1 rows) - -db_ignore=# update /*+ ignore_error */ t_not_null set num = null where num = 1; -WARNING: null value in column "num" violates not-null constraint -DETAIL: Failing row contains (null). -UPDATE 1 -db_ignore=# select * from t_not_null ; - num ------ - 0 -(1 rows) -``` - -- **Ignore the unique constraint.** - -```sql -db_ignore=# create table t_unique(num int unique); -NOTICE: CREATE TABLE / UNIQUE will create implicit index "t_unique_num_key" for table "t_unique" -CREATE TABLE -db_ignore=# insert into t_unique values(1), (2); -INSERT 0 1 -db_ignore=# update /*+ ignore_error */ t_unique set num = 1 where num = 2; -WARNING: duplicate key value violates unique constraint in table "t_unique" -UPDATE 0 -db_ignore=# select * from t_unique ; - num ------ - 1 - 2 -(2 rows) -``` - -- **Ignore the partitioned table that cannot match a valid partition.** - -```sql -db_ignore=# CREATE TABLE t_ignore -db_ignore-# ( -db_ignore(# col1 integer NOT NULL, -db_ignore(# col2 character varying(60) -db_ignore(# ) WITH(segment = on) PARTITION BY RANGE (col1) -db_ignore-# ( -db_ignore(# PARTITION P1 VALUES LESS THAN(5000), -db_ignore(# PARTITION P2 VALUES LESS THAN(10000), -db_ignore(# PARTITION P3 VALUES LESS THAN(15000) -db_ignore(# ); -CREATE TABLE -db_ignore=# insert into t_ignore values(3000); -INSERT 0 1 -db_ignore=# select * from t_ignore ; - col1 | col2 -------+------ - 3000 | -(1 row) -db_ignore=# update /*+ ignore_error */ t_ignore set col1 = 20000 where col1 = 3000; -WARNING: fail to update partitioned table "t_ignore".new tuple does not map to any table partition. 
-UPDATE 0
-db_ignore=# select * from t_ignore ;
- col1 | col2
-------+------
- 3000 |
-(1 row)
-```
-
-- **Failed to convert the updated value to the target column type.**
-
-```sql
--- When the new value type and column type are both numeric:
-db_ignore=# create table t_tinyint(num tinyint);
-CREATE TABLE
-db_ignore=# insert into t_tinyint values(1);
-INSERT 0 1
-db_ignore=# select * from t_tinyint;
- num
------
- 1
-(1 row)
-db_ignore=# update /*+ignore_error */ t_tinyint set num = 10000 where num = 1;
-WARNING: tinyint out of range
-CONTEXT: referenced column: num
-UPDATE 1
-db_ignore=# select * from t_tinyint;
- num
------
- 255
-(1 row)
-
-
--- When the new value type and column type are both character strings:
-db_ignore=# create table t_varchar5(content varchar(5));
-CREATE TABLE
-db_ignore=# insert into t_varchar5 values('abc');
-INSERT 0 1
-db_ignore=# select * from t_varchar5;
- content
----------
- abc
-(1 row)
-db_ignore=# update /*+ ignore_error */ t_varchar5 set content = 'abcdefghijklmn' where content = 'abc';
-WARNING: value too long for type character varying(5)
-CONTEXT: referenced column: content
-UPDATE 1
-db_ignore=# select * from t_varchar5;
- content
----------
- abcde
-(1 row)
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-use-db_name.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-use-db_name.md
deleted file mode 100644
index 1ab93d29..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/sql-reference/sql-syntax/dolphin-use-db_name.md
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: dolphin USE db_name
-summary: dolphin USE db_name
-author: zhang cuiping
-date: 2022-10-24
----
-
-# USE db_name
-
-## Function
-
-The USE db\_name statement uses the db\_name database as the default (current) database for subsequent statements. The database remains the default until the end of the session or until a different USE statement is issued.
-
-## Precautions
-
-N/A
-
-## Syntax
-
-```
-USE db_name
-```
-
-## Parameter Description
-
-- **db_name**
-
-  Database name.
-
-## Examples
-
-```sql
---Switch to the db1 database.
-MogDB=# USE db1;
-SET
-MogDB=# CREATE TABLE test(a text);
-CREATE TABLE
-MogDB=# INSERT INTO test VALUES('db1');
-INSERT 0 1
-
---Switch to the db2 database.
-MogDB=# USE db2;
-SET
-MogDB=# CREATE TABLE test(a text);
-CREATE TABLE
-MogDB=# INSERT INTO test VALUES('db2');
-INSERT 0 1
-MogDB=# select a from db1.test;
-  a
------
- db1
-(1 row)
-
-MogDB=# select a from db2.test;
-  a
------
- db2
-(1 row)
-
-MogDB=# select a from test;
-  a
------
- db2
-(1 row)
-
---Switch to the db1 database.
-MogDB=# USE db1;
-SET
-MogDB=# select a from test;
-  a
------
- db1
-(1 row)
-```
-
-## Helpful Links
-
-N/A
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/basic-statements/dolphin-assignment-statements.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/basic-statements/dolphin-assignment-statements.md
deleted file mode 100644
index 79e9cee6..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/basic-statements/dolphin-assignment-statements.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: dolphin Assignment Statements
-summary: dolphin Assignment Statements
-author: Guo Huan
-date: 2023-05-15
----
-
-# Assignment Statements
-
-## Notice
-
-Compared with the original openGauss, Dolphin modifies the assignment syntax as follows:
-
-1. Support for assigning values to variables through **set** between BEGIN and END is added.
-
-## Syntax
-
-Figure 1 shows the syntax diagram for assigning a value to a variable.
-
-**Figure 1** assignment_value::=
-![assignment_value](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/assignment_value.png)
-
-The following is supported in B-compatible mode:
-
-```
-set variable_name := value;
-```
-
-The syntax is described as follows:
-
-- **variable_name** indicates the name of a variable.
-- **value** can be a value or an expression. The type of **value** must be compatible with the type of **variable_name**.
-
-Example:
-
-```sql
-MogDB=# DECLARE
-    emp_id INTEGER := 7788; -- Assignment
-BEGIN
-    emp_id := 5; -- Assignment
-    emp_id := 5*7784;
-END;
-/
-
--- In B-compatible mode:
-MogDB=# DECLARE
-    emp_id INTEGER := 7788; -- Assignment
-BEGIN
-    set emp_id := 5; -- Assignment
-    set emp_id := 5*7784;
-END;
-/
-```
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
->
-> - You can run the **set variable_name :=(=) value** command to assign a value to a variable between BEGIN and END.
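To round this off, here is a minimal runnable sketch (an illustrative addition, assuming a B-compatible database, since the set form is accepted only there) that checks the assigned value with RAISE INFO:

```sql
MogDB=# DECLARE
    total INTEGER := 0;
BEGIN
    set total := total + 5;   -- set-style assignment between BEGIN and END
    set total := total * 8;
    raise info 'total is %', total;   -- expected output: total is 40
END;
/
```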
\ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/basic-statements/dolphin-basic-statements.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/basic-statements/dolphin-basic-statements.md deleted file mode 100644 index ffede664..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/basic-statements/dolphin-basic-statements.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: dolphin Basic Statements -summary: dolphin Basic Statements -author: Guo Huan -date: 2023-05-22 ---- - -# Basic Statements - -- **[Assignment Statements](dolphin-assignment-statements.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/dolphin-stored-procedures.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/dolphin-stored-procedures.md deleted file mode 100644 index 738100c1..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/stored-procedures/dolphin-stored-procedures.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: dolphin Stored Procedures -summary: dolphin Stored Procedures -author: Guo Huan -date: 2023-05-22 ---- - -# Stored Procedures - -- **[Basic Statements](basic-statements/dolphin-basic-statements.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-INDEX_STATISTIC.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-INDEX_STATISTIC.md deleted file mode 100644 index c0b96515..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-INDEX_STATISTIC.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: dolphin INDEX_STATISTIC -summary: dolphin INDEX_STATISTIC -author: Guo Huan -date: 2023-05-16 ---- - -# INDEX_STATISTIC - -The INDEX_STATISTIC view stores index information for the current database. - -**Table 1** INDEX_STATISTIC fields - -| Name | Type | Description | -| :------------ | :--------------- | :----------------------------------------------------------- | -| namespace | name | Tablespace to which the index belongs | -| table | name | Table to which the index belong | -| non_unique | boolean | Whether it is a unique index | -| key_name | name | Index name | -| seq_in_index | smallint | Serial number of the indexed column in the index | -| column_name | name | Column names of index columns | -| collation | text | The values are A (default, ascending), D (descending), NULL (indexes do not support sorting) | -| cardinality | double precision | Calculated from pg_statistic.distinct and pg_class.reltuples:
stadistinct > 0: stadistinct
stadistinct = 0: NULL
stadistinct < 0: reltuples \* stadistinct \* -1 |
-| sub_part | text | Index prefix: the number of indexed characters if the column is only partially indexed, or NULL if the entire column is indexed. Prefix indexing is not currently supported, so the value is NULL. |
-| packed | text | How key values are packed (pack_keys specified when creating the table); otherwise NULL. Currently not supported, so the value is NULL. |
-| null | text | YES if the column may contain NULL values; otherwise, empty. |
-| index_type | name | Indexing method used: BTREE, HASH, etc. |
-| comment | text | If the indisusable record in the pg_index table is true, displays 'disabled'; if false, displays ''. |
-| index_comment | text | Comment information specified by COMMENT when creating an index |
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-PG_TYPE_NONSTRICT_BASIC_VALUE.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-PG_TYPE_NONSTRICT_BASIC_VALUE.md
deleted file mode 100644
index 5139dda2..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-PG_TYPE_NONSTRICT_BASIC_VALUE.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: dolphin PG_TYPE_NONSTRICT_BASIC_VALUE
-summary: dolphin PG_TYPE_NONSTRICT_BASIC_VALUE
-author: zhang cuiping
-date: 2022-10-24
----
-
-# PG_TYPE_NONSTRICT_BASIC_VALUE
-
-The PG_TYPE_NONSTRICT_BASIC_VALUE view stores the basic value of each type used for insert values() insertion. By default, only system administrators are allowed to access this system view; normal users need authorization to access it.
-
-**Table 1** PG_TYPE_NONSTRICT_BASIC_VALUE fields
-
-| Name | Type | Description |
-| :---------- | :--- | :---------------------- |
-| typename | text | Type name |
-| basic_value | text | Basic value of the type |
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-system-views.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-system-views.md
deleted file mode 100644
index aa09538a..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/dolphin-extension/dolphin-syntax/system-views/dolphin-system-views.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: dolphin System Views
-summary: dolphin System Views
-author: Guo Huan
-date: 2023-05-22
----
-
-# System Views
-
-+ **[PG_TYPE_NONSTRICT_BASIC_VALUE](dolphin-PG_TYPE_NONSTRICT_BASIC_VALUE.md)**
-+ **[INDEX_STATISTIC](dolphin-INDEX_STATISTIC.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/mysql-compatible-description.md b/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/mysql-compatible-description.md
deleted file mode 100644
index 873a341a..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/mysql-compatibility-description/mysql-compatible-description.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: MySQL-compatible Description
-summary: MySQL-compatible Description
-author: zhang cuiping
-date: 2022-10-24
----
-
-# MySQL-compatible Description
-
-The MySQL compatibility of MogDB is mainly realized through the Dolphin extension and the kernel.
The description of the Dolphin can be found in [Dolphin Extension](dolphin-extension/dolphin-extension.md). The MySQL compatibility features implemented in MogDB are shown in the following table. - -**Table 1** MySQL-compatible features implemented in the MogDB - -| Category | Overview | Link | -| :-------------------- | :----------------------------------------------------------- | :----------------------------------------------------------- | -| DDL | The CREATE DATABASE supports the IF NOT EXISTS option. | [CREATE DATABASE](../../reference-guide/sql-syntax/CREATE-DATABASE.md) | -| DDL | The CREATE SCHEMA supports the IF NOT EXISTS option. | [CREATE SCHEMA](../../reference-guide/sql-syntax/CREATE-SCHEMA.md) | -| DDL | The ALTER USER supports the IF EXISTS option. | [ALTER USER](../../reference-guide/sql-syntax/ALTER-USER.md) | -| DDL | ALTER VIEW supports the DEFINER option. | [ALTER VIEW](../../reference-guide/sql-syntax/ALTER-VIEW.md) | -| DDL | CREATE VIEW supports the DEFINER option. | [CREATE VIEW](../../reference-guide/sql-syntax/CREATE-VIEW.md) | -| DDL | The COMMENT option can be specified in the statements for creating and modifying objects. | [ALTER FUNCTION](../../reference-guide/sql-syntax/ALTER-FUNCTION.md)、 [ALTER PROCEDURE](../../reference-guide/sql-syntax/ALTER-PROCEDURE.md)、 [ALTER TABLE](../../reference-guide/sql-syntax/ALTER-TABLE.md)、 [CREATE FUNCTION](../../reference-guide/sql-syntax/CREATE-FUNCTION.md)、 [CREATE INDEX](../../reference-guide/sql-syntax/CREATE-INDEX.md)、 [CREATE PROCEDURE](../../reference-guide/sql-syntax/CREATE-PROCEDURE.md)、 [CREATE TABLE](../../reference-guide/sql-syntax/CREATE-TABLE.md)、 [CREATE TABLE PARTITION](../../reference-guide/sql-syntax/CREATE-TABLE-PARTITION.md)、 [CREATE TABLE SUBPARTITION](../../reference-guide/sql-syntax/CREATE-TABLE-SUBPARTITION.md) | -| DDL | CREATE TABLE supports the creation of primary keys, UNIQUE indexes, and foreign key constraints. | [CREATE TABLE](../../reference-guide/sql-syntax/CREATE-TABLE.md) | -| DDL | ALTER TABLE supports the creation of primary keys, UNIQUE indexes, and foreign key constraints. | [ALTER TABLE](../../reference-guide/sql-syntax/ALTER-TABLE.md) | -| DDL | CREATE TABLE supports the creation of auto-increment columns. | [CREATE TABLE](../../reference-guide/sql-syntax/CREATE-TABLE.md) | -| DML | DELETE supports the deletion of data from multiple tables. | [DELETE](../../reference-guide/sql-syntax/DELETE.md) | -| DML | DELETE supports ORDER BY. | [DELETE](../../reference-guide/sql-syntax/DELETE.md) | -| DML | DELETE supports the deletion of data from a specified partition or subpartition. | [DELETE](../../reference-guide/sql-syntax/DELETE.md) | -| DML | UPDATE supports the update of data from multiple tables. | [UPDATE](../../reference-guide/sql-syntax/UPDATE.md) | -| DML | UPDATE supports ORDER BY and LIMIT. | [UPDATE](../../reference-guide/sql-syntax/UPDATE.md) | -| DCL | User-defined variables can be set in sessions. | [SET](../../reference-guide/sql-syntax/SET.md) | -| DCL | Global variables of SET can be enhanced. | [SET](../../reference-guide/sql-syntax/SET.md) | -| Data type | The NVARCHAR type is supported. | [Character Types](../../reference-guide/supported-data-types/character-data-types.md) | -| Data type | The SET type is supported. | [SET Type](../../reference-guide/supported-data-types/set-type.md) | -| Function and operator | The secure equality operator <=> is supported. 
| [Simple Expressions](../../reference-guide/sql-reference/expressions/simple-expressions.md) | -| Function and operator | The group_concat function is supported. | [Aggregate Functions](../../reference-guide/functions-and-operators/aggregate-functions.md) | -| Function and operator | The security functions aes_decrypt and aes_encrypt are supported. | [Security Functions](../../reference-guide/functions-and-operators/security-functions.md) | -| Function and operator | The character processing functions SHA, SHA1, and SHA2 are supported. | [Character Processing Functions and Operators](../../reference-guide/functions-and-operators/character-processing-functions-and-operators.md) | \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-management.md b/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-management.md deleted file mode 100644 index fd89a350..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-management.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Partition Management -summary: Partition Management -author: Guo Huan -date: 2023-05-19 ---- - -# Partition Management - -- **[Partition Pruning](partition-pruning/partition-pruning.md)** -- **[Recommendations For Choosing A Partitioning Strategy](recommendations-for-choosing-a-partitioning-strategy/recommendations-for-choosing-a-partitioning-strategy.md)** \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/benefits-of-partition-pruning.md b/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/benefits-of-partition-pruning.md deleted file mode 100644 index dc42b875..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/benefits-of-partition-pruning.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Benefits of Partition Pruning -summary: Benefits of Partition Pruning -author: Guo Huan -date: 2022-06-14 ---- - -# Benefits of Partition Pruning - -Partition pruning is a common performance optimization method in partitioned tables. Before scanning a partition table, you can check partition constraints and the definition of each partition to exclude partitions that do not need to be scanned in advance, which can greatly improve scan performance. In the query planning stage, if the partition constraint is a definite expression, the partition that does not need to be scanned can be cut off according to the partition constraint expression in the query planning stage. This partition pruning method is generally called static partition pruning. Static partition pruning result can be seen from the EXPLAIN VERBOSE output, as shown below. 
-
-explain verbose select * from prune_tt01 where a<8;
-
-```sql
-MogDB=# drop table if exists prune_tt01;
-DROP TABLE
-MogDB=# CREATE TABLE prune_tt01(a int, b int)
-PARTITION BY RANGE(a)
-(
-    PARTITION prune_tt01_p1 VALUES LESS THAN(5),
-    PARTITION prune_tt01_p2 VALUES LESS THAN(10),
-    PARTITION prune_tt01_p3 VALUES LESS THAN(15),
-    PARTITION prune_tt01_p4 VALUES LESS THAN(MAXVALUE)
-);
-CREATE TABLE
-MogDB=# INSERT INTO prune_tt01 VALUES (generate_series(1, 20), generate_series(1,20));
-INSERT 0 20
-MogDB=# CREATE INDEX index_prune_tt01 ON prune_tt01 USING btree(a) LOCAL;
-CREATE INDEX
-MogDB=# explain verbose select * from prune_tt01 where a<8 ;
-                                              QUERY PLAN
-----------------------------------------------------------------------------------------------------
- Partition Iterator  (cost=13.80..27.75 rows=716 width=8)
-   Output: a, b
-   Iterations: 2
-   Selected Partitions:  1..2
-   ->  Partitioned Bitmap Heap Scan on public.prune_tt01  (cost=13.80..27.75 rows=716 width=8)
-         Output: a, b
-         Recheck Cond: (prune_tt01.a < 8)
-         ->  Partitioned Bitmap Index Scan on index_prune_tt01  (cost=0.00..13.62 rows=716 width=0)
-               Index Cond: (prune_tt01.a < 8)
-(9 rows)
-```
-
-However, in many scenarios, such as the prepare-execute execution mode or queries in which the partition constraint expression contains subqueries, the partition constraint expression is uncertain or contains unknown parameters in the query planning phase, so partitions cannot be pruned at planning time. In these cases, the pruning expression is evaluated at execution time using external parameters and the results of subqueries; pruning performed in the execution phase is generally called dynamic partition pruning. For dynamic partition pruning, EXPLAIN VERBOSE shows the pruning information as **Selected Partitions: PART**.
-
-explain verbose select * from prune_tt01 where a < (select 8);
-
-```sql
-MogDB=# explain verbose select * from prune_tt01 where a < (select 8);
-                                      QUERY PLAN
---------------------------------------------------------------------------------------
- Partition Iterator  (cost=0.01..36.87 rows=716 width=8)
-   Output: prune_tt01.a, prune_tt01.b
-   Iterations: PART
-   Selected Partitions:  PART
-   InitPlan 1 (returns $0)
-     ->  Result  (cost=0.00..0.01 rows=1 width=0)
-           Output: 8
-   ->  Partitioned Seq Scan on public.prune_tt01  (cost=0.00..36.86 rows=716 width=8)
-         Output: prune_tt01.a, prune_tt01.b
-         Filter: (prune_tt01.a < $0)
-(10 rows)
-```
-
-MogDB 3.0 introduced dynamic pruning of partitioned tables, which greatly reduces the amount of data retrieved from disk and shortens processing time, thus improving query performance and optimizing resource utilization.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/dynamic-partition-pruning.md b/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/dynamic-partition-pruning.md
deleted file mode 100644
index 927a994f..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/dynamic-partition-pruning.md
+++ /dev/null
@@ -1,260 +0,0 @@
----
-title: Dynamic Partition Pruning
-summary: Dynamic Partition Pruning
-author: Guo Huan
-date: 2022-06-14
----
-
-# Dynamic Partition Pruning
-
-Dynamic partition pruning supports the following three types:
-
-1. The prepare-execute (bind variable) execution mode, in which the dynamic pruning expression is similar to the static one. The only difference is that one side of a static pruning expression is a constant (Const).
One side of the dynamic pruning expression is external parameters or an expression composed of external parameters. - - ```sql - prepare ps(int) as select * from prune_tt01 where a > $1; - explain verbose execute ps(12); - ``` - -2. A query statement contains subqueries, which are divided into two methods: pruning partitioned tables in the main query and pruning partitioned tables in the subquery: - - - The partition table in the main query is pruned. The subquery is generally an unrelated subquery. The subquery works as one side of the partition pruning expression in the main query, and the other side is the partitioning key. - - ```sql - select * from prune_tt01 where a > (select a from t2 limit 1); - ``` - - - The partition table in the subquery is pruned. The subquery is generally a correlation subquery, and the subquery does not support promotion. Note: The related subquery involves pruning of multiple partitioned table, the result of the last pruning is shown in Explain Analyze. - any subquery: - - ```sql - explain analyze select * from t2 where a > any(select a from prune_tt01 where prune_tt01.a = t2.a); - ``` - - exists subquery: - - ```sql - explain analyze select * from t2 where exists (select a from prune_tt01 where prune_tt01.a = t2.a limit 1); - ``` - -3. NestLoop parameterized query, that is, the query plan use the nestLoop+indexscan query method. The partition key of a partitioned table must be the join key of two tables and an index must be created on the partition key. - -The following is the dynamic partition pruning example. - -```sql -drop table if exists prune_tt01; -CREATE TABLE prune_tt01(a int, b int) -PARTITION BY RANGE(a) -( - PARTITION prune_tt01_p1 VALUES LESS THAN(5), - PARTITION prune_tt01_p2 VALUES LESS THAN(10), - PARTITION prune_tt01_p3 VALUES LESS THAN(15), - PARTITION prune_tt01_p4 VALUES LESS THAN(MAXVALUE) -); -INSERT INTO prune_tt01 VALUES (generate_series(1, 20), generate_series(1,20)); -CREATE INDEX index_prune_tt01 ON prune_tt01 USING btree(a) LOCAL; - -drop table if exists tt02; -create table tt02(a int, b int); -INSERT INTO tt02 VALUES (generate_series(1, 20), generate_series(1,20)); -``` - -**prepare-execute (bind variable) scenario** - -```sql -MogDB=# prepare ps(int) as select * from prune_tt01 where a > $1; -PREPARE -MogDB=# explain verbose execute ps(12); - QUERY PLAN --------------------------------------------------------------------------------------- - Partition Iterator (cost=0.00..36.86 rows=716 width=8) - Output: a, b - Iterations: PART - Selected Partitions: PART - -> Partitioned Seq Scan on public.prune_tt01 (cost=0.00..36.86 rows=716 width=8) - Output: a, b - Filter: (prune_tt01.a > $1) -(7 rows) - -MogDB=# explain analyze execute ps(12); - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------- - Partition Iterator (cost=0.00..36.86 rows=716 width=8) (actual time=0.099..0.110 rows=8 loops=1) - Iterations: 2 - Selected Partitions: 3..4 - -> Partitioned Seq Scan on prune_tt01 (cost=0.00..36.86 rows=716 width=8) (actual time=0.031..0.034 rows=8 loops=2) - Filter: (a > $1) - Rows Removed by Filter: 3 - Total runtime: 0.218 ms -(7 rows) -``` - -**Subquery scenario** - -- Prune the partition table in the main query - -```sql -MogDB=# explain verbose select * from prune_tt01 where a > (select a from t2 where a > 12 limit 1); - QUERY PLAN --------------------------------------------------------------------------------------- - Partition Iterator (cost=0.04..36.90 
rows=716 width=8) - Output: prune_tt01.a, prune_tt01.b - Iterations: PART - Selected Partitions: PART - InitPlan 1 (returns $0) - -> Limit (cost=0.00..0.04 rows=1 width=4) - Output: t2.a - -> Seq Scan on public.t2 (cost=0.00..1.75 rows=49 width=4) - Output: t2.a - Filter: (t2.a > 12) - -> Partitioned Seq Scan on public.prune_tt01 (cost=0.00..36.86 rows=716 width=8) - Output: prune_tt01.a, prune_tt01.b - Filter: (prune_tt01.a > $0) -(13 rows) - -MogDB=# explain analyze select * from prune_tt01 where a > (select a from t2 where a > 12 limit 1); - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------- - Partition Iterator (cost=0.04..36.90 rows=716 width=8) (actual time=0.172..0.180 rows=7 loops=1) - Iterations: 2 - Selected Partitions: 3..4 - InitPlan 1 (returns $0) - -> Limit (cost=0.00..0.04 rows=1 width=4) (actual time=0.093..0.093 rows=1 loops=1) - -> Seq Scan on t2 (cost=0.00..1.75 rows=49 width=4) (actual time=0.091..0.091 rows=1 loops=1) - Filter: (a > 12) - Rows Removed by Filter: 12 - -> Partitioned Seq Scan on prune_tt01 (cost=0.00..36.86 rows=716 width=8) (actual time=0.020..0.020 rows=7 loops=2) - Filter: (a > $0) - Rows Removed by Filter: 4 - Total runtime: 0.301 ms -(12 rows) -``` - -- Prune the partition table in the subquery: - - any subquery: - -```sql -MogDB=# explain verbose select * from t2 where a > any(select a from prune_tt01 where prune_tt01.a = t2.a); - QUERY PLAN ----------------------------------------------------------------------------------------------------------- - Seq Scan on public.t2 (cost=0.00..582.83 rows=30 width=8) - Output: t2.a, t2.b - Filter: (SubPlan 1) - SubPlan 1 - -> Partition Iterator (cost=4.34..15.01 rows=11 width=4) - Output: prune_tt01.a - Iterations: PART - Selected Partitions: PART - -> Partitioned Bitmap Heap Scan on public.prune_tt01 (cost=4.34..15.01 rows=11 width=4) - Output: prune_tt01.a - Recheck Cond: (prune_tt01.a = t2.a) - -> Partitioned Bitmap Index Scan on index_prune_tt01 (cost=0.00..4.33 rows=11 width=0) - Index Cond: (prune_tt01.a = t2.a) -(13 rows) - -MogDB=# explain analyze select * from t2 where a > any(select a from prune_tt01 where prune_tt01.a = t2.a); - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------- - Seq Scan on t2 (cost=0.00..582.83 rows=30 width=8) (actual time=2.130..2.130 rows=0 loops=1) - Filter: (SubPlan 1) - Rows Removed by Filter: 60 - SubPlan 1 - -> Partition Iterator (cost=4.34..15.01 rows=11 width=4) (actual time=1.860..1.877 rows=20 loops=60) - Iterations: 1 - Selected Partitions: 4 - -> Partitioned Bitmap Heap Scan on prune_tt01 (cost=4.34..15.01 rows=11 width=4) (actual time=0.566..0.576 rows=20 loops=60) - Recheck Cond: (a = t2.a) - Heap Blocks: exact=20 - -> Partitioned Bitmap Index Scan on index_prune_tt01 (cost=0.00..4.33 rows=11 width=0) (actual time=0.482..0.482 rows=20 loops=60) - Index Cond: (a = t2.a) - Total runtime: 2.600 ms -(13 rows) -``` - - exists subquery: - -```sql -MogDB=# explain verbose select * from t2 where exists (select a from prune_tt01 where prune_tt01.a = t2.a limit 1); - QUERY PLAN ----------------------------------------------------------------------------------------------------------------- - Seq Scan on public.t2 (cost=0.00..319.92 rows=30 width=8) - Output: t2.a, t2.b - Filter: (SubPlan 1) - SubPlan 1 - -> Limit (cost=4.34..5.31 rows=1 width=4) - Output: prune_tt01.a - -> 
Partition Iterator (cost=4.34..15.01 rows=11 width=4) - Output: prune_tt01.a - Iterations: PART - Selected Partitions: PART - -> Partitioned Bitmap Heap Scan on public.prune_tt01 (cost=4.34..15.01 rows=11 width=4) - Output: prune_tt01.a - Recheck Cond: (prune_tt01.a = t2.a) - -> Partitioned Bitmap Index Scan on index_prune_tt01 (cost=0.00..4.33 rows=11 width=0) - Index Cond: (prune_tt01.a = t2.a) -(15 rows) - -MogDB=# explain analyze select * from t2 where exists (select a from prune_tt01 where prune_tt01.a = t2.a limit 1); - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------- - Seq Scan on t2 (cost=0.00..319.92 rows=30 width=8) (actual time=0.058..0.875 rows=20 loops=1) - Filter: (SubPlan 1) - Rows Removed by Filter: 40 - SubPlan 1 - -> Limit (cost=4.34..5.31 rows=1 width=4) (actual time=0.826..0.826 rows=20 loops=60) - -> Partition Iterator (cost=4.34..15.01 rows=11 width=4) (actual time=0.789..0.789 rows=20 loops=60) - Iterations: 1 - Selected Partitions: 4 - -> Partitioned Bitmap Heap Scan on prune_tt01 (cost=4.34..15.01 rows=11 width=4) (actual time=0.162..0.162 rows=20 loops=60) - Recheck Cond: (a = t2.a) - Heap Blocks: exact=20 - -> Partitioned Bitmap Index Scan on index_prune_tt01 (cost=0.00..4.33 rows=11 width=0) (actual time=0.123..0.123 rows=20 loops=60) - Index Cond: (a = t2.a) - Total runtime: 1.151 ms -(14 rows) -``` - -**nestloop scenario:** - -The following parameters are set only to simulate the use of nestloop in SQL statements (normally the optimizer will choose the best access path based on the amount of data in the table) - -```sql -MogDB=# SET enable_material = OFF; -SET -MogDB=# SET enable_mergejoin = OFF; -SET -MogDB=# SET enable_hashjoin = OFF; -SET -``` - -```sql -MogDB=# explain verbose select * from prune_tt01 inner join tt02 on prune_tt01.a = tt02.a; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------- - Nested Loop (cost=0.00..1501.91 rows=23091 width=16) - Output: prune_tt01.a, prune_tt01.b, tt02.a, tt02.b - -> Seq Scan on public.tt02 (cost=0.00..31.49 rows=2149 width=8) - Output: tt02.a, tt02.b - -> Partition Iterator (cost=0.00..0.57 rows=11 width=8) - Output: prune_tt01.a, prune_tt01.b - Iterations: PART - Selected Partitions: PART - -> Partitioned Index Scan using index_prune_tt01 on public.prune_tt01 (cost=0.00..0.57 rows=11 width=8) - Output: prune_tt01.a, prune_tt01.b - Index Cond: (prune_tt01.a = tt02.a) -(11 rows) - -MogDB=# explain analyze select * from prune_tt01 inner join tt02 on prune_tt01.a = tt02.a; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------- - Nested Loop (cost=0.00..1501.91 rows=23091 width=16) (actual time=0.078..0.535 rows=20 loops=1) - -> Seq Scan on tt02 (cost=0.00..31.49 rows=2149 width=8) (actual time=0.023..0.030 rows=20 loops=1) - -> Partition Iterator (cost=0.00..0.57 rows=11 width=8) (actual time=0.441..0.462 rows=20 loops=20) - Iterations: 1 - Selected Partitions: 4 - -> Partitioned Index Scan using index_prune_tt01 on prune_tt01 (cost=0.00..0.57 rows=11 width=8) (actual time=0.146..0.158 rows=20 loops=20) - Index Cond: (a = tt02.a) - Total runtime: 0.770 ms -(8 rows) -``` \ No newline at end of file diff --git 
a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/how-to-identify-whether-partition-pruning-has-been-used.md b/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/how-to-identify-whether-partition-pruning-has-been-used.md
deleted file mode 100644
index e4920b5c..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/how-to-identify-whether-partition-pruning-has-been-used.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: How to Judge that Partition Pruning Is Used
-summary: How to Judge that Partition Pruning Is Used
-author: Guo Huan
-date: 2022-06-22
----
-
-# How to Judge that Partition Pruning Is Used
-
-Whether MogDB uses partition pruning is reflected in the execution plan of an SQL statement. You can check it using EXPLAIN VERBOSE or EXPLAIN ANALYZE.
-
-The partition pruning information is reflected in the execution plan columns **Iterations** and **Selected Partitions**.
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/information-that-can-be-used-for-partition-pruning.md b/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/information-that-can-be-used-for-partition-pruning.md
deleted file mode 100644
index 792d42e4..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/information-that-can-be-used-for-partition-pruning.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: Information that Can Be Pruned
-summary: Information that Can Be Pruned
-author: Guo Huan
-date: 2022-06-14
----
-
-# Information that Can Be Pruned
-
-Partition pruning can be performed on partition columns.
-
-MogDB prunes partitions when you use range, `LIKE`, equality, and `IN` list predicates on range or list partition columns, and equality and `IN` list predicates on hash partition columns.
-
-In Example 1, table prune_tt01 is partitioned by range on column a.
-
-MogDB uses predicates on partition columns to perform partition pruning, as shown below.
-
-**Example 1 Creating a table with partition pruning**
-
-```sql
-CREATE TABLE prune_tt01(a int, b int)
-PARTITION BY RANGE(a)
-(
-    PARTITION prune_tt01_p1 VALUES LESS THAN(5),
-    PARTITION prune_tt01_p2 VALUES LESS THAN(10),
-    PARTITION prune_tt01_p3 VALUES LESS THAN(15),
-    PARTITION prune_tt01_p4 VALUES LESS THAN(MAXVALUE)
-);
-INSERT INTO prune_tt01 VALUES (generate_series(1, 20), generate_series(1,20));
-CREATE INDEX index_prune_tt01 ON prune_tt01 USING btree(a) LOCAL;
-
-select * from prune_tt01 where a > 8 ;
-```
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/partition-pruning.md b/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/partition-pruning.md
deleted file mode 100644
index 0bdfd510..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/partition-pruning.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Partition Pruning
-summary: Partition Pruning
-author: Guo Huan
-date: 2023-05-19
----
-
-# Partition Pruning
-
-+ **[Benefits of Partition Pruning](benefits-of-partition-pruning.md)**
-+ **[Information that Can Be Pruned](information-that-can-be-used-for-partition-pruning.md)**
-+ **[How to Judge that Partition Pruning Is Used](how-to-identify-whether-partition-pruning-has-been-used.md)**
-+ **[Static Partition Pruning](static-partition-pruning.md)**
-+ **[Dynamic Partition Pruning](dynamic-partition-pruning.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/static-partition-pruning.md b/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/static-partition-pruning.md
deleted file mode 100644
index 393eb564..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/partition-pruning/static-partition-pruning.md
+++ /dev/null
@@ -1,60 +0,0 @@
----
-title: Static Partition Pruning
-summary: Static Partition Pruning
-author: Guo Huan
-date: 2022-06-14
----
-
-# Static Partition Pruning
-
-MogDB determines when static pruning can be used primarily based on static predicates.
-
-If MogDB can identify at parsing time which consecutive set of partitions is accessed, the **Selected Partitions** column in the execution plan shows the start and end numbers of the accessed partitions. For other partition pruning cases, such as dynamic partition pruning, MogDB displays **Selected Partitions: PART**.
-
-The static partition pruning result can be viewed in the execution plan via the [EXPLAIN VERBOSE](../../../reference-guide/sql-syntax/EXPLAIN.md) statement. In the following example, there are four partitions in the prune_tt01 table. The query plan shows that only partitions 3 and 4 are scanned. Therefore, partitions 1 and 2 have been pruned.
-
-```sql
-MogDB=# \d+ prune_tt01
-          Table "public.prune_tt01"
- Column |  Type   | Modifiers | Storage | Stats target | Description
---------+---------+-----------+---------+--------------+-------------
- a      | integer |           | plain   |              |
- b      | integer |           | plain   |              |
-Indexes:
-    "index_prune_tt01" btree (a) LOCAL TABLESPACE pg_default
-Partition By RANGE(a)
-Number of partitions: 4 (View pg_partition to check each partition range.)
-Has OIDs: no -Options: orientation=row, compression=no -``` - -```sql -MogDB=# explain verbose select * from prune_tt01 where a>12; - QUERY PLAN ----------------------------------------------------------------------------------------------------- - Partition Iterator (cost=13.80..27.75 rows=716 width=8) - Output: a, b - Iterations: 2 - Selected Partitions: 3..4 - -> Partitioned Bitmap Heap Scan on public.prune_tt01 (cost=13.80..27.75 rows=716 width=8) - Output: a, b - Recheck Cond: (prune_tt01.a > 12) - -> Partitioned Bitmap Index Scan on index_prune_tt01 (cost=0.00..13.62 rows=716 width=0) - Index Cond: (prune_tt01.a > 12) -(9 rows) -``` - -
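-
-Static pruning also applies to other predicate forms on the partition key, as summarized in Table 1 below. The following sketch reuses the prune_tt01 table from above; the expected **Selected Partitions** values in the comments are inferred from the partition bounds rather than captured output:
-
-```sql
--- IN list on the partition key: both values fall below 5,
--- so only partition 1 should be selected.
-explain verbose select * from prune_tt01 where a in (2, 3);
-
--- BOOL expression on the partition key: qualifying values lie
--- between 2 and 12 and span partitions 1..3.
-explain verbose select * from prune_tt01 where a > 2 and a < 12;
-```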
- -The detailed information for static partition pruning is shown in the following table. - -**Table 1** Detailed information for static partition pruning - -| No. | Constraint Name | Constraint Range | -| ---- | ------------------------------ | ------------------------------------------------------------ | -| 1 | Partition table type | Range partition, list partition, and hash partition | -| 2 | Partition expression type | - A partition constraint can be a single expression, such as `a >12`
- A partition constraint can be a BOOL expression, such as `a > 2 and a < 12`
- A partition constraint can be an array, such as `a in (2, 3)`
- A partition constraint can be a constant expression, such as `1 = 1`
- A partition constraint can be the `Is (NOT)NULL` expression, such as `a IS NULL` | -| 3 | Partition expression operator | - Range partition table supports five operators, including `=`, `>`, `>=`, `<`, and `<=`.
- List and hash partitions support only the `=` operator. |
-| 4 | Partition expression parameter | One side is the partition key, and the other side is a constant, such as `a > 12`. |
-| 5 | Second-level partition | Combinations of range, list, and hash partitioned tables, such as a Range-List partitioned table. |
-| 6 | Partition pruning result | EXPLAIN VERBOSE shows the pruned partition list. |
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/recommendations-for-choosing-a-partitioning-strategy.md b/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/recommendations-for-choosing-a-partitioning-strategy.md
deleted file mode 100644
index 318ca816..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/recommendations-for-choosing-a-partitioning-strategy.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Recommendations For Choosing A Partitioning Strategy
-summary: Recommendations For Choosing A Partitioning Strategy
-author: Guo Huan
-date: 2023-05-19
----
-
-# Recommendations For Choosing A Partitioning Strategy
-
-+ **[When to Use Range Partitioning](when-to-use-range-partitioning.md)**
-+ **[When to Use List Partitioning](when-to-use-list-partitioning.md)**
-+ **[When to Use Hash Partitioning](when-to-use-hash-partitioning.md)**
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-hash-partitioning.md b/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-hash-partitioning.md
deleted file mode 100644
index f0ca3bb3..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-hash-partitioning.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: When to Use Hash Partitioning
-summary: When to Use Hash Partitioning
-author: Guo Huan
-date: 2022-06-14
----
-
-# When to Use Hash Partitioning
-
-Hash partitioning is useful for randomly distributing data across partitions based on a hash algorithm, rather than for grouping similar data.
-
-In some cases, the partition key can be determined, but it is not obvious which partition the data should be placed in. In other cases, you may not want to group similar data as range partitioning does, but instead want the physical distribution of data to be independent of its business or logical view. Hash partitioning places a row into a partition based on the result of passing the partition key to a hash algorithm.
-
-With this approach, data is distributed randomly across partitions rather than grouped. This is a good approach for some data, but it may not be appropriate for managing historical data. However, hash partitioning shares some of the performance characteristics of range partitioning. For example, partition pruning is limited to equality predicates. You can also use partitioned joins, parallel index access, and parallel DML.
-
-The advantage of hash partitioning is that the distribution of data is almost random and therefore relatively uniform, which can avoid hot spot problems to some extent.
-
-The disadvantages of hash partitioning are as follows:
-
-- You cannot perform a range query without storing additional data.
-- When nodes are added or deleted, each node requires a corresponding hash value, so adding nodes requires modifying the hash function. This remaps much of the existing data and causes massive data movement, during which the system may be unable to keep working.
-
-In Example 1, four hash partitions are created for table **sales_hash** using the s_productid column as the partition key. Parallel joins with the products table can take advantage of partial or full partition-wise joins. Partition pruning benefits queries that access the sales data of only a single product or a subset of products.
-
-If you specify a hash partition number instead of an explicit partition name, MogDB automatically generates an internal name for the partition.
-
-**Example 1 Creating a hash-partitioned table**
-
-```sql
-CREATE TABLE sales_hash
-  (s_productid  NUMBER,
-   s_saledate   DATE,
-   s_custid     NUMBER,
-   s_totalprice NUMBER)
-PARTITION BY HASH(s_productid)
-( PARTITION p1 ,
-  PARTITION p2,
-  PARTITION p3 ,
-  PARTITION p4
-);
-```
-
-## Related Page
-
-[Partitioned Tables](../../../reference-guide/sql-reference/partition-table.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-list-partitioning.md b/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-list-partitioning.md
deleted file mode 100644
index ae87f6b3..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-list-partitioning.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: When to Use List Partitioning
-summary: When to Use List Partitioning
-author: Guo Huan
-date: 2022-06-14
----
-
-# When to Use List Partitioning
-
-List partitioning explicitly maps rows to partitions based on discrete values.
-
-In Example 1, account managers who analyze accounts by region can make effective use of partition pruning.
-
-**Example 1 Creating a list-partitioned table**
-
-```sql
-CREATE TABLE accounts
-( id             NUMBER
-, account_number NUMBER
-, customer_id    NUMBER
-, branch_id      NUMBER
-, region         VARCHAR(2)
-, status         VARCHAR2(1)
-)
-PARTITION BY LIST (region)
-( PARTITION p_northwest VALUES ('OR', 'WA')
-, PARTITION p_southwest VALUES ('AZ', 'UT', 'NM')
-, PARTITION p_northeast VALUES ('NY', 'VM', 'NJ')
-, PARTITION p_southeast VALUES ('FL', 'GA')
-, PARTITION p_northcentral VALUES ('SD', 'WI')
-, PARTITION p_southcentral VALUES ('OK', 'TX')
-);
-```
-
-## Related Page
-
-[Partitioned Tables](../../../reference-guide/sql-reference/partition-table.md)
\ No newline at end of file
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-range-partitioning.md b/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-range-partitioning.md
deleted file mode 100644
index 30a74cac..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/partition-management/recommendations-for-choosing-a-partitioning-strategy/when-to-use-range-partitioning.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-title: When to Use Range Partitioning
-summary: When to Use Range Partitioning
-author: Guo Huan
-date: 2022-06-14
----
-
-# When to Use Range Partitioning
-
-Range partitioning is useful for organizing similar data, especially date and time data.
-
-Range partitioning is a convenient way to partition historical data. The boundaries of a range partition define the order of partitions in a table or index.
-
-Most SQL statements that access range partitions focus on time ranges. For example, an SQL statement selects data from a specific time period. If each partition represents one month of data, the amount of data scanned is reduced to a fraction of the total. This optimization method is called partition pruning.
-
-Range partitioning is also useful when you need to periodically load new data and purge old data, because range partitions are easy to add and remove. For example, a system typically maintains a rolling data window that keeps the data of the last 36 months online. Range partitioning simplifies this process.
-
-In summary, consider using range partitioning when:
-
-- It is often necessary to perform range predicate scans on easily partitioned columns in large tables.
-- You want to maintain a rolling data window.
-- Administrative tasks on large tables, such as backup and restoration, cannot be completed within a specified time window, but the tables can be divided into smaller logical chunks based on the partition range columns.
- -**Example 1 Creating a range-partitioned table** - -```sql -CREATE TABLE sales_table -( - order_no INTEGER NOT NULL, - goods_name CHAR(20) NOT NULL, - sales_date DATE NOT NULL, - sales_volume INTEGER, - sales_store CHAR(20) -) -PARTITION BY RANGE(sales_date) -( - PARTITION season1 VALUES LESS THAN('2021-04-01 00:00:00'), - PARTITION season2 VALUES LESS THAN('2021-07-01 00:00:00'), - PARTITION season3 VALUES LESS THAN('2021-10-01 00:00:00'), - PARTITION season4 VALUES LESS THAN(MAXVALUE) -); -``` - -## Related Page - -[Partitioned Tables](../../../reference-guide/sql-reference/partition-table.md) \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-1-plpgsql-overview.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-1-plpgsql-overview.md deleted file mode 100644 index d3cc52ef..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-1-plpgsql-overview.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Overview of PL/pgSQL Functions -summary: Overview of PL/pgSQL Functions -author: Guo Huan -date: 2021-11-10 ---- - -# Overview of PL/pgSQL Functions - -PL/pgSQL is a loadable procedural language. - -The functions created using PL/pgSQL can be used in any place where you can use built-in functions. For example, you can create calculation functions with complex conditions and use them to define operators or use them for index expressions. - -SQL is used by most databases as a query language. It is portable and easy to learn. Each SQL statement must be executed independently by a database server. - -In this case, when a client application sends a query to the server, it must wait for it to be processed, receive and process the results, and then perform some calculation before sending more queries to the server. If the client and server are not on the same machine, all these operations will cause inter-process communication and increase network loads. - -PL/pgSQL enables a whole computing part and a series of queries to be grouped inside a database server. This makes procedural language available and SQL easier to use. In addition, the client/server communication cost is reduced. - -- Extra round-trip communication between clients and servers is eliminated. -- Intermediate results that are not required by clients do not need to be sorted or transmitted between the clients and servers. -- Parsing can be skipped in multiple rounds of queries. - -PL/pgSQL can use all data types, operators, and functions in SQL. There are some common functions, such as gs_extend_library. 
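-
-For instance, a minimal sketch (the function name and body here are invented for illustration) shows how a PL/pgSQL function, once created, is called exactly like a built-in function:
-
-```sql
--- Hypothetical helper: returns the larger of two integers.
-CREATE OR REPLACE FUNCTION pick_max(a int, b int) RETURNS int
-AS $$
-BEGIN
-    IF a >= b THEN
-        RETURN a;
-    END IF;
-    RETURN b;
-END;
-$$ LANGUAGE plpgsql;
-
--- Usable wherever a built-in function is allowed:
-SELECT pick_max(3, 7);
-```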
-
-+ **[Data Types](1-2-data-types.md)**
-+ **[Data Type Conversion](1-3-data-type-conversion.md)**
-+ **[Arrays, Sets, and Record](1-4-arrays-and-records.md)**
-+ **[DECLARE Syntax](1-5-declare-syntax.md)**
-+ **[Basic Statements](1-6-basic-statements.md)**
-+ **[Dynamic Statements](1-7-dynamic-statements.md)**
-+ **[Control Statements](1-8-control-statements.md)**
-+ **[Transaction Management](1-9-transaction-management.md)**
-+ **[Other Statements](1-10-other-statements.md)**
-+ **[Cursors](1-11-cursors.md)**
-+ **[Advanced Packages](advanced-packages/advanced-packages.md)**
-+ **[Retry Management](1-12-retry-management.md)**
-+ **[Debugging](1-13-debugging.md)**
-+ **[Package](1-14-package.md)**
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-10-other-statements.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-10-other-statements.md
deleted file mode 100644
index cc2a571d..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-10-other-statements.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Other Statements
-summary: Other Statements
-author: Guo Huan
-date: 2021-03-04
----
-
-# Other Statements
-
-## Lock Operations
-
-MogDB provides multiple lock modes to control concurrent access to table data. These modes are used when Multi-Version Concurrency Control (MVCC) cannot provide the expected behavior. Likewise, most MogDB commands automatically apply appropriate locks to ensure that the referenced tables are not deleted or modified in an incompatible manner during command execution. For example, when concurrent operations exist, **ALTER TABLE** cannot be executed on the same table.
-
-## Cursor Operations
-
-MogDB provides cursors as a data buffer for users to store the execution results of SQL statements. Each cursor region has a name. Users can use SQL statements to fetch records one by one from a cursor and assign them to host variables for further processing by the host language.
-
-Cursor operations include cursor definition, open, fetch, and close operations.
-
-For the complete example of cursor operations, see [Explicit Cursor](1-11-cursors.md#explicit-cursor).
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-11-cursors.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-11-cursors.md
deleted file mode 100644
index 04c8397e..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-11-cursors.md
+++ /dev/null
@@ -1,180 +0,0 @@
----
-title: Cursors
-summary: Cursors
-author: Guo Huan
-date: 2021-03-04
----
-
-# Cursors
-
-## Overview
-
-To process SQL statements, a stored procedure allocates a memory segment to store the execution context. Cursors are handles or pointers to these context regions. With cursors, stored procedures can control changes in the context regions.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
-> If JDBC is used to call a stored procedure whose returned value is a cursor, the returned cursor cannot be used.
-
-Cursors are classified into explicit cursors and implicit cursors. [Table 1](#Table 1) shows the usage conditions of explicit and implicit cursors for different SQL statements.
- -**Table 1** Cursor usage conditions - -| SQL Statement | Cursor | -| :---------------------------------------- | :------------------- | -| Non-query statements | Implicit | -| Query statements with single-line results | Implicit or explicit | -| Query statements with multi-line results | Explicit | - -## Explicit Cursor - -An explicit cursor is used to process query statements, particularly when query results are multiple records. - -**Procedure** - -An explicit cursor performs the following six PL/SQL steps to process query statements: - -1. **Define a static cursor:** Define a cursor name and its corresponding **SELECT** statement. - - [Figure 1](#static_cursor_define) shows the syntax diagram for defining a static cursor. - - **Figure 1** static_cursor_define::= - - ![static_cursor_define](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/cursors-1.jpg) - - Parameter description: - - - **cursor_name**: defines a cursor name. - - - **parameter**: specifies cursor parameters. Only input parameters are allowed in the following format: - - ``` - parameter_name datatype - ``` - - - **select_statement**: specifies a query statement. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > The system automatically determines whether the cursor can be used for backward fetches based on the execution plan. - - **Define a dynamic cursor:** Define a **ref** cursor, which means that the cursor can be opened dynamically by a set of static SQL statements. First define the type of the **ref** cursor first and then the cursor variable of this cursor type. Dynamically bind a **SELECT** statement through **OPEN FOR** when the cursor is opened. - - [Figure 2](#cursor_typename) and [Figure 3](#dynamic_cursor_define) show the syntax diagrams for defining a dynamic cursor. - - **Figure 2** cursor_typename::= - - ![cursor_typename](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/cursors-2.png) - - **Figure 3** dynamic_cursor_define::= - - ![dynamic_cursor_define](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/cursors-3.png) - -2. **Open the static cursor:** Execute the **SELECT** statement corresponding to the cursor. The query result is placed in the workspace and the pointer directs to the head of the workspace to identify the cursor result set. If the cursor query statement carries the **FOR UPDATE** option, the **OPEN** statement locks the data row corresponding to the cursor result set in the database table. - - [Figure 4](#open_static_cursor) shows the syntax diagram for opening a static cursor. - - **Figure 4** open_static_cursor::= - - ![open_static_cursor](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/cursors-4.png) - - **Open the dynamic cursor:** Use the **OPEN FOR** statement to open the dynamic cursor and the SQL statement is dynamically bound. - - [Figure 5](#open_dynamic_cursor) shows the syntax diagrams for opening a dynamic cursor. - - **Figure 5** open_dynamic_cursor::= - - ![open_dynamic_cursor](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/cursors-5.png) - - A PL/SQL program cannot use the OPEN statement to repeatedly open a cursor. - -3. **Fetch cursor data**: Retrieve data rows in the result set and place them in specified output variables. - - [Figure 6](#fetch_cursor) shows the syntax diagrams for fetching cursor data. - - **Figure 6** fetch_cursor::= - - ![fetch_cursor](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/cursors-6.png) - -4. 
Process the record. - -5. Continue to process until the active set has no record. - -6. **Close the cursor**: When fetching and finishing the data in the cursor result set, close the cursor immediately to release system resources used by the cursor and invalidate the workspace of the cursor so that the **FETCH** statement cannot be used to fetch data any more. A closed cursor can be reopened by an OPEN statement. - - [Figure 7](#close_cursor) shows the syntax diagram for closing a cursor. - - **Figure 7** close_cursor::= - - ![close_cursor](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/cursors-7.jpg) - -**Attribute** - -Cursor attributes are used to control program procedures or know program status. When a DML statement is executed, the PL/SQL opens a built-in cursor and processes its result. A cursor is a memory segment for maintaining query results. It is opened when a DML statement is executed and closed when the execution is finished. An explicit cursor has the following attributes: - -- **%FOUND** attribute: returns **TRUE** if the last fetch returns a row. -- **%NOTFOUND** attribute: works opposite to the **%FOUND** attribute. -- **%ISOPEN** attribute: returns **TRUE** if the cursor has been opened. -- **%ROWCOUNT** attribute: returns the number of records fetched from the cursor. - -## Implicit Cursor - -Implicit cursors are automatically set by the system for non-query statements such as modify or delete operations, along with their workspace. Implicit cursors are named **SQL**, which is defined by the system. - -**Overview** - -Implicit cursor operations, such as definition, open, value-grant, and close operations, are automatically performed by the system and do not need users to process. Users can use only attributes related to implicit cursors to complete operations. In workspace of implicit cursors, the data of the latest SQL statement is stored and is not related to explicit cursors defined by users. - -Format call:**SQL%** - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - **INSERT**, **UPDATE**, **DELETE**, and **SELECT** statements do not need defined cursors. -> - In O-compatible mode, if the GUC parameter **behavior_compat_options** is set to **compat_cursor**, implicit cursors are valid across stored procedures. - -**Attributes** - -An implicit cursor has the following attributes: - -- **SQL%FOUND**: Boolean attribute, which returns **TRUE** if the last fetch returns a row. -- **SQL%NOTFOUND**: Boolean attribute, which works opposite to the **SQL%FOUND** attribute. -- **SQL%ROWCOUNT**: numeric attribute, which returns the number of records fetched from the cursor. -- **SQL%ISOPEN**: Boolean attribute, whose value is always **FALSE**. Close implicit cursors immediately after an SQL statement is run. - -**Examples** - -```sql --- Delete all employees in a department from the hr.staffs table. If the department has no employees, delete the department from the hr.sections table. -CREATE OR REPLACE PROCEDURE proc_cursor3() -AS - DECLARE - V_DEPTNO NUMBER(4) := 100; - BEGIN - DELETE FROM hr.staffs WHERE section_ID = V_DEPTNO; - -- Proceed based on cursor status. - IF SQL%NOTFOUND THEN - DELETE FROM hr.sections WHERE section_ID = V_DEPTNO; - END IF; - END; -/ - -CALL proc_cursor3(); - --- Delete the stored procedure and the temporary table. -DROP PROCEDURE proc_cursor3; -``` - -## Cursor Loop - -Use of cursors in WHILE and LOOP statements is called a cursor loop. 
Generally, OPEN, FETCH, and CLOSE statements are involved in this kind of loop. The following describes a loop that simplifies a cursor loop without the need for these operations. This kind of loop is applicable to a static cursor loop, without executing four steps about a static cursor. - -**Syntax** - -[Figure 8](#FOR_AS_loop) shows the syntax diagram of the **FOR AS** loop. - -**Figure 8** FOR_AS_loop::= - -![for_as_loop](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/cursors-8.png) - -**Precautions** - -- The **UPDATE** operation for the queried table is not allowed in the loop statement. -- The variable loop_name is automatically defined and is valid only in this loop. Its type is the same as that in the select_statement query result. The value of **loop_name** is the query result of **select_statement**. -- The **%FOUND**, **%NOTFOUND**, and **%ROWCOUNT** attributes access the same internal variable in MogDB. Transactions and the anonymous block do not support multiple cursor accesses at the same time. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-12-retry-management.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-12-retry-management.md deleted file mode 100644 index 4b9dfc06..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-12-retry-management.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Retry Management -summary: Retry Management -author: Guo Huan -date: 2021-03-04 ---- - -# Retry Management - -Retry is a process in which the database executes a SQL statement or stored procedure (including anonymous block) again in the case of execution failure, improving the execution success rate and user experience. The database checks the error code and retry configuration to determine whether to retry. - -- If the execution fails, the system rolls back the executed statements and executes the stored procedure again. - - Example: - - ```sql - MogDB=# CREATE OR REPLACE PROCEDURE retry_basic ( IN x INT) - AS - BEGIN - INSERT INTO t1 (a) VALUES (x); - INSERT INTO t1 (a) VALUES (x+1); - END; - / - - MogDB=# CALL retry_basic(1); - ``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-13-debugging.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-13-debugging.md deleted file mode 100644 index 30bffe0c..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-13-debugging.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Debugging -summary: Debugging -author: Guo Huan -date: 2021-03-04 ---- - -# Debugging - -## Syntax - -### RAISE - -RAISE has the following five syntax formats: - -**Figure 1** raise_format::= - -![raise_format](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/debugging-1.png) - -**Figure 2** raise_condition::= - -![raise_condition](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/debugging-2.png) - -**Figure 3** raise_sqlstate::= - -![raise_sqlstate](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/debugging-3.png) - -**Figure 4** raise_option::= - -![raise_option](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/debugging-4.png) - -**Figure 5** raise::= - -![raise](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/debugging-5.png) - -**Parameter description**: - -- The level option is used to specify the error level, that is, **DEBUG**, **LOG**, **INFO**, **NOTICE**, **WARNING**, or **EXCEPTION** (default). 
**EXCEPTION** throws an error that normally terminates the current transaction and the others only generate information at their levels. The [log_min_messages](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-time.md#log_min_messages) and [client_min_messages](../../reference-guide/guc-parameters/error-reporting-and-logging/logging-time.md#client_min_messages) parameters control whether the error messages of specific levels are reported to the client and are written to the server log. - -- **format**: specifies the error message text to be reported, a format string. The format string can be appended with an expression for insertion to the message text. In a format string, **%** is replaced by the parameter value attached to format and **%%** is used to print **%**. For example: - - ``` - --v_job_id replaces % in the string. - RAISE NOTICE 'Calling cs_create_job(%)',v_job_id; - ``` - -- **option = expression**: inserts additional information to an error report. The keyword option can be **MESSAGE**, **DETAIL**, **HINT**, or **ERRCODE**, and each expression can be any string. - - - **MESSAGE**: specifies the error message text. This option cannot be used in a **RAISE** statement that contains a format character string in front of **USING**. - - **DETAIL**: specifies detailed information of an error. - - **HINT**: prints hint information. - - **ERRCODE**: designates an error code (SQLSTATE) to a report. A condition name or a five-character SQLSTATE error code can be used. - -- **condition_name**: specifies the condition name corresponding to the error code. - -- **sqlstate**: specifies the error code. - -If neither a condition name nor an **SQLSTATE** is designated in a **RAISE EXCEPTION** command, the **RAISE EXCEPTION (P0001)** is used by default. If no message text is designated, the condition name or SQLSTATE is used as the message text by default. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - If the **SQLSTATE** designates an error code, the error code is not limited to a defined error code. It can be any error code containing five digits or ASCII uppercase rather than **00000**. Do not use an error code ended with three zeros because such error codes are category codes and can be captured by the whole category. -> - In O-compatible mode, SQLCODE is equivalent to SQLSTATE. -> -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> -> - The syntax described in [Figure 5](#raise) does not append any parameter. This form is used only for the **EXCEPTION** statement in a **BEGIN** block so that the error can be re-processed. -> - For the condition name specified by ERRCODE and condition_name, see [Description of SQL Error Codes](../../reference-guide/error-code-reference/description-of-sql-error-codes/description-of-sql-error-codes.md). Only ERROR condition names are supported. - -### EXCEPTION_INIT - -In O-compatible mode, EXCEPTION_INIT can be used to define the SQLCODE error code. The syntax is as follows: - -**Figure 6** exception_init::= - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/debugging-6.png) - -**Parameter description**: - -- **exception_name** indicates the name of the exception declared by the user. The **EXCEPTION_INIT** syntax must follow the declared exception. -- **sqlcode** is a customized SQL code, which must be a negative integer ranging from –2147483647 to –1. 
- -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** When **EXCEPTION_INIT** is used to customize an SQL code, SQLSTATE is equivalent to SQLCODE, and SQLERRM is in the format of *xxx*: **non-GaussDB Exception**. For example, if the customized SQL code is **–1**, SQLSTATE is **–1** and SQLERRM is **1: non-GaussDB Exception**. - -**Example** - -Display error and hint information when a transaction terminates: - -```sql -CREATE OR REPLACE PROCEDURE proc_raise1(user_id in integer) -AS -BEGIN -RAISE EXCEPTION 'Noexistence ID --> %',user_id USING HINT = 'Please check your user ID'; -END; -/ - -call proc_raise1(300011); - --- Execution result: -ERROR: Noexistence ID --> 300011 -HINT: Please check your user ID -``` - -Two methods are available for setting **SQLSTATE**: - -```sql -CREATE OR REPLACE PROCEDURE proc_raise2(user_id in integer) -AS -BEGIN -RAISE 'Duplicate user ID: %',user_id USING ERRCODE = 'unique_violation'; -END; -/ - -\set VERBOSITY verbose -call proc_raise2(300011); - --- Execution result: -ERROR: Duplicate user ID: 300011 -SQLSTATE: 23505 -``` - -If the main parameter is a condition name or **SQLSTATE**, the following applies: - -RAISE division_by_zero; - -RAISE SQLSTATE '22012'; - -For example: - -```sql -CREATE OR REPLACE PROCEDURE division(div in integer, dividend in integer) -AS -DECLARE -res int; - BEGIN - IF dividend=0 THEN - RAISE division_by_zero; - RETURN; - ELSE - res := div/dividend; - RAISE INFO 'division result: %', res; - RETURN; - END IF; - END; -/ -call division(3,0); - --- Execution result: -ERROR: division_by_zero -``` - -Alternatively: - -``` -RAISE unique_violation USING MESSAGE = 'Duplicate user ID: ' || user_id; -``` - -In O-compatible mode, EXCEPTION_INIT can be used to customize error codes SQLCODE. - -``` -declare - deadlock_detected exception; - pragma exception_init(deadlock_detected, -1); -begin - if 1 > 0 then - raise deadlock_detected; - end if; -exception - when deadlock_detected then - raise notice 'sqlcode:%,sqlstate:%,sqlerrm:%',sqlcode,sqlstate,sqlerrm; -end; -/ --- Execution result: -NOTICE: sqlcode:-1,sqlstate:-1,sqlerrm: 1: non-GaussDB Exception -``` \ No newline at end of file diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-14-package.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-14-package.md deleted file mode 100644 index 56a00411..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-14-package.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Package -summary: Package -author: Guo Huan -date: 2022-04-27 ---- - -# Package - -A package is a combination of PL/SQL programs, such as stored procedures, functions, variables, constants, and cursors. It is object-oriented and can encapsulate PL/SQL program design elements. Functions in a package are created, deleted, and modified in a unified manner. - -A package contains two parts: package specifications and package body. The declaration contained in the package specifications can be accessed by external functions and anonymous blocks. The declaration contained in the package body cannot be accessed by external functions or anonymous blocks, but can be accessed only by functions and stored procedures in the package body. - -For details about how to create a package, see [CREATE PACKAGE](../../reference-guide/sql-syntax/CREATE-PACKAGE.md). - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - Cross-package variables cannot be used as control variables in the for loops. 
-> - Types defined in a package cannot be deleted or modified, and cannot be used to define tables. -> - Cursor variables cannot be referenced in SCHEMA.PACKAGE.CUROSR mode. -> - A cursor with parameters can be opened only in the current package. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-2-data-types.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-2-data-types.md deleted file mode 100644 index e43bc6ea..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-2-data-types.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Data Types -summary: Data Types -author: Guo Huan -date: 2021-03-04 ---- - -# Data Types - -A data type refers to a value set and an operation set defined on the value set. The MogDB database consists of tables, each of which is defined by its own columns. Each column corresponds to a data type. The MogDB uses corresponding functions to perform operations on data based on data types. For example, the MogDB can perform addition, subtraction, multiplication, and division operations on data of numeric values. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-3-data-type-conversion.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-3-data-type-conversion.md deleted file mode 100644 index f37d0f9b..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-3-data-type-conversion.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Data Type Conversion -summary: Data Type Conversion -author: Guo Huan -date: 2021-03-04 ---- - -# Data Type Conversion - -Certain data types in the database support implicit data type conversions, such as assignments and parameters called by functions. For other data types, you can use the type conversion functions provided by MogDB, such as the **CAST** function, to forcibly convert them. - -MogDB lists common implicit data type conversions in [Table 1](#Implicit data type conversions). - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> The valid value range of **DATE** supported by MogDB is from 4713 B.C. to 294276 A.D. - -**Table 1** Implicit data type conversions - -| Raw Data Type | Target Data Type | **Remarks** | -| :------------ | :--------------- | :------------------------------------------- | -| CHAR | VARCHAR2 | - | -| CHAR | NUMBER | Raw data must consist of digits. | -| CHAR | DATE | Raw data cannot exceed the valid date range. | -| CHAR | RAW | - | -| CHAR | CLOB | - | -| VARCHAR2 | CHAR | - | -| VARCHAR2 | NUMBER | Raw data must consist of digits. | -| VARCHAR2 | DATE | Raw data cannot exceed the valid date range. | -| VARCHAR2 | CLOB | - | -| NUMBER | CHAR | - | -| NUMBER | VARCHAR2 | - | -| DATE | CHAR | - | -| DATE | VARCHAR2 | - | -| RAW | CHAR | - | -| RAW | VARCHAR2 | - | -| CLOB | CHAR | - | -| CLOB | VARCHAR2 | - | -| CLOB | NUMBER | Raw data must consist of digits. 
-| INT4 | CHAR | - |
-| INT4 | BOOLEAN | - |
-| BOOLEAN | INT4 | - |
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-4-arrays-and-records.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-4-arrays-and-records.md
deleted file mode 100644
index e580331f..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-4-arrays-and-records.md
+++ /dev/null
@@ -1,813 +0,0 @@
----
-title: Arrays, Sets, and Record
-summary: Arrays, Sets, and Record
-author: Guo Huan
-date: 2021-03-04
----
-
-# Arrays, Sets, and Record
-
-## Arrays
-
-### Use of Array Types
-
-Before arrays can be used, an array type needs to be defined:
-
-Define an array type immediately after the **AS** keyword in a stored procedure. The method is as follows:
-
-```
-TYPE array_type IS VARRAY(size) OF data_type;
-```
-
-In the preceding information:
-
-- **array_type**: indicates the name of the array type to be defined.
-- **VARRAY**: indicates the array type to be defined.
-- **size**: indicates the maximum number of members in the array type to be defined. The value is a positive integer.
-- **data_type**: indicates the types of members in the array type to be created.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - In MogDB, an array grows automatically. If an out-of-range element is accessed, a null value is returned, and no error message is reported.
-> - The scope of an array type defined in a stored procedure takes effect only in this stored procedure.
-> - It is recommended that you define array types in this way. If an array type with the same name is defined both inside and outside a stored procedure, MogDB prefers the array type defined in the stored procedure when declaring array variables.
-> - **data_type** can also be a **record** type defined in a stored procedure (anonymous blocks are not supported), but cannot be an array or collection type defined in a stored procedure.
-
-MogDB supports access to array elements by using parentheses, and it also supports the **extend**, **count**, **first**, **last**, **prior**, **exists**, **trim**, **next**, and **delete** functions.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - If a stored procedure contains a DML statement (such as SELECT, UPDATE, INSERT, or DELETE), you are advised to use square brackets to access array elements. Parentheses are interpreted as array access by default; if no array with that name exists, the expression is interpreted as a function call.
-> - When a CLOB is larger than 1 GB, the table of type, the record type, and CLOB cannot be used in input or output parameters, cursors, or RAISE INFO statements in a stored procedure.
-
-## Sets
-
-### Use of Set Types
-
-Before sets can be used, a set type needs to be defined.
-
-Define a set type immediately after the **AS** keyword in a stored procedure. The definition method is as follows:
-
-![syntax-of-the-record-type](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/arrays-and-records-2.png)
-
-In the preceding information:
-
-- **table_type**: indicates the name of the set type to be defined.
-- **TABLE**: indicates the set type to be defined.
-- **data_type**: indicates the types of members in the set to be created.
-- **indexby_type**: indicates the type of the set index to be created.
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - In MogDB, a set grows automatically. If an out-of-range element is accessed, a null value is returned, and no error message is reported.
-> - The scope of a set type defined in a stored procedure takes effect only in this stored procedure. -> - The index can only be of the integer or varchar type. The length of the varchar type is not restricted. -> - **NOT NULL** has no function but only takes effect in the syntax. -> - **data_type** can also be the record type or set type defined in a stored procedure (anonymous blocks are not supported), but cannot be the array type. -> - Variables of the nested set type cannot be used across packages. -> - Variables of the **TABLE OF** **index by** type cannot be nested in a record as the input and output parameters of a stored procedure. -> - Variables of the **TABLE OF** **index by** type cannot be used as input and output parameters of functions. -> - The **RAISE INFO** command cannot be used to print the entire nested **TABLE OF** variable. -> - The **TABLE OF** variable cannot be transferred across autonomous transactions. -> - The input and output parameters of a stored procedure cannot be defined as the nested **TABLE OF** type. - -MogDB supports access to set elements by using parentheses, and it also supports the **extend**, **count**, **first**, **last**, **prior**, **next**, and **delete** functions. - -The set functions support **multiset union**, **intersect**, **except all**, and **distinct**. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** An expression can contain only one variable of the **TABLE OF** **index by** type. - -### Functions Supported by Sets - -#### Set Operators - -- = - - Parameter type: nest-table - - Return value: **true** or **false**, Boolean type - - Description: Checks whether two sets are of the same type. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1,2); - MogDB-# b nest := nest(1,2); - MogDB-# flag bool; - MogDB-# begin - MogDB$# flag := a = b; - MogDB$# raise info '%', flag; - MogDB$# end; - MogDB$# / - INFO: t - ANONYMOUS BLOCK EXECUTE - ``` - -- <> - - Parameter type: nest-table - - Return value: **true** or **false**, Boolean type - - Description: Checks whether the types of two sets are different. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1,2); - MogDB-# b nest := nest(1,2); - MogDB-# flag bool; - MogDB-# begin - MogDB$# flag := a <> b; - MogDB$# raise info '%', flag; - MogDB$# end; - MogDB$# / - INFO: f - ANONYMOUS BLOCK EXECUTE - ``` - -#### MULTISET - -- MULTISET UNION [ALL | DISTINCT] - - Parameter type: nest-table - - Return type: nest-table - - Description: Union of two set variables. **ALL** indicates that duplicate elements are not removed, and **DISTINCT** indicates that duplicate elements are removed. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1,2); - MogDB-# b nest := nest(2,3); - MogDB-# begin - MogDB$# a := a MULTISET UNION ALL b; - MogDB$# raise info '%', a; - MogDB$# end; - MogDB$# / - INFO: {1,2,2,3} - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1,2); - MogDB-# b nest := nest(2,3); - MogDB-# begin - MogDB$# a := a MULTISET UNION DISTINCT b; - MogDB$# raise info '%', a; - MogDB$# end; - MogDB$# / - INFO: {1,2,3} - ANONYMOUS BLOCK EXECUTE - ``` - -- MULTISET EXCEPT [ALL | DISTINCT] - - Parameter type: nest-table - - Return type: nest-table - - Description: Difference of two set variables. 
Taking A MULTISET EXCEPT B as an example, **ALL** indicates that elements that are the same as those in B are removed from A. **DISTINCT** indicates that duplicate elements are removed from A first and then elements that are the same as those in B are removed from A. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1,2,2); - MogDB-# b nest := nest(2,3); - MogDB-# begin - MogDB$# a := a MULTISET EXCEPT ALL b; - MogDB$# raise info '%', a; - MogDB$# end; - MogDB$# / - INFO: {1,2} - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1,2,2); - MogDB-# b nest := nest(2,3); - MogDB-# begin - MogDB$# a := a MULTISET EXCEPT DISTINCT b; - MogDB$# raise info '%', a; - MogDB$# end; - MogDB$# / - INFO: {1} - ANONYMOUS BLOCK EXECUTE - ``` - -- MULTISET INTERSECT [ALL | DISTINCT] - - Parameter type: nest-table - - Return type: nest-table - - Description: Intersection of two set variables. Taking A MULTISET INTERSECT B as an example, **ALL** indicates that all duplicate elements in A and B are obtained, and **DISTINCT** indicates that duplicate elements in A and B are obtained and then duplicate elements in this intersection are removed. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1,2,2); - MogDB-# b nest := nest(2,2,3); - MogDB-# begin - MogDB$# a := a MULTISET INTERSECT ALL b; - MogDB$# raise info '%', a; - MogDB$# end; - MogDB$# / - INFO: {2,2} - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1,2,2); - MogDB-# b nest := nest(2,2,3); - MogDB-# begin - MogDB$# a := a MULTISET INTERSECT DISTINCT b; - MogDB$# raise info '%', a; - MogDB$# end; - MogDB$# / - INFO: {2} - ANONYMOUS BLOCK EXECUTE - ``` - -#### Set Types - -- exists(idx) - - Parameter: *idx* is of the int4 or varchar type. - - Return value: **true** or **false**, Boolean type - - Description: Checks whether a valid element exists in a specified position. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of varchar2; - MogDB-# a nest := nest('happy','?'); - MogDB-# flag bool; - MogDB-# begin - MogDB$# flag := a.exists(1); - MogDB$# raise info '%', flag; - MogDB$# flag := a.exists(10); - MogDB$# raise info '%', flag; - MogDB$# end; - MogDB$# / - INFO: t - INFO: f - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of varchar2 index by varchar2; - MogDB-# a nest; - MogDB-# flag bool; - MogDB-# begin - MogDB$# a('1') := 'Be'; - MogDB$# a('2') := 'happy'; - MogDB$# a('3') := '.'; - MogDB$# flag := a.exists('1'); - MogDB$# raise info '%', flag; - MogDB$# flag := a.exists('ddd'); - MogDB$# raise info '%', flag; - MogDB$# end; - MogDB$# / - INFO: t - INFO: f - ANONYMOUS BLOCK EXECUTE - ``` - -- extend[(e)] - - Parameters: *e* is of the int4 type. - - Return type: No value is returned. - - Description: Only the nest-table type is supported. One element is extended at the end of the nest-table variable. - - Restriction: extend() is not supported in nesting scenarios. 
- - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1); - MogDB-# begin - MogDB$# raise info '%', a; - MogDB$# a.extend; - MogDB$# raise info '%', a; - MogDB$# end; - MogDB$# / - INFO: {1} - INFO: {1,NULL} - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1); - MogDB-# begin - MogDB$# raise info '%', a; - MogDB$# a.extend(2); - MogDB$# raise info '%', a; - MogDB$# end; - MogDB$# / - INFO: {1} - INFO: {1,NULL,NULL} - ANONYMOUS BLOCK EXECUTE - ``` - -- delete[(idx1[, idx2])] - - Parameters: *idx1* and *idx2* are of the int4 or varchar2 type. - - Return type: No value is returned. - - Description: Deletes all elements and releases corresponding storage space in a nest-table set (to use this set, **extend** must be executed again), or deletes all elements (including index set elements) in an index-by table set but does not release corresponding storage space. - - Restriction: delete() is not supported in nesting scenarios. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1,2,3,4,5); - MogDB-# begin - MogDB$# raise info '%', a; - MogDB$# a.delete; - MogDB$# raise info '%', a; - MogDB$# end; - MogDB$# / - INFO: {1,2,3,4,5} - INFO: {} - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1,2,3,4,5); - MogDB-# begin - MogDB$# raise info '%', a; - MogDB$# a.delete(3); - MogDB$# raise info '%', a; - MogDB$# end; - MogDB$# / - INFO: {1,2,3,4,5} - INFO: {1,2,4,5} - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# a nest := nest(1,2,3,4,5); - MogDB-# begin - MogDB$# raise info '%', a; - MogDB$# a.delete(2,4); - MogDB$# raise info '%', a; - MogDB$# end; - MogDB$# / - INFO: {1,2,3,4,5} - INFO: {1,5} - ANONYMOUS BLOCK EXECUTE - ``` - -- trim[(n)] - - Parameter: *n* is of the int4 type. - - Return type: No value is returned. - - Description: Deletes one or *n* elements and corresponding storage space from a nest-table set. Only the nest-table set type is supported. - - Restriction: trim() is not supported in nesting scenarios. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# aa nest:=nest(11,22,33,44,55); - MogDB-# begin - MogDB$# raise info 'aa:%' ,aa; - MogDB$# aa.trim; - MogDB$# raise info 'aa:%' ,aa; - MogDB$# aa.trim(2); - MogDB$# raise info 'aa:%' ,aa; - MogDB$# end; - MogDB$# / - INFO: aa:{11,22,33,44,55} - INFO: aa:{11,22,33,44} - INFO: aa:{11,22} - ANONYMOUS BLOCK EXECUTE - ``` - -- count - - Parameter: none - - Return type: int - - Description: Returns the number of valid elements in a set. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# aa nest:=nest(11,22,33,44,55); - MogDB-# begin - MogDB$# raise info 'count:%' ,aa.count; - MogDB$# end; - MogDB$# / - INFO: count:5 - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of int index by varchar; - MogDB-# aa nest; - MogDB-# begin - MogDB$# aa('aaa') := 111; - MogDB$# aa('bbb') := 222; - MogDB$# aa('ccc') := 333; - MogDB$# raise info 'count:%' ,aa.count; - MogDB$# end; - MogDB$# / - INFO: count:3 - ANONYMOUS BLOCK EXECUTE - ``` - -- first - - Parameter: none - - Return type: int or varchar - - Description: Returns the index of the first valid element in a set. 
- - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# aa nest:=nest(11,22,33,44,55); - MogDB-# begin - MogDB$# raise info 'first:%' ,aa.first; - MogDB$# end; - MogDB$# / - INFO: first:1 - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of int index by varchar; - MogDB-# aa nest; - MogDB-# begin - MogDB$# aa('aaa') := 111; - MogDB$# aa('bbb') := 222; - MogDB$# aa('ccc') := 333; - MogDB$# raise info 'first:%' ,aa.first; - MogDB$# end; - MogDB$# / - INFO: first:aaa - ANONYMOUS BLOCK EXECUTE - ``` - -- last - - Parameter: none - - Return type: int or varchar - - Description: Returns the index of the last valid element in a set. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# aa nest:=nest(11,22,33,44,55); - MogDB-# begin - MogDB$# raise info 'last:%' ,aa.last; - MogDB$# end; - MogDB$# / - INFO: last:5 - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of int index by varchar; - MogDB-# aa nest; - MogDB-# begin - MogDB$# aa('aaa') := 111; - MogDB$# aa('bbb') := 222; - MogDB$# aa('ccc') := 333; - MogDB$# raise info 'last:%' ,aa.last; - MogDB$# end; - MogDB$# / - INFO: last:ccc - ANONYMOUS BLOCK EXECUTE - ``` - -- prior(idx) - - Parameter: *idx* is of the int or varchar type. - - Return type: int or varchar - - Description: Returns the index of a valid element before the current index in a set. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# aa nest:=nest(11,22,33,44,55); - MogDB-# begin - MogDB$# raise info 'prior:%' ,aa.prior(3); - MogDB$# end; - MogDB$# / - INFO: prior:2 - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of int index by varchar; - MogDB-# aa nest; - MogDB-# begin - MogDB$# aa('aaa') := 111; - MogDB$# aa('bbb') := 222; - MogDB$# aa('ccc') := 333; - MogDB$# raise info 'prior:%' ,aa.prior('bbb'); - MogDB$# end; - MogDB$# / - INFO: prior:aaa - ANONYMOUS BLOCK EXECUTE - ``` - -- next(idx) - - Parameter: *idx* is of the int or varchar type. - - Return type: int or varchar - - Description: Returns the index of a valid element following the current index in a set. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# aa nest:=nest(11,22,33,44,55); - MogDB-# begin - MogDB$# raise info 'next:%' ,aa.next(3); - MogDB$# end; - MogDB$# / - INFO: next:4 - ANONYMOUS BLOCK EXECUTE - - MogDB=# declare - MogDB-# type nest is table of int index by varchar; - MogDB-# aa nest; - MogDB-# begin - MogDB$# aa('aaa') := 111; - MogDB$# aa('bbb') := 222; - MogDB$# aa('ccc') := 333; - MogDB$# raise info 'next:%' ,aa.next('bbb'); - MogDB$# end; - MogDB$# / - INFO: next:ccc - ANONYMOUS BLOCK EXECUTE - ``` - -- limit - - Parameter: none - - Return value: null - - Description: Returns the maximum number of elements that can be stored in a nest-table set. This function applies only to the array type. The return value is null. - - Example: - - ```sql - MogDB=# declare - MogDB-# type nest is table of int; - MogDB-# aa nest:=nest(11,22,33,44,55); - MogDB-# begin - MogDB$# raise info 'limit:%' ,aa.limit; - MogDB$# end; - MogDB$# / - INFO: limit: - ANONYMOUS BLOCK EXECUTE - ``` - -#### Set-related Functions - -- unnest_table(anynesttable) - - Description: Returns a set of elements in a nest-table. - - Return type: setof anyelement - - Restriction: The tableof type cannot be nested with the tableof type, or the tableof type cannot be nested with other types and then the tableof type. 
- - Example: - - ```sql - create or replace procedure f1() - as - type t1 is table of int; - v2 t1 := t1(null, 2, 3, 4, null); - tmp int; - cursor c1 is select * from unnest_table(v2); - begin - open c1; - for i in 1 .. v2.count loop - fetch c1 into tmp; - if tmp is null then - dbe_output.print_line(i || ': is null'); - else - dbe_output.print_line(i || ': ' || tmp); - end if; - end loop; - close c1; - end; - / - - MogDB=# call f1(); - 1: is null - 2: 2 - 3: 3 - 4: 4 - 5: is null - f1 - ---- - - (1 row) - ``` - -- unnest_table(anyindexbytable) - - Description: Returns the set of elements in an index-by table sorted by index. - - Return type: setof anyelement - - Restriction: The tableof type cannot be nested with the tableof type, or the tableof type cannot be nested with other types and then the tableof type. Only the index by int type is supported. The index by varchar type is not supported. - - Example: - - ```sql - create or replace procedure f1() - as - type t1 is table of int index by int; - v2 t1 := t1(1=>1, -10=>(-10), 6=>6, 4=>null); - tmp int; - cursor c1 is select * from unnest_table(v2); - begin - open c1; - for i in 1 .. v2.count loop - fetch c1 into tmp; - if tmp is null then - dbe_output.print_line(i || ': is null'); - else - dbe_output.print_line(i || ': ' || tmp); - end if; - end loop; - close c1; - end; - / - - MogDB=# call f1(); - 1: -10 - 2: 1 - 3: is null - 4: 6 - f1 - ---- - - (1 row) - ``` - -## record - -**record Variables** - -Perform the following operations to create a record variable: - -Define a record type and use this type to declare a variable. - -**Syntax** - -For the syntax of the record type, see [Figure 1](#Syntax of the record type). - -**Figure 1** Syntax of the record type - -![syntax-of-the-record-type](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/arrays-and-records-1.png) - -The above syntax diagram is explained as follows: - -- **record_type**: record name -- **field**: record columns -- **datatype**: record data type -- **expression**: expression for setting a default value - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> In MogDB: -> -> - When assigning values to record variables, you can: -> - Declare a record type and define member variables of this type when you declare a function or stored procedure. -> - Assign the value of a record variable to another record variable. -> - Use **SELECT INTO** or **FETCH** to assign values to a record type. -> - Assign the **NULL** value to a record variable. -> - The **INSERT** and **UPDATE** statements cannot use a record variable to insert or update data. -> - Just like a variable, a record column of the compound type does not have a default value in the declaration. -> - **date_type** can also be the **record** type, array type, and collection type defined in the stored procedure (anonymous blocks are not supported). - -**Example** - -```sql -The table used in the following example is defined as follows: -MogDB=# \d emp_rec - Table "public.emp_rec" - Column | Type | Modifiers -----------+--------------------------------+----------- - empno | numeric(4,0) | not null - ename | character varying(10) | - job | character varying(9) | - mgr | numeric(4,0) | - hiredate | timestamp(0) without time zone | - sal | numeric(7,2) | - comm | numeric(7,2) | - deptno | numeric(2,0) | - --- Perform array operations in the function. -MogDB=# CREATE OR REPLACE FUNCTION regress_record(p_w VARCHAR2) -RETURNS -VARCHAR2 AS $$ -DECLARE - - -- Declare a record type. 
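-    -- (Fields may also be declared with %type, or with NOT NULL and a default, as rec_type1 and rec_type2 below show.)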
- type rec_type is record (name varchar2(100), epno int); - employer rec_type; - - -- Use %type to declare the record type. - type rec_type1 is record (name emp_rec.ename%type, epno int not null :=10); - employer1 rec_type1; - - -- Declare a record type with a default value. - type rec_type2 is record ( - name varchar2 not null := 'SCOTT', - epno int not null :=10); - employer2 rec_type2; - CURSOR C1 IS select ename,empno from emp_rec order by 1 limit 1; - -BEGIN - -- Assign a value to a member record variable. - employer.name := 'WARD'; - employer.epno = 18; - raise info 'employer name: % , epno:%', employer.name, employer.epno; - - -- Assign the value of a record variable to another variable. - employer1 := employer; - raise info 'employer1 name: % , epno: %',employer1.name, employer1.epno; - - -- Assign the NULL value to a record variable. - employer1 := NULL; - raise info 'employer1 name: % , epno: %',employer1.name, employer1.epno; - - -- Obtain the default value of a record variable. - raise info 'employer2 name: % ,epno: %', employer2.name, employer2.epno; - - -- Use a record variable in the FOR loop. - for employer in select ename,empno from emp_rec order by 1 limit 1 - loop - raise info 'employer name: % , epno: %', employer.name, employer.epno; - end loop; - - -- Use a record variable in the SELECT INTO statement. - select ename,empno into employer2 from emp_rec order by 1 limit 1; - raise info 'employer name: % , epno: %', employer2.name, employer2.epno; - - -- Use a record variable in a cursor. - OPEN C1; - FETCH C1 INTO employer2; - raise info 'employer name: % , epno: %', employer2.name, employer2.epno; - CLOSE C1; - RETURN employer.name; -END; -$$ -LANGUAGE plpgsql; - --- Invoke the function. -MogDB=# CALL regress_record('abc'); - --- Delete the function. -MogDB=# DROP FUNCTION regress_record; -``` diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-5-declare-syntax.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-5-declare-syntax.md deleted file mode 100644 index 0ae3fb79..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-5-declare-syntax.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: DECLARE Syntax -summary: DECLARE Syntax -author: Guo Huan -date: 2021-03-04 ---- - -# DECLARE Syntax - -## Basic Structure - -**Structure** - -A PL/SQL block can contain a sub-block which can be placed in any section. The following describes the architecture of a PL/SQL block: - -- **DECLARE**: declares variables, types, cursors, and regional stored procedures and functions used in the PL/SQL block. - - ```sql - DECLARE - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** - > This part is optional if no variable needs to be declared. - > - > - An anonymous block may omit the **DECLARE** keyword if no variable needs to be declared. - > - For a stored procedure, **AS** is used, which is equivalent to **DECLARE**. The **AS** keyword must be reserved even if there is no variable declaration part. - -- **EXECUTION**: specifies procedure and SQL statements. It is the main part of a program. Mandatory. - - ```sql - BEGIN - ``` - -- Exception part: processes errors. Optional. - - ```sql - EXCEPTION - ``` - -- End - - ``` - END; - / - ``` - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** - > You are not allowed to use consecutive tabs in the PL/SQL block because they may result in an exception when the **gsql** tool is executed with the **-r** parameter specified. 
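-
-Putting the four parts together, the following minimal anonymous block is an illustrative sketch (not taken from the original manual; the variable names are arbitrary). It declares variables, executes a statement that fails, and traps the error in the exception section:
-
-```sql
-DECLARE
-    divisor integer := 0;
-    res     integer;
-BEGIN
-    res := 10 / divisor;  -- Raises division_by_zero.
-EXCEPTION
-    WHEN division_by_zero THEN
-        raise info 'caught division by zero';
-END;
-/
-```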
- -**Category** - -PL/SQL blocks are classified into the following types: - -- Anonymous block: a dynamic block that can be executed only for once. For details about the syntax, see [Figure 1](#anonymous_block::=). -- Subprogram: a stored procedure, function, operator, or packages stored in a database. A subprogram created in a database can be called by other programs. - -## Anonymous Blocks - -An anonymous block applies to a script infrequently executed or a one-off activity. An anonymous block is executed in a session and is not stored. - -**Syntax** - -[Figure 1](#anonymous_block::=) shows the syntax diagrams for an anonymous block. - -**Figure 1** anonymous_block::= - -![anonymous_block](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/declare-syntax-1.png) - -Details about the syntax diagram are as follows: - -- The execute part of an anonymous block starts with a **BEGIN** statement, has a break with an **END** statement, and ends with a semicolon (;). Type a slash (/) and press **Enter** to execute the statement. - - > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** - > The terminator "/" must be written in an independent row. - -- The declaration section includes the variable definition, type, and cursor definition. - -- A simplest anonymous block does not execute any commands. At least one statement, even a **NULL** statement, must be presented in any implementation blocks. - -## Subprogram - -A subprogram stores stored procedures, functions, operators, and advanced packages. A subprogram created in a database can be called by other programs. diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-6-basic-statements.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-6-basic-statements.md deleted file mode 100644 index 5e882997..00000000 --- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-6-basic-statements.md +++ /dev/null @@ -1,215 +0,0 @@ ---- -title: Basic Statements -summary: Basic Statements -author: Guo Huan -date: 2021-03-04 ---- - -# Basic Statements - -During PL/SQL programming, you may define some variables, assign values to variables, and call other stored procedures. This chapter describes basic PL/SQL statements, including variable definition statements, value assignment statements, call statements, and return statements. - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:** -> You are advised not to call the SQL statements containing passwords in the stored procedures because authorized users may view the stored procedure file in the database and password information is leaked. If a stored procedure contains other sensitive information, permission to access this procedure must be configured, preventing information leakage. - -## Variable Definition Statements - -This section describes the declaration of variables in the PL/SQL and the scope of this variable in codes. - -**Variable Declaration** - -For details about the variable declaration syntax, see [Figure 1](#declare_variable::=). - -**Figure 1** declare_variable::= - -![declare_variable](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/basic-statements-1.png) - -The above syntax diagram is explained as follows: - -- **variable_name** indicates the name of a variable. -- **type** indicates the type of a variable. -- **value** indicates the initial value of the variable. (If the initial value is not given, NULL is taken as the initial value.) **value** can also be an expression. 
- -**Examples** - -```sql -MogDB=# DECLARE - emp_id INTEGER := 7788; -- Define a variable and assign a value to it. -BEGIN - emp_id := 5*7784; -- Assign a value to the variable. -END; -/ -``` - -In addition to the declaration of basic variable types, **%TYPE** and **%ROWTYPE** can be used to declare variables related to table columns or table structures. - -**%TYPE Attribute** - -**%TYPE** declares a variable to be of the same data type as a previously declared variable (for example, a column in a table). For example, if you want to define a *my_name* variable whose data type is the same as the data type of the **firstname** column in the **employee** table, you can define the variable as follows: - -``` -my_name employee.firstname%TYPE -``` - -In this way, you can declare *my_name* without the need of knowing the data type of **firstname** in **employee**, and the data type of **my_name** can be automatically updated when the data type of **firstname** changes. - -``` -TYPE employee_record is record (id INTEGER, firstname VARCHAR2(20)); -my_employee employee_record; -my_id my_employee.id%TYPE; -my_id_copy my_id%TYPE; -``` - -**%ROWTYPE Attribute** - -**%ROWTYPE** declares data types of a set of data. It stores a row of table data or results fetched from a cursor. For example, if you want to define a set of data with the same column names and column data types as the **employee** table, you can define the data as follows: - -``` -my_employee employee%ROWTYPE -``` - -The attribute can also be used on the cursor. The column names and column data types of this set of data are the same as those of the **employee** table. For the cursor in a package, **%ROWTYPE** can be omitted. **%TYPE** can also reference the type of a column in the cursor. You can define the data as follows: - -``` -cursor cur is select * from employee; -my_employee cur%ROWTYPE -my_name cur.firstname%TYPE -my_employee2 cur -- For the cursor defined in a package, %ROWTYPE can be omitted. -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - **%TYPE** cannot reference the type of a composite variable or a record variable, a column type of the record type, a column type of a variable of the cross-package composite type, or a column type of a cursor variable of the cross-package type. -> - **%ROWTYPE** cannot reference the type of a composite variable or a record variable and the type of a cross-package cursor. - -**Scope of a Variable** - -The scope of a variable indicates the accessibility and availability of the variable in code block. In other words, a variable takes effect only within its scope. - -- To define a function scope, a variable must declare and create a **BEGIN-END** block in the declaration section. The necessity of such declaration is also determined by block structure, which requires that a variable has different scopes and lifetime during a process. -- A variable can be defined multiple times in different scopes, and inner definition can cover outer one. -- A variable defined in an outer block can also be used in a nested block. However, the outer block cannot access variables in the nested block. - -## Assignment Statements - -**Syntax** - -[Figure 2](#assignment_value) shows the syntax diagram for assigning a value to a variable. 
- -**Figure 2** assignment_value::= - -![assignment_value](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/basic-statements-2.png) - -The above syntax diagram is explained as follows: - -- **variable_name** indicates the name of a variable. -- **value** can be a value or an expression. The type of **value** must be compatible with the type of **variable_name**. - -**Example** - -```sql -MogDB=# DECLARE - emp_id INTEGER := 7788; --Assignment -BEGIN - emp_id := 5; --Assignment - emp_id := 5*7784; -END; -/ -``` - -**Nested Value Assignment** - -[Figure 3](#nested_assignment_value) shows the syntax diagram for assigning a nested value to a variable. - -**Figure 3** nested_assignment_value::= - -![nested_assignment_value](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/basic-statements-3.png) - -The syntax in Figure 3 is described as follows: - -- **variable_name**: variable name -- **col_name**: column name -- **subscript**: subscript, which is used for an array variable. The value can be a value or an expression and must be of the int type. -- **value**: value or expression. The type of **value** must be compatible with the type of **variable_name**. - -**Example** - -```sql -MogDB=# CREATE TYPE o1 as (a int, b int); -MogDB=# DECLARE - TYPE r1 is VARRAY(10) of o1; - emp_id r1; -BEGIN - emp_id(1).a := 5;-- Assign a value. - emp_id(1).b := 5*7784; -END; -/ -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** -> -> - In INTO mode, values can be assigned only to the columns at the first layer. Two-dimensional or above arrays are not supported. -> - When a nested column value is referenced, if an array subscript exists, only one parenthesis can exist in the first three layers of columns. You are advised to use square brackets to reference the subscript. - -## INTO/BULK COLLECT INTO - -**INTO** and **BULK COLLECT INTO** store values returned by statements in a stored procedure to variables. **BULK COLLECT INTO** allows some or all returned values to be temporarily stored in an array. - -**Example** - -``` -MogDB=# DECLARE - my_id integer; -BEGIN - select id into my_id from customers limit 1; -- Assign a value. -END; -/ - -MogDB=# DECLARE - type id_list is varray(6) of customers.id%type; - id_arr id_list; -BEGIN - select id bulk collect into id_arr from customers order by id DESC limit 20; -- Assign values in batches. -END; -/ -``` - -> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** **BULK COLLECT INTO** can only assign values to arrays in batches. Use **LIMIT** properly to prevent performance deterioration caused by excessive operations on data. - -## Call Statement - -**Syntax** - -[Figure 4](#call_clause) shows the syntax diagram for calling a clause. - -**Figure 4** call_clause::= - -![call_clause](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/basic-statements-4.png) - -The above syntax diagram is explained as follows: - -- **procedure_name** specifies the name of a stored procedure. -- **parameter** specifies the parameters for the stored procedure. You can set no parameter or multiple parameters. 
-
-**Example**
-
-```sql
--- Create the stored procedure proc_staffs:
-MogDB=# CREATE OR REPLACE PROCEDURE proc_staffs
-(
-section     NUMBER(6),
-salary_sum out NUMBER(8,2),
-staffs_count out INTEGER
-)
-IS
-BEGIN
-SELECT sum(salary), count(*) INTO salary_sum, staffs_count FROM hr.staffs where section_id = section;
-END;
-/
-
--- Invoke the stored procedure proc_staffs:
-MogDB=# CALL proc_staffs(2,8,6);
-
--- Delete the stored procedure:
-MogDB=# DROP PROCEDURE proc_staffs;
-```
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-7-dynamic-statements.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-7-dynamic-statements.md
deleted file mode 100644
index 5e116cca..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-7-dynamic-statements.md
+++ /dev/null
@@ -1,166 +0,0 @@
----
-title: Dynamic Statements
-summary: Dynamic Statements
-author: Guo Huan
-date: 2021-03-04
----
-
-# Dynamic Statements
-
-## Executing Dynamic Query Statements
-
-You can perform dynamic queries in either of the two modes that MogDB provides: EXECUTE IMMEDIATE and OPEN FOR. **EXECUTE IMMEDIATE** dynamically executes **SELECT** statements, and **OPEN FOR** combines dynamic queries with cursors. If you need to store query results in a data set, use **OPEN FOR**.
-
-**EXECUTE IMMEDIATE**
-
-[Figure 1](#EXECUTE IMMEDIATE) shows the syntax diagram.
-
-**Figure 1** EXECUTE IMMEDIATE dynamic_select_clause::=
-
-![execute-immediate-dynamic_select_clause](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/dynamic-statements-1.png)
-
-[Figure 2](#using_clause) shows the syntax diagram for **using_clause**.
-
-**Figure 2** using_clause::=
-
-![using_clause](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/dynamic-statements-2.png)
-
-The above syntax diagram is explained as follows:
-
-- **define_variable**: specifies variables to store single-line query results.
-
-- **USING IN bind_argument**: specifies where the variable passed to the dynamic SQL value is stored, that is, in the dynamic placeholder of **dynamic_select_string**.
-
-- **USING OUT bind_argument**: specifies where the dynamic SQL returns the value of the variable.
-
-  > ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
-  >
-  > - In query statements, **INTO** and **OUT** cannot coexist.
-  > - A placeholder name starts with a colon (:) followed by digits, characters, or strings, corresponding to **bind_argument** in the **USING** clause.
-  > - **bind_argument** can only be a value, variable, or expression. It cannot be a database object such as a table name, column name, or data type. That is, **bind_argument** cannot be used to transfer schema objects for dynamic SQL statements. If a stored procedure needs to transfer database objects through **bind_argument** to construct dynamic SQL statements (generally, DDL statements), you are advised to use double vertical bars (||) to concatenate **dynamic_select_clause** with a database object.
-  > - A dynamic PL/SQL block allows duplicate placeholders. That is, a placeholder can correspond to only one **bind_argument** in the **USING** clause.
-
-**OPEN FOR**
-
-Dynamic query statements can be executed by using **OPEN FOR** to open dynamic cursors.
-
-[Figure 3](#open_for) shows the syntax diagram.
-
-**Figure 3** open_for::=
-
-![open_for](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/dynamic-statements-3.png)
-
-Parameter description:
-
-- **cursor_name**: specifies the name of the cursor to be opened.
-- **dynamic_string**: specifies the dynamic query statement. -- **USING value**: applies when a placeholder exists in dynamic_string. - -For use of cursors, see [Cursors](1-11-cursors.md). - -## Executing Dynamic Non-query Statements - -**Syntax** - -[Figure 4](#noselect) shows the syntax diagram. - -**Figure 4** noselect::= - -![noselect](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/dynamic-statements-4.png) - -[Figure 5](#using_clause::=) shows the syntax diagram for **using_clause**. - -**Figure 5** using_clause::= - -![using_clause-0](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/dynamic-statements-5.png) - -The above syntax diagram is explained as follows: - -**USING IN bind_argument** is used to specify the variable whose value is passed to the dynamic SQL statement. The variable is used when a placeholder exists in **dynamic_noselect_string**. That is, a placeholder is replaced by the corresponding **bind_argument** when a dynamic SQL statement is executed. Note that **bind_argument** can only be a value, variable, or expression, and cannot be a database object such as a table name, column name, and data type. If a stored procedure needs to transfer database objects through **bind_argument** to construct dynamic SQL statements (generally, DDL statements), you are advised to use double vertical bars (||) to concatenate **dynamic_select_clause** with a database object. In addition, a dynamic PL/SQL block allows duplicate placeholders. That is, a placeholder can correspond to only one **bind_argument**. - -**Example** - -```sql --- Create a table: -MogDB=# CREATE TABLE sections_t1 -( - section NUMBER(4) , - section_name VARCHAR2(30), - manager_id NUMBER(6), - place_id NUMBER(4) -); - --- Declare a variable: -MogDB=# DECLARE - section NUMBER(4) := 280; - section_name VARCHAR2(30) := 'Info support'; - manager_id NUMBER(6) := 103; - place_id NUMBER(4) := 1400; - new_colname VARCHAR2(10) := 'sec_name'; -BEGIN --- Execute the query: - EXECUTE IMMEDIATE 'insert into sections_t1 values(:1, :2, :3, :4)' - USING section, section_name, manager_id,place_id; --- Execute the query (duplicate placeholders): - EXECUTE IMMEDIATE 'insert into sections_t1 values(:1, :2, :3, :1)' - USING section, section_name, manager_id; --- Run the ALTER statement. (You are advised to use double vertical bars (||) to concatenate the dynamic DDL statement with a database object.) - EXECUTE IMMEDIATE 'alter table sections_t1 rename section_name to ' || new_colname; -END; -/ - --- Query data: -MogDB=# SELECT * FROM sections_t1; - ---Delete the table. -MogDB=# DROP TABLE sections_t1; -``` - -## Dynamically Calling Stored Procedures - -This section describes how to dynamically call store procedures. You must use anonymous statement blocks to package stored procedures or statement blocks and append **IN** and **OUT** behind the **EXECUTE IMMEDIATE…USING** statement to input and output parameters. - -**Syntax** - -[Figure 6](#call_procedure) shows the syntax diagram. - -**Figure 6** call_procedure::= - -![call_procedure](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/dynamic-statements-6.png) - -[Figure 7](#Figure 2) shows the syntax diagram for **using_clause**. - -**Figure 7** using_clause::= - -![using_clause-1](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/dynamic-statements-7.png) - -The above syntax diagram is explained as follows: - -- **CALL procedure_name**: calls the stored procedure. 
-
-- **[:placeholder1,:placeholder2,…]**: specifies the placeholder list of the stored procedure parameters. The numbers of the placeholders and parameters are the same.
-- **USING [IN|OUT|IN OUT]bind_argument**: specifies where the variable passed to the stored procedure parameter value is stored. The modifiers in front of **bind_argument** and of the corresponding parameter are the same.
-
-## Dynamically Calling Anonymous Blocks
-
-This section describes how to execute anonymous blocks in dynamic statements. Append **IN** and **OUT** to the **EXECUTE IMMEDIATE…USING** statement to pass input and output parameters.
-
-**Syntax**
-
-[Figure 8](#call_anonymous_block) shows the syntax diagram.
-
-**Figure 8** call_anonymous_block::=
-
-![call_anonymous_block](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/dynamic-statements-8.png)
-
-[Figure 9](#Figure 2using_clause) shows the syntax diagram for **using_clause**.
-
-**Figure 9** using_clause::=
-
-![using_clause-2](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/dynamic-statements-9.png)
-
-The above syntax diagram is explained as follows:
-
-- The execute part of an anonymous block starts with a **BEGIN** statement, ends with an **END** statement, and is terminated by a semicolon (;).
-- **USING [IN|OUT|IN OUT]bind_argument**: specifies where the variable passed to the stored procedure parameter value is stored. The modifiers in front of **bind_argument** and of the corresponding parameter are the same.
-- The input and output parameters in the middle of an anonymous block are designated by placeholders. The numbers of the placeholders and parameters are the same. The sequences of the parameters corresponding to the placeholders and the USING parameters are the same.
-- Currently in MogDB, when dynamic statements call anonymous blocks, placeholders cannot be used to pass input and output parameters in an **EXCEPTION** statement.
diff --git a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-8-control-statements.md b/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-8-control-statements.md
deleted file mode 100644
index a85d662f..00000000
--- a/product/en/docs-mogdb/v5.2/developer-guide/plpgsql/1-8-control-statements.md
+++ /dev/null
@@ -1,690 +0,0 @@
----
-title: Control Statements
-summary: Control Statements
-author: Guo Huan
-date: 2021-03-04
----
-
-# Control Statements
-
-## RETURN Statements
-
-In MogDB, data can be returned in any of the following ways: **RETURN**, **RETURN NEXT**, or **RETURN QUERY**. **RETURN NEXT** and **RETURN QUERY** are used only for functions and cannot be used for stored procedures.
-
-### RETURN
-
-**Syntax**
-
-[Figure 1](#return_clause::=) shows the syntax diagram for a return statement.
-
-**Figure 1** return_clause::=
-
-![return_clause](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-1.jpg)
-
-The above syntax diagram is explained as follows:
-
-This statement returns control from a stored procedure or function to a caller.
-
-**Examples**
-
-See [Example](1-6-basic-statements.md#call-statement) for call statement examples.
-
-### RETURN NEXT and RETURN QUERY
-
-**Syntax**
-
-When creating a function, specify **SETOF datatype** for the return values.
- -return_next_clause::= - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-2.png) - -return_query_clause::= - -![img](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-3.png) - -The above syntax diagram is explained as follows: - -If a function needs to return a result set, use **RETURN NEXT** or **RETURN QUERY** to add results to the result set, and then continue to execute the next statement of the function. As the **RETURN NEXT** or **RETURN QUERY** statement is executed repeatedly, more and more results will be added to the result set. After the function is executed, all results are returned. - -**RETURN NEXT** can be used for scalar and compound data types. - -**RETURN QUERY** has a variant **RETURN QUERY EXECUTE**. You can add dynamic queries and add parameters to the queries by **USING**. - -**Examples** - -```sql -MogDB=# CREATE TABLE t1(a int); -MogDB=# INSERT INTO t1 VALUES(1),(10); - ---RETURN NEXT -MogDB=# CREATE OR REPLACE FUNCTION fun_for_return_next() RETURNS SETOF t1 AS $$ -DECLARE - r t1%ROWTYPE; -BEGIN - FOR r IN select * from t1 - LOOP - RETURN NEXT r; - END LOOP; - RETURN; -END; -$$ LANGUAGE PLPGSQL; -MogDB=# call fun_for_return_next(); - a ---- - 1 - 10 -(2 rows) - --- RETURN QUERY -MogDB=# CREATE OR REPLACE FUNCTION fun_for_return_query() RETURNS SETOF t1 AS $$ -DECLARE - r t1%ROWTYPE; -BEGIN - RETURN QUERY select * from t1; -END; -$$ -language plpgsql; -MogDB=# call fun_for_return_query(); - a ---- - 1 - 10 -(2 rows) -``` - -## Conditional Statements - -Conditional statements are used to decide whether given conditions are met. Operations are executed based on the decisions made. - -MogDB supports five usages of **IF**: - -- IF_THEN - - **Figure 2** IF_THEN::= - - ![if_then](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-4.jpg) - - **IF_THEN** is the simplest form of **IF**. If the condition is true, statements are executed. If it is false, they are skipped. - - Example: - - ```sql - MogDB=# IF v_user_id <> 0 THEN - UPDATE users SET email = v_email WHERE user_id = v_user_id; - END IF; - ``` - -- IF_THEN_ELSE - - **Figure 3** IF_THEN_ELSE::= - - ![if_then_else](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-5.jpg) - - **IF-THEN-ELSE** statements add **ELSE** branches and can be executed if the condition is false. - - Example: - - ```sql - MogDB=# IF parentid IS NULL OR parentid = '' - THEN - RETURN; - ELSE - hp_true_filename(parentid); -- Call the stored procedure. - END IF; - ``` - -- IF_THEN_ELSE IF - - **IF** statements can be nested in the following way: - - ```sql - MogDB=# IF sex = 'm' THEN - pretty_sex := 'man'; - ELSE - IF sex = 'f' THEN - pretty_sex := 'woman'; - END IF; - END IF; - ``` - - Actually, this is a way of an **IF** statement nesting in the **ELSE** part of another **IF** statement. Therefore, an **END IF** statement is required for each nesting **IF** statement and another **END IF** statement is required to end the parent **IF-ELSE** statement. 
To set multiple options, use the following form:
-
-- IF_THEN_ELSIF_ELSE
-
-  **Figure 4** IF_THEN_ELSIF_ELSE::=
-
-  ![if_then_elsif_else](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-6.png)
-
-  Example:
-
-  ```sql
-  IF number_tmp = 0 THEN
-      result := 'zero';
-  ELSIF number_tmp > 0 THEN
-      result := 'positive';
-  ELSIF number_tmp < 0 THEN
-      result := 'negative';
-  ELSE
-      result := 'NULL';
-  END IF;
-  ```
-
-- IF_THEN_ELSEIF_ELSE
-
-  **ELSEIF** is an alias of **ELSIF**.
-
-  Example:
-
-  ```sql
-  CREATE OR REPLACE PROCEDURE proc_control_structure(i in integer)
-  AS
-  BEGIN
-      IF i > 0 THEN
-          raise info 'i:% is greater than 0. ',i;
-      ELSIF i < 0 THEN
-          raise info 'i:% is smaller than 0. ',i;
-      ELSE
-          raise info 'i:% is equal to 0. ',i;
-      END IF;
-      RETURN;
-  END;
-  /
-
-  CALL proc_control_structure(3);
-
-  -- Delete the stored procedure.
-  DROP PROCEDURE proc_control_structure;
-  ```
-
-## Loop Statements
-
-**Simple LOOP Statements**
-
-The syntax diagram is as follows:
-
-**Figure 5** loop::=
-
-![loop](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-7.png)
-
-**Example**
-
-```sql
-CREATE OR REPLACE PROCEDURE proc_loop(i in integer, count out integer)
-AS
-    BEGIN
-        count:=0;
-        LOOP
-        IF count > i THEN
-            raise info 'count is %. ', count;
-            EXIT;
-        ELSE
-            count:=count+1;
-        END IF;
-        END LOOP;
-    END;
-/
-
-CALL proc_loop(10,5);
-```
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:**
-> The loop must be used together with **EXIT**; otherwise, an infinite loop occurs.
-
-**WHILE-LOOP Statements**
-
-**Syntax diagram**
-
-**Figure 6** while_loop::=
-
-![while_loop](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-8.png)
-
-While the conditional expression is true, the statements in the WHILE loop are executed repeatedly; the condition is evaluated before each execution of the loop body.
-
-**Example**
-
-```sql
-CREATE TABLE integertable(c1 integer) ;
-CREATE OR REPLACE PROCEDURE proc_while_loop(maxval in integer)
-AS
-    DECLARE
-    i int :=1;
-    BEGIN
-        WHILE i < maxval LOOP
-            INSERT INTO integertable VALUES(i);
-            i:=i+1;
-        END LOOP;
-    END;
-/
-
--- Invoke a function:
-CALL proc_while_loop(10);
-
--- Delete the stored procedure and table.
-DROP PROCEDURE proc_while_loop;
-DROP TABLE integertable;
-```
-
-**FOR_LOOP (Integer variable) Statement**
-
-**Syntax diagram**
-
-**Figure 7** for_loop::=
-
-![for_loop](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-9.png)
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - The variable **name** is automatically defined as the **integer** type and exists only in this loop. Its value falls between **lower_bound** and **upper_bound**.
-> - When the keyword **REVERSE** is used, the lower bound must be greater than or equal to the upper bound; otherwise, the loop body is not executed.
-
-**FOR_LOOP Query Statements**
-
-**Syntax diagram**
-
-**Figure 8** for_loop_query::=
-
-![for_loop_query](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-10.png)
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
-> The variable **target** is automatically defined, its type is the same as that in the **query** result, and it is valid only in this loop. The target value is the query result.
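-
-Neither FOR form above ships with an example in this section, so the following sketch illustrates both; the procedure name and the query against the pg_class catalog are illustrative assumptions, not part of the original manual:
-
-```sql
-CREATE OR REPLACE PROCEDURE proc_for_demo()
-AS
-BEGIN
-    -- Integer-variable form: i is defined implicitly and exists only in the loop.
-    FOR i IN 1..3 LOOP
-        raise info 'i is %', i;
-    END LOOP;
-    -- Query form: rec is defined implicitly with the same shape as the query result.
-    FOR rec IN SELECT relname FROM pg_class LIMIT 3 LOOP
-        raise info 'relname is %', rec.relname;
-    END LOOP;
-END;
-/
-
--- Invoke the procedure:
-CALL proc_for_demo();
-
--- Delete the stored procedure.
-DROP PROCEDURE proc_for_demo;
-```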
-
-**FORALL Batch Query Statements**
-
-**Syntax diagram**
-
-**Figure 9** forall::=
-
-![forall](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-11.png)
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> - The variable *index* is automatically defined as the integer type and exists only in this loop. The value of *index* falls between the value of **low_bound** and the value of **upper_bound**.
-> - If **SAVE EXCEPTIONS** is specified, exceptions that occur during DML execution in the loop body are saved in **SQL%BULK_EXCEPTIONS**, and an exception is thrown after the execution is complete. If there is no abnormal execution result in the loop, the loop will not be rolled back in the current subtransaction.
-
-**Example**
-
-```sql
-CREATE TABLE hdfs_t1 (
-  title NUMBER(6),
-  did VARCHAR2(20),
-  data_peroid VARCHAR2(25),
-  kind VARCHAR2(25),
-  interval VARCHAR2(20),
-  time DATE,
-  isModified VARCHAR2(10)
-);
-
-INSERT INTO hdfs_t1 VALUES( 8, 'Donald', 'OConnell', 'DOCONNEL', '650.507.9833', to_date('21-06-1999', 'dd-mm-yyyy'), 'SH_CLERK' );
-
-CREATE OR REPLACE PROCEDURE proc_forall()
-AS
-BEGIN
-    FORALL i IN 100..120
-        update hdfs_t1 set title = title + 100*i;
-END;
-/
-
--- Invoke a function:
-CALL proc_forall();
-
--- Query the invocation result of the stored procedure.
-SELECT * FROM hdfs_t1 WHERE title BETWEEN 100 AND 120;
-
--- Delete the stored procedure and table.
-DROP PROCEDURE proc_forall;
-DROP TABLE hdfs_t1;
-```
-
-## LABEL_LOOP Statements
-
-**Syntax**
-
-```
-[label_begin:] LOOP
-    statements
-END LOOP [label_end]
-```
-
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-note.gif) **NOTE:**
->
-> Label usage is added on top of the simple loop statement. The label rules are as follows:
->
-> - label_begin can appear independently (without label_end). However, if label_end is used, label_begin must appear.
-> - The label can be referenced by the CONTINUE or EXIT statement. In the B-compatible database, the ITERATE or LEAVE statement can also be used.
->
-> ![img](https://cdn-mogdb.enmotech.com/docs-media/icon/icon-notice.gif) **NOTICE:** This loop is used only in the B-compatible database. An error is reported in other databases. This loop must be used together with EXIT. (In B-compatible mode, LEAVE has the same effect as EXIT, and ITERATE has the same effect as CONTINUE.) Otherwise, an infinite loop occurs.
-
-**Example**
-
-```
-CREATE OR REPLACE PROCEDURE label_loop(i in integer, count out integer)
-AS
-    BEGIN
-        count:=0;
-        label:
-        LOOP
-        IF count > i THEN
-            raise info 'count is %. ', count;
-            LEAVE;
-        ELSE
-            count:=count+1;
-        END IF;
-        END LOOP label;
-    END;
-/
-
-CALL label_loop(10,5);
-```
-
-## Branch Statements
-
-**Syntax**
-
-[Figure 10](#case_when) shows the syntax diagram for a branch statement.
-
-**Figure 10** case_when::=
-
-![case_when](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-12.png)
-
-[Figure 11](#when_clause) shows the syntax diagram for **when_clause**.
-
-**Figure 11** when_clause::=
-
-![when_clause](https://cdn-mogdb.enmotech.com/docs-media/mogdb/developer-guide/control-statements-13.png)
-
-Parameter description:
-
-- **case_expression**: specifies the variable or expression.
-- **when_expression**: specifies the constant or conditional expression.
-- **statement**: specifies the statement to be executed.
- -**Examples** - -```sql -CREATE OR REPLACE PROCEDURE proc_case_branch(pi_result in integer, pi_return out integer) -AS - BEGIN - CASE pi_result - WHEN 1 THEN - pi_return := 111; - WHEN 2 THEN - pi_return := 222; - WHEN 3 THEN - pi_return := 333; - WHEN 6 THEN - pi_return := 444; - WHEN 7 THEN - pi_return := 555; - WHEN 8 THEN - pi_return := 666; - WHEN 9 THEN - pi_return := 777; - WHEN 10 THEN - pi_return := 888; - ELSE - pi_return := 999; - END CASE; - raise info 'pi_return : %',pi_return ; -END; -/ - -CALL proc_case_branch(3,0); - --- Delete the stored procedure. -DROP PROCEDURE proc_case_branch; -``` - -## NULL Statements - -In PL/SQL programs, **NULL** statements are used to indicate "nothing should be done", equal to placeholders. They grant meanings to some statements and improve program readability. - -**Syntax** - -The following shows example use of **NULL** statements. - -```sql -DECLARE - ... -BEGIN - ... - IF v_num IS NULL THEN - NULL; --No data needs to be processed. - END IF; -END; -/ -``` - -## Error Trapping Statements - -By default, any error occurring in a PL/SQL function aborts execution of the function, and indeed of the surrounding transaction as well. You can trap errors and restore from them by using a **BEGIN** block with an **EXCEPTION** clause. The syntax is an extension of the normal syntax for a **BEGIN** block: - -```sql -[<